
Hadoop Learning Diary 3: Writing Programs

 

With the Hadoop runtime environment configured and the bundled examples running successfully, the next step is to write a program modeled on those examples and run it in the Hadoop environment.

First, use the HDFS API to create a file and write 10000 words into it. The program is as follows:

package com.yeepay.hadoop.hdfs;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HDFSOperator {

	/**
	 * @param args
	 */
	public static void main(String[] args) {

		// Create the Hadoop configuration object; it holds name-value pairs, in the same form as Hadoop config files such as core-site.xml
		Configuration configuration = new Configuration();

		try {
			// Obtain a FileSystem instance based on the configuration
			FileSystem fileSystem = FileSystem.get(configuration);

			// Specify the file location (a relative path resolves against the user's HDFS home directory)
			Path path = new Path("test/HDFSOperator.txt");

			// Get an output stream (true = overwrite the file if it already exists)
			FSDataOutputStream os = fileSystem.create(path, true);
			
			System.out.println("start to write file");
			
			for (int i = 0; i < 10000; i++) {

				// Write characters to the output stream (writeChars emits two bytes per char)
				os.writeChars("test ");
			}
			os.close();
			
			System.out.println("finish to write file");
		} catch (IOException e) {
			e.printStackTrace();
		}
		return;
	}

}
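
One detail worth calling out in HDFSOperator: writeChars encodes every character as two bytes (UTF-16), so the loop above actually writes 10000 × 5 × 2 = 100000 bytes, not 50000. If single-byte output is wanted, DataOutputStream.writeBytes (which keeps only the low byte of each char) is an alternative. A minimal sketch; the class name HDFSOperatorBytes and its output path are invented here for illustration:

package com.yeepay.hadoop.hdfs;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HDFSOperatorBytes {

	public static void main(String[] args) throws IOException {
		FileSystem fileSystem = FileSystem.get(new Configuration());
		// hypothetical path, chosen so it does not overwrite the original file
		Path path = new Path("test/HDFSOperatorBytes.txt");
		FSDataOutputStream os = fileSystem.create(path, true);
		for (int i = 0; i < 10000; i++) {
			// writeBytes writes the low byte of each char: 5 bytes per "test "
			os.writeBytes("test ");
		}
		os.close();
	}
}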
 

Next, use Hadoop MapReduce to count the words in this file. The program is as follows (based on the bundled WordCount example):

package com.yeepay.hadoop.mapreduce;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class WordCount {

	public static class TokenizerMapper extends
			Mapper<Object, Text, Text, IntWritable> {

		private static final IntWritable one = new IntWritable(1);

		private Text word = new Text();

		@Override
		public void map(Object key, Text value, Context context)
				throws IOException, InterruptedException {
			StringTokenizer stringTokenizer = new StringTokenizer(
					value.toString());
			
			System.out.println("TokenizerMapper : current key is " + key.toString());
			System.out.println("TokenizerMapper : current value is " + value.toString());
			
			while (stringTokenizer.hasMoreTokens()) {
				word.set(stringTokenizer.nextToken());
				context.write(word, one);
			}
		}
	}

	public static class IntSumReducer extends
			Reducer<Text, IntWritable, Text, IntWritable> {

		private IntWritable result = new IntWritable();

		@Override
		public void reduce(Text key, Iterable<IntWritable> values,
				Context context) throws IOException, InterruptedException {
			
			System.out.println("IntSumReducer : current key is " + key.toString());
			
			int sum = 0;
			for (IntWritable value : values) {
				sum = sum + value.get();
				System.out.println("IntSumReducer : current sum is " + sum + " current value is " + value);
			}
			result.set(sum);
			context.write(key, result);
		}
	}

	public static void main(String[] args) throws Exception {

		Configuration conf = new Configuration();

		String[] otherArgs = new GenericOptionsParser(conf, args)
				.getRemainingArgs();
		if (otherArgs.length != 2) {
			System.err.println("Usage:wordcount <int> <out>");
			System.exit(2);
		}
		
		System.out.println("arg0 is : " + otherArgs[0]);
		System.out.println("arg1 is : " + otherArgs[1]);

		System.out.println("start to create job...");
		
		Job job = new Job(conf, "word count");
		job.setJarByClass(WordCount.class);
		job.setMapperClass(TokenizerMapper.class);
		job.setCombinerClass(IntSumReducer.class);
		job.setReducerClass(IntSumReducer.class);
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(IntWritable.class);

		FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
		FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
		System.exit(job.waitForCompletion(true) ? 0 : 1);
	}

}
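
As a side note, the GenericOptionsParser boilerplate in main is exactly what Hadoop's ToolRunner normally handles. The same driver could be written against the Tool interface instead; a minimal sketch reusing the mapper and reducer above (the class name WordCountDriver is made up here):

package com.yeepay.hadoop.mapreduce;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class WordCountDriver extends Configured implements Tool {

	public int run(String[] args) throws Exception {
		if (args.length != 2) {
			System.err.println("Usage: wordcount <in> <out>");
			return 2;
		}
		// ToolRunner has already parsed the generic options into getConf()
		Job job = new Job(getConf(), "word count");
		job.setJarByClass(WordCountDriver.class);
		job.setMapperClass(WordCount.TokenizerMapper.class);
		job.setCombinerClass(WordCount.IntSumReducer.class);
		job.setReducerClass(WordCount.IntSumReducer.class);
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(IntWritable.class);
		FileInputFormat.addInputPath(job, new Path(args[0]));
		FileOutputFormat.setOutputPath(job, new Path(args[1]));
		return job.waitForCompletion(true) ? 0 : 1;
	}

	public static void main(String[] args) throws Exception {
		System.exit(ToolRunner.run(new Configuration(), new WordCountDriver(), args));
	}
}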

 

Export the project as a jar file, hadoop-sample.jar, and copy it to the NameNode.

First, run the HDFSOperator class:

root@wenbo00:/home/wenbo# hadoop jar hadoop-sample.jar com.yeepay.hadoop.hdfs.HDFSOperator

 

Then run hadoop fs -lsr to inspect the result:

drwxr-xr-x   - root supergroup          0 2012-03-13 19:44 /user/root/input
-rw-r--r--   1 root supergroup         22 2012-03-13 19:44 /user/root/input/file01
-rw-r--r--   1 root supergroup         28 2012-03-13 19:44 /user/root/input/file02
drwxr-xr-x   - root supergroup          0 2012-03-15 03:16 /user/root/output
-rw-r--r--   1 root supergroup          0 2012-03-15 03:16 /user/root/output/_SUCCESS
drwxr-xr-x   - root supergroup          0 2012-03-15 03:16 /user/root/output/_logs
drwxr-xr-x   - root supergroup          0 2012-03-15 03:16 /user/root/output/_logs/history
-rw-r--r--   1 root supergroup      16068 2012-03-15 03:16 /user/root/output/_logs/history/job_201203150214_0003_1331806561441_root_word+count
-rw-r--r--   1 root supergroup      20296 2012-03-15 03:16 /user/root/output/_logs/history/job_201203150214_0003_conf.xml
-rw-r--r--   1 root supergroup         49 2012-03-15 03:16 /user/root/output/part-r-00000
drwxr-xr-x   - root supergroup          0 2012-03-15 03:33 /user/root/test
-rw-r--r--   1 root supergroup     100000 2012-03-15 03:33 /user/root/test/HDFSOperator.txt
 

As shown, the HDFSOperator.txt file has been created under the test directory. Note its size of 100000 bytes: writeChars stores each character as two bytes, so 10000 words × 5 characters × 2 bytes = 100000.
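
The length can also be checked programmatically instead of with fs -lsr; a small sketch using the FileStatus API (the class name HDFSStat is made up for illustration):

package com.yeepay.hadoop.hdfs;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HDFSStat {

	public static void main(String[] args) throws IOException {
		FileSystem fileSystem = FileSystem.get(new Configuration());
		// same path that HDFSOperator wrote to
		FileStatus status = fileSystem.getFileStatus(new Path("test/HDFSOperator.txt"));
		// expected: 100000 (10000 words * 5 chars * 2 bytes per char)
		System.out.println("length in bytes: " + status.getLen());
	}
}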

 

Next, run the WordCount class:

root@wenbo00:/home/wenbo# hadoop jar hadoop-sample.jar com.yeepay.hadoop.mapreduce.WordCount test testout

 

The output is:

arg0 is : test
arg1 is : testout
start to create job...
****hdfs://wenbo00:9000/user/root/test
12/03/15 03:34:40 INFO input.FileInputFormat: Total input paths to process : 1
12/03/15 03:34:40 INFO mapred.JobClient: Running job: job_201203150214_0004
12/03/15 03:34:41 INFO mapred.JobClient:  map 0% reduce 0%
12/03/15 03:34:54 INFO mapred.JobClient:  map 100% reduce 0%
12/03/15 03:35:06 INFO mapred.JobClient:  map 100% reduce 100%
12/03/15 03:35:11 INFO mapred.JobClient: Job complete: job_201203150214_0004
12/03/15 03:35:11 INFO mapred.JobClient: Counters: 29
12/03/15 03:35:11 INFO mapred.JobClient:   Job Counters
12/03/15 03:35:11 INFO mapred.JobClient:     Launched reduce tasks=1
12/03/15 03:35:11 INFO mapred.JobClient:     SLOTS_MILLIS_MAPS=14358
12/03/15 03:35:11 INFO mapred.JobClient:     Total time spent by all reduces waiting after reserving slots (ms)=0
12/03/15 03:35:11 INFO mapred.JobClient:     Total time spent by all maps waiting after reserving slots (ms)=0
12/03/15 03:35:11 INFO mapred.JobClient:     Rack-local map tasks=1
12/03/15 03:35:11 INFO mapred.JobClient:     Launched map tasks=1
12/03/15 03:35:11 INFO mapred.JobClient:     SLOTS_MILLIS_REDUCES=10869
12/03/15 03:35:11 INFO mapred.JobClient:   File Output Format Counters
12/03/15 03:35:11 INFO mapred.JobClient:     Bytes Written=16
12/03/15 03:35:11 INFO mapred.JobClient:   FileSystemCounters
12/03/15 03:35:11 INFO mapred.JobClient:     FILE_BYTES_READ=22
12/03/15 03:35:11 INFO mapred.JobClient:     HDFS_BYTES_READ=100116
12/03/15 03:35:11 INFO mapred.JobClient:     FILE_BYTES_WRITTEN=43033
12/03/15 03:35:11 INFO mapred.JobClient:     HDFS_BYTES_WRITTEN=16
12/03/15 03:35:11 INFO mapred.JobClient:   File Input Format Counters
12/03/15 03:35:11 INFO mapred.JobClient:     Bytes Read=100000
12/03/15 03:35:11 INFO mapred.JobClient:   Map-Reduce Framework
12/03/15 03:35:11 INFO mapred.JobClient:     Map output materialized bytes=22
12/03/15 03:35:11 INFO mapred.JobClient:     Map input records=1
12/03/15 03:35:11 INFO mapred.JobClient:     Reduce shuffle bytes=22
12/03/15 03:35:11 INFO mapred.JobClient:     Spilled Records=2
12/03/15 03:35:11 INFO mapred.JobClient:     Map output bytes=140000
12/03/15 03:35:11 INFO mapred.JobClient:     CPU time spent (ms)=3420
12/03/15 03:35:11 INFO mapred.JobClient:     Total committed heap usage (bytes)=176099328
12/03/15 03:35:11 INFO mapred.JobClient:     Combine input records=10000
12/03/15 03:35:11 INFO mapred.JobClient:     SPLIT_RAW_BYTES=116
12/03/15 03:35:11 INFO mapred.JobClient:     Reduce input records=1
12/03/15 03:35:11 INFO mapred.JobClient:     Reduce input groups=1
12/03/15 03:35:11 INFO mapred.JobClient:     Combine output records=1
12/03/15 03:35:11 INFO mapred.JobClient:     Physical memory (bytes) snapshot=238370816
12/03/15 03:35:11 INFO mapred.JobClient:     Reduce output records=1
12/03/15 03:35:11 INFO mapred.JobClient:     Virtual memory (bytes) snapshot=1004232704
12/03/15 03:35:11 INFO mapred.JobClient:     Map output records=10000

 

The final count is 10000 and the run completes. The counters also show the combiner at work: all 10000 map output records passed through the combiner (Combine input records=10000), which collapsed them into a single record, so the reducer saw only one input record (Reduce input records=1).
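
Besides checking with hadoop fs -cat, the result can be read back with the same FileSystem API used in HDFSOperator. A minimal sketch, assuming the job was run with the output directory testout as above, so the result sits in testout/part-r-00000:

package com.yeepay.hadoop.hdfs;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HDFSReader {

	public static void main(String[] args) throws IOException {
		FileSystem fileSystem = FileSystem.get(new Configuration());
		// assumes the WordCount job wrote its output under "testout"
		FSDataInputStream in = fileSystem.open(new Path("testout/part-r-00000"));
		BufferedReader reader = new BufferedReader(new InputStreamReader(in));
		String line;
		while ((line = reader.readLine()) != null) {
			System.out.println(line);
		}
		reader.close();
	}
}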

 

Problems encountered:

 

Neither program produced any output when first run.

 

Cause: the programs were missing exit statements, namely the return statement in HDFSOperator and the System.exit call in WordCount.

 
