package com.hadoop.wordcount;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import com.hadoop.mapper.Mapper;
import com.hadoop.reducer.Reducer;
public class WordCount {
    public static void main(String[] args) throws Exception {
        // Create the job
        Job job = Job.getInstance(new Configuration());
        job.setJarByClass(WordCount.class);

        // Map phase
        job.setMapperClass(Mapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);
        FileInputFormat.setInputPaths(job, "/word.txt");

        // Reduce phase
        job.setReducerClass(Reducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        FileOutputFormat.setOutputPath(job, new Path("/wcount"));

        // Submit the job and wait for it to finish; without this call the job never runs
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
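
// Mapper (com/hadoop/mapper/Mapper.java): splits each input line into words and emits (word, 1) pairs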
package com.hadoop.mapper;
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
public class Mapper extends org.apache.hadoop.mapreduce.Mapper<LongWritable, Text, Text, LongWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // The key is the byte offset of the line in the input file; the value is the line itself
        String line = value.toString();
        String[] words = line.split(" ");
        // Emit (word, 1) for every word on the line
        for (String w : words) {
            context.write(new Text(w), new LongWritable(1));
        }
    }
}
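
// Reducer (com/hadoop/reducer/Reducer.java): sums the counts emitted for each word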
package com.hadoop.reducer;
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
public class Reducer extends org.apache.hadoop.mapreduce.Reducer<Text, LongWritable, Text, LongWritable> {
    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context)
            throws IOException, InterruptedException {
        // Add up all the 1s emitted by the mappers for this word
        long counter = 0;
        for (LongWritable i : values) {
            counter += i.get();
        }
        context.write(key, new LongWritable(counter));
    }
}
Package the above project into a jar and run it on Hadoop with the hadoop jar command to obtain the word count result.
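For example, assuming the jar has been named wordcount.jar (the name is arbitrary) and /word.txt already exists in HDFS, the job can be launched and its output inspected with:

hadoop jar wordcount.jar com.hadoop.wordcount.WordCount
hdfs dfs -cat /wcount/part-r-00000

Here part-r-00000 is the default name of the single reducer's output file. Note that the /wcount output directory must not exist before the job runs, or FileOutputFormat will reject the job.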