The MapReduce Programming Model
The MR programming model:
1. The output of map is the input of reduce.
2. All input and output take the form of key-value pairs:
   <k1,v1>: map input, read from HDFS; k1 is the byte offset of each line, v1 is the content of that line
   <k2,v2>: map output
   <k3,v3>: reduce input; k3 == k2, and v3 is a collection whose elements are the v2 values for that key
   <k4,v4>: reduce output, written to HDFS; k4 == k3, and v4 is the arithmetic sum of all elements in v3 (in the WordCount example below)
3. In MapReduce programming, the data types of all key-value pairs must be Hadoop types:
   Java type      Hadoop type
   String         Text
   int            IntWritable
   long           LongWritable
   double         DoubleWritable
   null           NullWritable
4. All Hadoop data types implement Hadoop serialization, so objects of these classes can be used as map and reduce input and output.
   Compare Java serialization: an object of a serializable class can likewise be written out and read back as input/output.
5. A MapReduce program has at least three classes: a Mapper subclass, a Reducer subclass, and a main (driver) class.
6. If objects of a class are to be used as map or reduce input/output, the class must implement the Writable interface; if those objects also need to be sorted, implement WritableComparable instead (see the first sketch after this list).
7. To write a partitioner, extend the Partitioner class, override the getPartition method, and register the partitioner class and the number of partitions in the main class. Partitioning is based on the map output (see the second sketch after this list).
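As a sketch of point 6, below is a minimal custom type that could serve as a map/reduce key. The class name Employee and its two fields are assumptions made for illustration only; they are not part of the WordCount example. Implementing WritableComparable provides both Hadoop serialization (write/readFields) and the ordering used during the shuffle sort.

package demo.wc;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.WritableComparable;

// Hypothetical key type: WritableComparable = Writable (serialization) + Comparable (sorting)
public class Employee implements WritableComparable<Employee> {

    private int empno;         // employee number
    private String ename = ""; // employee name

    // Serialization: write the fields in a fixed order
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeInt(this.empno);
        out.writeUTF(this.ename);
    }

    // Deserialization: read the fields back in the same order
    @Override
    public void readFields(DataInput in) throws IOException {
        this.empno = in.readInt();
        this.ename = in.readUTF();
    }

    // Sort rule: order keys by employee number
    @Override
    public int compareTo(Employee other) {
        return Integer.compare(this.empno, other.empno);
    }

    public int getEmpno() { return empno; }
    public void setEmpno(int empno) { this.empno = empno; }
    public String getEname() { return ename; }
    public void setEname(String ename) { this.ename = ename; }
}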
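For point 7, here is a minimal sketch of a partitioner over the WordCount map output <Text, LongWritable>; the class name WordCountPartitioner and the hash-based rule are assumptions for illustration.

package demo.wc;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

// Hypothetical partitioner: decides which reduce partition each (k2, v2) pair goes to.
// Partitioning is based on the map output.
public class WordCountPartitioner extends Partitioner<Text, LongWritable> {

    @Override
    public int getPartition(Text k2, LongWritable v2, int numPartitions) {
        // Example rule: distribute words across partitions by the hash of the word
        return (k2.hashCode() & Integer.MAX_VALUE) % numPartitions;
    }
}

In the main class it would be registered with job.setPartitionerClass(WordCountPartitioner.class), and the number of partitions set with job.setNumReduceTasks(3) (the count 3 is only an example).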
Mapper
package demo.wc;

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

//public class WordCountMapper extends Mapper<k1, v1, k2, v2> {
public class WordCountMapper extends Mapper<LongWritable, Text, Text, LongWritable> {

    @Override
    protected void map(LongWritable key1, Text value1, Context context)
            throws IOException, InterruptedException {
        /*
         * context is the Mapper context
         * upstream:   HDFS
         * downstream: Reducer
         */
        // Get one line of data read from HDFS, e.g. "I love Beijing"
        String str = value1.toString();

        // Split the line into words
        String[] words = str.split(" ");

        // Emit one (word, 1) pair per word to the Reducer, e.g. (I, 1)
        for (String w : words) {
            // k2: the word   v2: a count of one
            context.write(new Text(w), new LongWritable(1));
        }
    }
}
Reducer
package demo.wc;

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

//public class WordCountReducer extends Reducer<k3, v3, k4, v4> {
public class WordCountReducer extends Reducer<Text, LongWritable, Text, LongWritable> {

    @Override
    protected void reduce(Text k3, Iterable<LongWritable> v3, Context context)
            throws IOException, InterruptedException {
        /*
         * context is the Reducer context
         * upstream:   Mapper
         * downstream: HDFS
         */
        // Sum all the values in v3
        long total = 0;
        for (LongWritable v : v3) {
            total = total + v.get();
        }

        // Emit (k4, v4): the word and its total count
        context.write(k3, new LongWritable(total));
    }
}
Main program class
package demo.wc;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCountMain {

    public static void main(String[] args) throws Exception {
        // Create a job = mapper + reducer
        //Job job = new Job(new Configuration());   // deprecated constructor
        Job job = Job.getInstance(new Configuration());

        // Specify the entry point of the job
        job.setJarByClass(WordCountMain.class);

        // Specify the mapper and its output types
        job.setMapperClass(WordCountMapper.class);
        job.setMapOutputKeyClass(Text.class);           // k2
        job.setMapOutputValueClass(LongWritable.class); // v2

        // Specify the reducer and its output types
        job.setReducerClass(WordCountReducer.class);
        job.setOutputKeyClass(Text.class);              // k4
        job.setOutputValueClass(LongWritable.class);    // v4

        // Specify the input and output directories: HDFS paths
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // Run the job and wait for it to finish
        job.waitForCompletion(true);
    }
}
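Assuming the three classes above are packaged into a jar (the name wc.jar is only an example), the job would typically be submitted with something like hadoop jar wc.jar demo.wc.WordCountMain /input/data.txt /output/wc, where args[0] is the HDFS input path and args[1] is an HDFS output directory that must not exist yet.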