When a job runs with multiple reduce tasks, the Partitioner decides how the map output is spread across them: before the Mapper's output is handed to the Reducers, each key/value pair is assigned a partition number, every pair with the same number is routed to the same reduce task, and each reduce task writes its results to its own output file. In this example the partition is chosen by mobile-number prefix, so each carrier's traffic totals end up in a separate file.
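For comparison, when no custom Partitioner is set, Hadoop falls back to org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, which spreads keys across reduce tasks by hash code, essentially:

public class HashPartitioner<K, V> extends Partitioner<K, V> {
    // Mask off the sign bit so the result is never negative,
    // then take the remainder modulo the number of reduce tasks.
    public int getPartition(K key, V value, int numReduceTasks) {
        return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
    }
}

The custom Partitioner below replaces this hash with a lookup on the phone-number prefix, so records belonging to the same carrier always land on the same reduce task.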
The full implementation follows:
package com.itbuilder.mr;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import com.itbuilder.mr.bean.DataBean;
/**
 * Mobile phone traffic statistics, partitioned by carrier.
 * @author mrh
 *
 */
public class GRSDataCount {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration());
        job.setJarByClass(GRSDataCount.class);

        // Mapper, its output types, and the input path
        job.setMapperClass(DCMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(DataBean.class);
        FileInputFormat.setInputPaths(job, new Path(args[0]));

        // Custom partitioner; the reduce-task count comes from the
        // command line and must cover every partition number the
        // partitioner can return (here 0-3, so at least 4).
        job.setPartitionerClass(DCPartitioner.class);
        job.setNumReduceTasks(Integer.parseInt(args[2]));

        // Reducer, the final output types, and the output path
        job.setReducerClass(DCReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(DataBean.class);
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
    /**
     * Mapper: parses one tab-separated log line and emits
     * (phone number, DataBean) pairs.
     * @author mrh
     *
     */
    public static class DCMapper extends Mapper<LongWritable, Text, Text, DataBean> {

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Fields are tab-separated: index 1 is the phone number,
            // indexes 8 and 9 are the upstream and downstream traffic.
            String[] fields = value.toString().split("\t");
            DataBean dataBean = new DataBean(fields[1], Long.parseLong(fields[8]), Long.parseLong(fields[9]));
            context.write(new Text(dataBean.getTelNo()), dataBean);
        }
    }
    /**
     * Partitioner: maps a phone-number prefix to a carrier-specific
     * partition (reduce task); unknown prefixes fall through to partition 0.
     * @author mrh
     *
     */
    public static class DCPartitioner extends Partitioner<Text, DataBean> {

        // Phone-number prefix -> partition number (one partition per carrier).
        private static Map<String, Integer> providerMap = new HashMap<String, Integer>();
        static {
            providerMap.put("135", 1);
            providerMap.put("136", 1);
            providerMap.put("137", 1);
            providerMap.put("138", 1);
            providerMap.put("139", 1);
            providerMap.put("150", 2);
            providerMap.put("159", 2);
            providerMap.put("180", 3);
            providerMap.put("182", 3);
        }

        @Override
        public int getPartition(Text key, DataBean value, int numPartitions) {
            // The job must run with at least 4 reduce tasks, otherwise
            // partition numbers 1-3 would be illegal.
            String prefix = key.toString().substring(0, 3);
            Integer partition = providerMap.get(prefix);
            if (partition == null) {
                return 0;
            }
            return partition.intValue();
        }
    }
    /**
     * Reducer: sums the upstream and downstream traffic per phone number.
     * @author mrh
     *
     */
    public static class DCReducer extends Reducer<Text, DataBean, Text, DataBean> {

        @Override
        protected void reduce(Text key, Iterable<DataBean> beans, Context context)
                throws IOException, InterruptedException {
            long upPayLoad = 0;
            long downPayLoad = 0;
            for (DataBean bean : beans) {
                upPayLoad += bean.getUpload();
                downPayLoad += bean.getDownload();
            }
            DataBean outBean = new DataBean(key.toString(), upPayLoad, downPayLoad);
            context.write(key, outBean);
        }
    }
}
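The listing imports com.itbuilder.mr.bean.DataBean, but that class is not shown. Any value type shuffled between Mapper and Reducer must implement org.apache.hadoop.io.Writable. Below is a minimal sketch of what DataBean would need to look like, inferred from the constructor and getters used above; the serialization order and the toString format are assumptions:

package com.itbuilder.mr.bean;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;

public class DataBean implements Writable {

    private String telNo;
    private long upload;
    private long download;

    // Hadoop instantiates Writables reflectively, so a no-arg
    // constructor is required.
    public DataBean() {
    }

    public DataBean(String telNo, long upload, long download) {
        this.telNo = telNo;
        this.upload = upload;
        this.download = download;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(telNo);
        out.writeLong(upload);
        out.writeLong(download);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        // Fields must be read in exactly the order write() wrote them.
        this.telNo = in.readUTF();
        this.upload = in.readLong();
        this.download = in.readLong();
    }

    public String getTelNo() {
        return telNo;
    }

    public long getUpload() {
        return upload;
    }

    public long getDownload() {
        return download;
    }

    // Controls how the bean is printed in the reducer's output files.
    @Override
    public String toString() {
        return upload + "\t" + download + "\t" + (upload + download);
    }
}

Packaged into a jar (the jar name here is hypothetical), the job can then be submitted with at least four reduce tasks, so every partition number 0-3 has a reduce task to land on:

hadoop jar datacount.jar com.itbuilder.mr.GRSDataCount /flow/input /flow/output 4

Each reduce task writes its own output file, so partition 1 ends up in part-r-00001, partition 2 in part-r-00002, and so on.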