A file contains tab-separated records in the following format. Each record has a variable number of fields, but the second field is always the phone number, and the third-to-last and second-to-last fields hold the upstream and downstream traffic, respectively.
1363157985066 13726230503 00-FD-07-A4-72-B8:CMCC 120.196.100.82 i02.c.aliimg.com 24 27 2481 24681 200
1363157995052 13826544101 5C-0E-8B-C7-F1-E0:CMCC 120.197.40.4 4 0 264 0 200
1363157991076 13926435656 20-10-7A-28-CC-0A:CMCC 120.196.100.99 2 4 132 1512 200
1363154400022 13926251106 5C-0E-8B-8B-B1-50:CMCC 120.197.40.4 4 0 240 0 200
1363157993044 18211575961 94-71-AC-CD-E6-18:CMCC-EASY 120.196.100.99 iface.qiyi.com 视频网站 15 12 1527 2106 200
Problem 1: For each user (phone number), compute the total upstream traffic, total downstream traffic, and overall total traffic.
First, define a POJO that holds the upstream, downstream, and total flow. For Problem 1 the bean only travels as a value, so implementing Hadoop's Writable interface is enough; Problem 2 upgrades it to WritableComparable. Getters are included because the reducer below reads the fields back out.
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;

public class FlowBean implements Writable {

    private long upFlow;
    private long dFlow;
    private long sumFlow;

    // Deserialization instantiates the bean via reflection, which requires
    // an explicit no-arg constructor.
    public FlowBean() {}

    public FlowBean(long upFlow, long dFlow) {
        this.upFlow = upFlow;
        this.dFlow = dFlow;
        this.sumFlow = upFlow + dFlow;
    }

    public void set(long upFlow, long dFlow) {
        this.upFlow = upFlow;
        this.dFlow = dFlow;
        this.sumFlow = upFlow + dFlow;
    }

    public long getUpFlow() { return upFlow; }

    public long getdFlow() { return dFlow; }

    public long getSumFlow() { return sumFlow; }

    // Serialization: the field order here must mirror readFields().
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(dFlow);
        out.writeLong(sumFlow);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        upFlow = in.readLong();
        dFlow = in.readLong();
        sumFlow = in.readLong();
    }

    // TextOutputFormat renders the value with toString().
    @Override
    public String toString() {
        return upFlow + "\t" + dFlow + "\t" + sumFlow;
    }
}
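Before wiring the bean into a job, it can help to sanity-check that write() and readFields() mirror each other. Below is a minimal round-trip sketch; the FlowBeanRoundTrip harness is not part of the tutorial's jobs and assumes FlowBean is in the same package:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class FlowBeanRoundTrip {
    public static void main(String[] args) throws IOException {
        FlowBean original = new FlowBean(2481, 24681);

        // Serialize into an in-memory buffer, exactly as Hadoop would.
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        original.write(new DataOutputStream(buf));

        // Deserialize into a fresh bean created via the no-arg constructor.
        FlowBean copy = new FlowBean();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));

        System.out.println(copy); // prints: 2481	24681	27162
    }
}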
FlowCountMapper keys every record by the phone number, so the shuffle delivers all of one user's records to the same reduce call.
class FlowCountMapper extends Mapper<LongWritable, Text, Text, FlowBean> {

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Convert the line to a String and split it into fields.
        String line = value.toString();
        String[] fields = line.split("\t");
        // The phone number is the second field.
        String phoneNbr = fields[1];
        // Records vary in length, so index the upstream and downstream
        // flow from the end of the record.
        long upFlow = Long.parseLong(fields[fields.length - 3]);
        long dFlow = Long.parseLong(fields[fields.length - 2]);
        context.write(new Text(phoneNbr), new FlowBean(upFlow, dFlow));
    }
}
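For the first sample record, for instance, this mapper emits key 13726230503 with value FlowBean(2481, 24681), which toString() renders as 2481	24681	27162.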
FlowCountReducer then sums up the traffic of each phone number.
class FlowCountReducer extends Reducer<Text, FlowBean, Text, FlowBean> {

    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context)
            throws IOException, InterruptedException {
        long sum_upFlow = 0;
        long sum_dFlow = 0;
        // Walk all beans for this phone number, accumulating the upstream
        // and downstream flow separately.
        for (FlowBean bean : values) {
            sum_upFlow += bean.getUpFlow();
            sum_dFlow += bean.getdFlow();
        }
        FlowBean resultBean = new FlowBean(sum_upFlow, sum_dFlow);
        context.write(key, resultBean);
    }
}
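Worked over the five sample records above, the job's output is one line per phone number (Text keys sort lexicographically; the columns are upstream, downstream, and total flow):

13726230503	2481	24681	27162
13826544101	264	0	264
13926251106	240	0	240
13926435656	132	1512	1644
18211575961	1527	2106	3633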
Problem 2: On top of the result above, add one more requirement: sort the statistics by total flow in descending order.
The MapReduce framework sorts map output by key, using the key's compareTo() method. So it suffices to have FlowBean implement WritableComparable's compareTo() and to use the bean as the map output key of a second job that reads the output of Problem 1.
public class FlowBean implements WritableComparable<FlowBean> {

    // The fields, constructors, set(), getters, write(), readFields() and
    // toString() are identical to the Problem 1 version and omitted here;
    // only the comparator is new.

    // Sort by total flow, descending. Long.compare honors the compareTo()
    // contract (equal totals return 0); the common `> ? -1 : 1` shortcut
    // never returns 0 and violates the contract.
    @Override
    public int compareTo(FlowBean o) {
        return Long.compare(o.sumFlow, this.sumFlow);
    }
}

Because equal totals compare, and therefore group, as equal, phone numbers that tie arrive in a single reduce call, and the reducer must write every value of the group. FlowCountSortMapper reads the Problem 1 output and moves the bean into the key position:
class FlowCountSortMapper extends Mapper<LongWritable, Text, FlowBean, Text> {

    // Reuse the key/value objects across map() calls; context.write()
    // serializes them immediately, so this is safe and avoids per-record
    // allocations.
    FlowBean bean = new FlowBean();
    Text v = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // The input is the output of the previous job: one line per phone
        // number holding its aggregated upstream, downstream and total flow.
        String line = value.toString();
        String[] fields = line.split("\t");
        String phoneNbr = fields[0];
        long upFlow = Long.parseLong(fields[1]);
        long dFlow = Long.parseLong(fields[2]);
        bean.set(upFlow, dFlow);
        v.set(phoneNbr);
        context.write(bean, v);
    }
}
/**
 * Keys arrive already sorted by FlowBean.compareTo(), i.e. by descending
 * total flow. Phone numbers whose totals tie compare equal and are grouped
 * into one call, so every value in the group is written out.
 *
 * @author 张政
 */
class FlowCountSortReducer extends Reducer<FlowBean, Text, Text, FlowBean> {

    @Override
    protected void reduce(FlowBean bean, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        for (Text phone : values) {
            context.write(phone, bean);
        }
    }
}
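The sort runs as a second job over the output directory of the Problem 1 job. For a single globally ordered result it also needs exactly one reduce task; with several reducers each output file would only be sorted internally (absent a total-order partitioner). A minimal driver sketch, assuming a hypothetical FlowCountSort driver class with the input/output paths taken from the command line:

public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf);
    job.setJarByClass(FlowCountSort.class);

    job.setMapperClass(FlowCountSortMapper.class);
    job.setReducerClass(FlowCountSortReducer.class);

    // FlowBean is the map output key, so the shuffle sorts by compareTo().
    job.setMapOutputKeyClass(FlowBean.class);
    job.setMapOutputValueClass(Text.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(FlowBean.class);

    // One reducer -> one globally sorted output file.
    job.setNumReduceTasks(1);

    // args[0] is the output directory of the Problem 1 job.
    FileInputFormat.setInputPaths(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
}

Over the sample data this would yield:

13726230503	2481	24681	27162
18211575961	1527	2106	3633
13926435656	132	1512	1644
13826544101	264	0	264
13926251106	240	0	240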
Problem 3: Write the statistics to a different output file per province, based on where each phone number is registered.
Override Partitioner so that numbers from the same province return the same partition number (an int):
public class ProvincePartitioner extends Partitioner<Text, FlowBean> {

    private static final HashMap<String, Integer> provinceDict = new HashMap<>();

    static {
        // Simulated mapping of phone-number prefixes to provinces.
        provinceDict.put("136", 0);
        provinceDict.put("137", 1);
        provinceDict.put("138", 2);
        provinceDict.put("139", 3);
    }

    @Override
    public int getPartition(Text key, FlowBean value, int numPartitions) {
        String prefix = key.toString().substring(0, 3);
        Integer provinceId = provinceDict.get(prefix);
        // Unknown prefixes fall into the catch-all partition 4.
        return provinceId == null ? 4 : provinceId;
    }
}
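Applied to the sample data, 13726230503 goes to partition 1 (prefix 137), 13826544101 to partition 2, 13926435656 and 13926251106 to partition 3, and 18211575961 to partition 4, the catch-all for unknown prefixes. Reduce task N writes partition N to its own file, part-r-0000N.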
Register the partitioner in the driver and give the job a matching number of reduce tasks.
public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Run on YARN; on a configured cluster this usually comes from
    // mapred-site.xml rather than being hard-coded.
    conf.set("mapreduce.framework.name", "yarn");
    Job job = Job.getInstance(conf);

    /*job.setJar("/home/hadoop/wc.jar");*/
    // Locate the jar containing this driver class.
    job.setJarByClass(FlowCount.class);

    // The Mapper/Reducer classes this job uses.
    job.setMapperClass(FlowCountMapper.class);
    job.setReducerClass(FlowCountReducer.class);

    // Our custom partitioner, plus a matching number of reduce tasks:
    // four provinces and the catch-all bucket. A partition number with no
    // reduce task behind it fails the job with an "Illegal partition"
    // error; extra reduce tasks merely produce empty output files.
    job.setPartitionerClass(ProvincePartitioner.class);
    job.setNumReduceTasks(5);

    // K/V types of the map output.
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(FlowBean.class);
    // K/V types of the final output.
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(FlowBean.class);

    // Input directory of the raw data and output directory for the results.
    FileInputFormat.setInputPaths(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));

    // Submit the job, its configuration and its jar to YARN and wait.
    /*job.submit();*/
    boolean res = job.waitForCompletion(true);
    System.exit(res ? 0 : 1);
}
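To run it, package the classes into a jar and submit it with the hadoop CLI; the jar name and HDFS paths below are illustrative:

hadoop jar flowcount.jar com.learn.bigdata.mr.flowsum.FlowCount /flow/input /flow/output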