(2) Writing the MapReduce program in Java
Define a Driver.java class
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class Driver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(Driver.class);

        // Wire in our own Mapper/Reducer classes; they shadow Hadoop's names,
        // so these references resolve to the classes defined below.
        job.setMapperClass(Mapper.class);
        job.setReducerClass(Reducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(phoneBean.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(phoneBean.class);

        // Custom partitioner; the reduce-task count must match the number
        // of partitions it can return.
        job.setPartitionerClass(Partitioner.class);
        job.setNumReduceTasks(3);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
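The Driver above registers a Partitioner class with three reduce tasks, but that class is not defined in this section. A minimal sketch of what it could look like, assuming we simply spread phone numbers across the 3 reducers by a prefix hash (an illustrative rule, not necessarily the original one):
import org.apache.hadoop.io.Text;
public class Partitioner extends org.apache.hadoop.mapreduce.Partitioner<Text, phoneBean> {
    @Override
    public int getPartition(Text key, phoneBean value, int numPartitions) {
        // Illustrative rule: hash the first three digits of the phone number;
        // any rule works as long as the result stays in [0, numPartitions).
        return (key.toString().substring(0, 3).hashCode() & Integer.MAX_VALUE) % numPartitions;
    }
}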
Define a phoneBean.java class
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;
public class phoneBean implements Writable {
    private long upFlow;
    private long downFlow;
    private long sumFlow;

    // Hadoop instantiates Writables via reflection during deserialization,
    // so a public no-arg constructor is required.
    public phoneBean() {
        super();
    }

    public phoneBean(long upFlow, long downFlow) {
        super();
        this.upFlow = upFlow;
        this.downFlow = downFlow;
        this.sumFlow = upFlow + downFlow;
    }

    public long getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow(long sumFlow) {
        this.sumFlow = sumFlow;
    }

    public long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public long getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }

    // Serialization: the field order here must match readFields() exactly.
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }

    // Deserialization: read fields in the same order they were written.
    @Override
    public void readFields(DataInput in) throws IOException {
        upFlow = in.readLong();
        downFlow = in.readLong();
        sumFlow = in.readLong();
    }

    // TextOutputFormat calls toString() to render the value in the output file.
    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }

    // Reset the reusable bean instead of allocating a new one per record.
    public void set(long upFlow, long downFlow) {
        this.upFlow = upFlow;
        this.downFlow = downFlow;
        this.sumFlow = upFlow + downFlow;
    }
}
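Since write() and readFields() must serialize and deserialize the fields in exactly the same order, a quick local round trip can catch ordering mistakes before the job runs. A minimal sketch (PhoneBeanTest is a hypothetical helper, not part of the job):
import java.io.*;
public class PhoneBeanTest {
    public static void main(String[] args) throws IOException {
        // Write the bean out, read it back into a fresh instance,
        // and confirm the fields survive the round trip.
        phoneBean in = new phoneBean(100L, 200L);
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        in.write(new DataOutputStream(buf));
        phoneBean out = new phoneBean();
        out.readFields(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
        System.out.println(out); // expected: 100	200	300
    }
}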
Define the Mapper and Reducer classes
Mapper
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
public class Mapper extends org.apache.hadoop.mapreduce.Mapper<LongWritable, Text, Text, phoneBean> {
    // Reuse a single bean and Text across calls: map() runs once per input
    // record, so allocating new objects inside it would waste memory. This is
    // also why the bean provides a no-arg constructor and a set() method.
    phoneBean bean = new phoneBean();
    Text k = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // 1. Read one line of input
        String line = value.toString();
        // 2. Split it into fields
        String[] fields = line.split("\t");
        // 3. Extract the phone number and flow counts, and fill the bean
        String phoneNum = fields[1];
        long upFlow = Long.parseLong(fields[fields.length - 3]);
        long downFlow = Long.parseLong(fields[fields.length - 2]);
        bean.set(upFlow, downFlow);
        k.set(phoneNum);
        // 4. Emit the phone number as the key and the flow bean as the value
        context.write(k, bean);
    }
}
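The mapper assumes tab-separated input in which the phone number is the second field and the upstream/downstream flow counts are the third- and second-to-last fields. A line might look like this (hypothetical sample; only the indexed fields matter):
1	13736230513	192.196.100.1	www.example.com	2481	24681	200
Here fields[1] is the phone number, fields[fields.length - 3] is 2481 (upFlow), and fields[fields.length - 2] is 24681 (downFlow).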
Reducer
import java.io.IOException;
import org.apache.hadoop.io.Text;
public class Reducer extends org.apache.hadoop.mapreduce.Reducer<Text, phoneBean, Text, phoneBean> {
    @Override
    protected void reduce(Text key, Iterable<phoneBean> values, Context context)
            throws IOException, InterruptedException {
        // Sum the upstream and downstream flow for this phone number
        long sum_upFlow = 0;
        long sum_downFlow = 0;
        for (phoneBean bean : values) {
            sum_upFlow += bean.getUpFlow();
            sum_downFlow += bean.getDownFlow();
        }
        // The two-arg constructor computes sumFlow = upFlow + downFlow
        context.write(key, new phoneBean(sum_upFlow, sum_downFlow));
    }
}
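With the classes compiled and packaged into a jar (flow.jar below is a hypothetical name), the job can be submitted with the input and output paths as arguments:
hadoop jar flow.jar Driver /input/flow /output/flow
Because the Driver calls setNumReduceTasks(3), the output directory will contain three result files, part-r-00000 through part-r-00002. Each line holds the phone number followed by phoneBean.toString(), i.e. upFlow, downFlow, and sumFlow separated by tabs.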