package com.founder.hadoop.mapreduce;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
public class TestMapReduce1 extends Configured implements Tool {

    // Counter used to track input lines that are skipped because they are malformed.
    enum Counter {
        LINESKIP
    }

    public static class Map extends
            Mapper<LongWritable, Text, NullWritable, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            try {
                // Split the line on spaces and pick out the month, time, and MAC fields.
                String line = value.toString();
                String[] lines = line.split(" ");
                String month = lines[0];
                String time = lines[1];
                String mac = lines[6];
                Text out = new Text(month + " " + time + " " + mac);
                context.write(NullWritable.get(), out);
            } catch (java.lang.ArrayIndexOutOfBoundsException e) {
                // Lines with too few fields are counted and skipped.
                context.getCounter(Counter.LINESKIP).increment(1);
                return;
            }
        }
    }

    // Unused: setReducerClass() is commented out below, so the default identity
    // reducer is applied to the map output.
    public static class Reduce extends Reducer<Text, Text, Text, Text> {
    }

    @Override
    public int run(String[] arg) throws Exception {
        Configuration conf = getConf();
        Job job = new Job(conf, "TestMapReduce1"); // job name
        job.setJarByClass(TestMapReduce1.class); // jar containing this class
        FileInputFormat.addInputPath(job, new Path(arg[0])); // input path
        FileOutputFormat.setOutputPath(job, new Path(arg[1])); // output path
        job.setMapperClass(Map.class); // use the Map class above for the map tasks
        // job.setReducerClass(Reduce.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        job.setOutputKeyClass(NullWritable.class); // output key type
        job.setOutputValueClass(Text.class); // output value type
        job.waitForCompletion(true);
        return job.isSuccessful() ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        int res = ToolRunner.run(new Configuration(), new TestMapReduce1(), args);
        System.exit(res);
    }
}
This post walks through a simple data-processing example built on the Hadoop MapReduce framework. A custom Map phase reads the input file, parses each line, and emits it in a fixed output format; the post also shows how to configure and run the MapReduce job.
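The original post does not spell out the input layout; judging from the mapper, it is a space-delimited log in which field 0 is the month, field 1 the time, and field 6 the MAC address. The standalone sketch below illustrates the same extraction outside of Hadoop; the sample line is invented purely for illustration.

// A minimal sketch of the field extraction the mapper performs.
// The sample log line is hypothetical; the real input is assumed to be
// space-delimited with month, time, and MAC at indices 0, 1, and 6.
public class ParseSketch {
    public static void main(String[] args) {
        String line = "Apr 23:59:17 ap1 wifi assoc ok 00:1A:2B:3C:4D:5E";
        String[] fields = line.split(" ");
        if (fields.length > 6) {
            String month = fields[0]; // e.g. "Apr"
            String time = fields[1];  // e.g. "23:59:17"
            String mac = fields[6];   // e.g. "00:1A:2B:3C:4D:5E"
            System.out.println(month + " " + time + " " + mac);
        } else {
            // In the MapReduce job, short lines increment the LINESKIP counter instead.
            System.out.println("line skipped");
        }
    }
}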