Text output: TextOutputFormat
The default output format is TextOutputFormat, which writes each record as a line of text. Its keys and values may be of any type, because TextOutputFormat calls toString() to turn them into strings.
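As a minimal sketch of configuring TextOutputFormat explicitly (the class name TextOutputFormatSketch is made up for this illustration, and the separator property name is the Hadoop 2.x+ one; older releases used mapred.textoutputformat.separator):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

import java.io.IOException;

public class TextOutputFormatSketch {
    public static Job newJob() throws IOException {
        Configuration conf = new Configuration();
        // The default key/value separator is a tab; this property overrides it.
        conf.set("mapreduce.output.textoutputformat.separator", "\t");
        Job job = Job.getInstance(conf);
        // TextOutputFormat is already the default; setting it here is just explicit.
        job.setOutputFormatClass(TextOutputFormat.class);
        return job;
    }
}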
SequenceFileOutputFormat
When the output will feed a subsequent MapReduce job, SequenceFileOutputFormat is a good choice: its format is compact and compresses easily.
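A minimal sketch of enabling compressed SequenceFile output in a driver (the Snappy codec is an assumption; any codec installed on the cluster works):

import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.compress.SnappyCodec;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;

public class SequenceFileOutputSketch {
    public static void configureOutput(Job job) {
        job.setOutputFormatClass(SequenceFileOutputFormat.class);
        // Compress whole blocks of records rather than each value individually.
        SequenceFileOutputFormat.setCompressOutput(job, true);
        SequenceFileOutputFormat.setOutputCompressionType(job, SequenceFile.CompressionType.BLOCK);
        SequenceFileOutputFormat.setOutputCompressorClass(job, SnappyCodec.class);
    }
}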
OutputFormat
You can also define your own OutputFormat.
Custom OutputFormat
Use case
To control the final output path and output format, define a custom OutputFormat. For example, a single MapReduce job may need to route two kinds of records to different destinations; this sort of flexible output requirement can be met with a custom OutputFormat, as the walkthrough below shows.
Steps
- Subclass FileOutputFormat.
- Provide your own RecordWriter, overriding write(), the method that actually emits the output data.
(1) Write the FilterMapper class
package com.hadwinling.mapreduce.outputformat;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * Passes each input line through unchanged: the line becomes the key, NullWritable the value.
 *
 * @author HadwinLing
 * @version 1.0
 * @date 2020/11/22 14:49
 */
public class FilterMapper extends Mapper<LongWritable, Text, Text, NullWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Emit the whole line as the key; no value is needed.
        context.write(value, NullWritable.get());
    }
}
(2) Write the FilterReducer class
package com.hadwinling.mapreduce.outputformat;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * Appends a line terminator to each key and writes it once per occurrence.
 *
 * @author HadwinLing
 * @version 1.0
 * @date 2020/11/22 14:58
 */
public class FilterReducer extends Reducer<Text, NullWritable, Text, NullWritable> {
    private final Text k = new Text();

    @Override
    protected void reduce(Text key, Iterable<NullWritable> values, Context context)
            throws IOException, InterruptedException {
        // The custom RecordWriter does not append a terminator, so add one here.
        k.set(key.toString() + "\t\n");

        // Write once per value so duplicate input lines are not collapsed into one.
        for (NullWritable value : values) {
            context.write(k, NullWritable.get());
        }
    }
}
(3) Write a custom OutputFormat class
package com.hadwinling.mapreduce.outputformat;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

/**
 * Custom OutputFormat that delegates record writing to FRecordWriter.
 *
 * @author HadwinLing
 * @version 1.0
 * @date 2020/11/22 15:01
 */
public class FilterOutputFormat extends FileOutputFormat<Text, NullWritable> {
    @Override
    public RecordWriter<Text, NullWritable> getRecordWriter(TaskAttemptContext job)
            throws IOException, InterruptedException {
        return new FRecordWriter(job);
    }
}
(4) Write the RecordWriter class
package com.hadwinling.mapreduce.outputformat;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

import java.io.IOException;

/**
 * Routes each record to atguigu.log or other.log depending on its content.
 *
 * @author HadwinLing
 * @version 1.0
 * @date 2020/11/22 15:03
 */
public class FRecordWriter extends RecordWriter<Text, NullWritable> {
    private FSDataOutputStream fosatguigu;
    private FSDataOutputStream fosother;

    public FRecordWriter(TaskAttemptContext job) {
        try {
            // 1. Get the file system
            FileSystem fs = FileSystem.get(job.getConfiguration());
            // 2. Create the output stream for atguigu.log
            fosatguigu = fs.create(new Path("e:/atguigu.log"));
            // 3. Create the output stream for other.log
            fosother = fs.create(new Path("e:/other.log"));
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    @Override
    public void write(Text key, NullWritable value) throws IOException, InterruptedException {
        // If the key contains "atguigu", write it to atguigu.log; otherwise to other.log.
        if (key.toString().contains("atguigu")) {
            fosatguigu.write(key.toString().getBytes());
        } else {
            fosother.write(key.toString().getBytes());
        }
    }

    @Override
    public void close(TaskAttemptContext context) throws IOException, InterruptedException {
        IOUtils.closeStream(fosatguigu);
        IOUtils.closeStream(fosother);
    }
}
(5) Write the FilterDriver class
package com.hadwinling.mapreduce.outputformat;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

/**
 * Driver that wires the mapper, reducer, and custom output format together.
 *
 * @author HadwinLing
 * @version 1.0
 * @date 2020/11/22 15:04
 */
public class FilterDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        job.setJarByClass(FilterDriver.class);
        job.setMapperClass(FilterMapper.class);
        job.setReducerClass(FilterReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);

        // Register the custom output format with the job.
        job.setOutputFormatClass(FilterOutputFormat.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        // Although we use a custom OutputFormat, it extends FileOutputFormat,
        // and FileOutputFormat writes a _SUCCESS file, so an output directory
        // still has to be specified here.
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
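As a usage sketch (the jar name and the input/output paths below are assumptions): package the job, put a text file whose lines may or may not contain "atguigu" at the input path, and run

hadoop jar filter.jar com.hadwinling.mapreduce.outputformat.FilterDriver /input /output

Lines containing "atguigu" land in e:/atguigu.log, all other lines in e:/other.log, and the directory given as args[1] receives only the _SUCCESS marker.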