package com.zhiyou.bd23.totalorder;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.InputSampler;
import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
public class TotalOrder3 {
    public static void main(String[] args) throws Exception {
        Configuration configuration = new Configuration();
        // Optional: custom key/value separator for KeyValueTextInputFormat (default is tab)
        // configuration.set("mapreduce.input.keyvaluelinerecordreader.key.value.separator", ",");

        // File in which the partition boundaries computed from the sample are stored
        Path partitionerFile = new Path("/partitioner_file");
        // Register the partition file location in the configuration
        TotalOrderPartitioner.setPartitionFile(configuration, partitionerFile);

        Job job = Job.getInstance(configuration);
        job.setJarByClass(TotalOrder3.class);
        job.setJobName("Total order sort example using TotalOrderPartitioner");
        // Identity mapper and reducer: records pass through unchanged and are globally sorted by key
        job.setMapperClass(Mapper.class);
        job.setReducerClass(Reducer.class);
        job.setPartitionerClass(TotalOrderPartitioner.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        job.setInputFormatClass(KeyValueTextInputFormat.class);

        Path inputPath = new Path("/README.txt");
        Path outputDir = new Path("/to3");
        // Remove the output directory if it already exists
        outputDir.getFileSystem(configuration).delete(outputDir, true);
        FileInputFormat.addInputPath(job, inputPath);
        FileOutputFormat.setOutputPath(job, outputDir);
        job.setNumReduceTasks(2);

        // Configure the sampler: each key is chosen with probability 0.1, up to 20 samples in total
        InputSampler.RandomSampler<Text, Text> sampler =
                new InputSampler.RandomSampler<Text, Text>(0.1, 20);
        // Run the sampling and write the partition file; this must happen before the job is submitted
        InputSampler.writePartitionFile(job, sampler);

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
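Besides RandomSampler, InputSampler also ships with two other built-in samplers. The snippet below is a minimal sketch, not part of the original listing, assuming the same Text key/value types and imports as the class above; either sampler could be passed to InputSampler.writePartitionFile in place of the RandomSampler:

        // SplitSampler reads only the first records of the sampled splits;
        // here: about 100 keys in total, drawn from at most 10 splits.
        InputSampler.Sampler<Text, Text> splitSampler =
                new InputSampler.SplitSampler<Text, Text>(100, 10);

        // IntervalSampler takes keys at a fixed frequency; here roughly 1 key in 100,
        // again from at most 10 splits.
        InputSampler.Sampler<Text, Text> intervalSampler =
                new InputSampler.IntervalSampler<Text, Text>(0.01, 10);

        // e.g. InputSampler.writePartitionFile(job, splitSampler);

SplitSampler is the cheapest option because it only reads the beginning of each sampled split, which works well when the data is already randomly distributed; IntervalSampler suits data that is already sorted; RandomSampler costs more I/O but gives more representative partition boundaries for skewed data.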