Sorting Data with MapReduce: Ascending and Descending Order

Between the Map and Reduce phases, MapReduce sorts the intermediate keys in ascending order. If the key type is Text, the keys are compared lexicographically as strings; if the key type is IntWritable, they are compared by numeric value. We can exploit this behavior to sort data.
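
To see the difference between the two key types concretely, here is a minimal standalone sketch (not part of the job code, just a comparison of the two Writable types):

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;

public class KeyOrderDemo {
    public static void main(String[] args) {
        // Text compares lexicographically, so "10" sorts before "9"
        System.out.println(new Text("10").compareTo(new Text("9")) < 0);          // prints true
        // IntWritable compares numerically, so 10 sorts after 9
        System.out.println(new IntWritable(10).compareTo(new IntWritable(9)) > 0); // prints true
    }
}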

For example, given input data like the following:

(figure of the unsorted input omitted)

we want to rearrange it into this form:

(figure of the ascending result omitted)
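Since the figures are not available, here is a purely hypothetical illustration of what such data could look like: an input file with one integer per line, such as

654
32
15
654
87

which the job should rewrite in ascending order, one integer per line, with duplicates preserved:

15
32
87
654
654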
When the map phase puts these numbers into the Context, besides matching the declared key/value types, you also have to account for the fact that MapReduce groups identical keys together, as discussed in 《【Mapreduce】去除重复的行》. So the map output key is the number itself, and the value is just a random number whose only purpose is to occupy a slot in the reduce phase's Iterable<IntWritable> values. When the reduce phase writes to the output file, it emits the corresponding key once for every random value in Iterable<IntWritable> values, so duplicate numbers are not lost. The code is as follows:

import java.io.IOException;
import java.util.Random;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class MyMapReduce {

    public static class MyMapper extends
            Mapper<Object, Text, IntWritable, IntWritable> {
        // The map output key/value types (the 3rd and 4th generic parameters) are both IntWritable instead of Text
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            IntWritable data = new IntWritable(Integer.parseInt(value.toString()));// parse each input line into an IntWritable
            IntWritable random = new IntWritable(new Random().nextInt());// a throwaway random value, used only as a placeholder
            context.write(data, random);
        }
    }

    public static class MyReducer extends
            Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {
        // The reducer's input key/value types (1st and 2nd generic parameters) are IntWritable instead of Text
        // The reducer's output key/value types (3rd and 4th generic parameters) are also IntWritable instead of Text
        public void reduce(IntWritable key, Iterable<IntWritable> values,
                Context context) throws IOException, InterruptedException {
            while (values.iterator().hasNext()) {// emit the key once for every random value, so duplicates are preserved
                context.write(key, null);
                values.iterator().next();// advance the cursor (values.iterator() returns the same underlying iterator each call)
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        String[] otherArgs = new GenericOptionsParser(conf, args)
                .getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: wordcount <in> <out>");
            System.exit(2);
        }
        Job job = new Job(conf, "");
        job.setMapperClass(MyMapper.class);
        job.setReducerClass(MyReducer.class);
        job.setOutputKeyClass(IntWritable.class);// the key written to the output file is IntWritable, not Text
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

}
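
A side note on the reduce loop above: it works because, in Hadoop's reduce context implementation, values.iterator() hands back the same underlying iterator on every call, so calling hasNext() and next() through separate values.iterator() expressions still advances a single cursor. The same logic reads more naturally as a for-each loop; below is a sketch of just the reducer, with the rest of the job unchanged:

    public static class MyReducer extends
            Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {
        @Override
        public void reduce(IntWritable key, Iterable<IntWritable> values,
                Context context) throws IOException, InterruptedException {
            // Emit the key once per grouped value, so duplicate input numbers
            // appear the correct number of times in the output.
            for (IntWritable ignored : values) {
                context.write(key, null);
            }
        }
    }

A more conventional variant would declare the reducer's output value type as NullWritable and write NullWritable.get() instead of null, but that also means changing the Reducer's fourth type parameter and job.setOutputValueClass accordingly.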

So what if we want to sort the result above in descending order instead, i.e. to reach the goal shown in the figure below (the figure is not included here)?


You need to write your own comparator by subclassing Hadoop's IntWritable.Comparator, much as you would override a Comparator for Collections.sort in plain Java (see 《【Java】Collections中sort方法Comparator的重写》), negating the original return value so that positive results become negative, and then, in the main method, tell MapReduce to use this comparator in the sort phase between Map and Reduce. The code is shown below:


import java.io.IOException;
import java.util.Random;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class MyMapReduce {

    public static class MyMapper extends
            Mapper<Object, Text, IntWritable, IntWritable> {
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            IntWritable data = new IntWritable(Integer.parseInt(value.toString()));
            IntWritable random = new IntWritable(new Random().nextInt());
            context.write(data, random);
        }
    }

    public static class MyReducer extends
            Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {
        public void reduce(IntWritable key, Iterable<IntWritable> values,
                Context context) throws IOException, InterruptedException {
            while (values.iterator().hasNext()) {
                context.write(key, null);
                values.iterator().next();
            }
        }
    }

    // Comparator that makes the sort phase order keys in descending order
    public static class IntWritableDecreasingComparator extends
            IntWritable.Comparator {
        public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
            return -super.compare(b1, s1, l1, b2, s2, l2);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        String[] otherArgs = new GenericOptionsParser(conf, args)
                .getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: wordcount <in> <out>");
            System.exit(2);
        }
        Job job = new Job(conf, "");
        job.setMapperClass(MyMapper.class);
        job.setReducerClass(MyReducer.class);
        job.setOutputKeyClass(IntWritable.class);
        job.setOutputValueClass(IntWritable.class);

        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        job.setSortComparatorClass(IntWritableDecreasingComparator.class);// make the sort phase use the descending comparator
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

}
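
One caveat: MapReduce only guarantees sorted order within each reduce task's output. With the default partitioner and more than one reducer, every part-r-* file is individually sorted, but the files taken together are not globally ordered. A minimal fix, assuming a single output file is acceptable, is to force one reducer in the main method:

        job.setNumReduceTasks(1);// a single reducer yields one globally sorted output file

For larger data sets, Hadoop's TotalOrderPartitioner is the usual way to keep several reducers while still producing a globally ordered result.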
