MapReduce Case 3 ------- SortByCount

This case uses the same website login log data as Case 2. Two requirements need to be implemented:

(1) Count the number of times each user visits the site.

(2) Sort the users by their visit counts.

The key-value pair flow for this case:
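Job 1 (SortByDateFirst): (offset, "email|date" line) -> map -> (email, 1) -> reduce -> (email, total visit count)

Job 2 (SortByDateSecond): (offset, "email<TAB>count" line) -> map -> (count, email) -> shuffle sorts by count -> reduce -> (email, count)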

The steps to implement the requirements are as follows:

1. Create the Maven project

(1) Create a new Maven project.

(2) Modify pom.xml and add the code below; once the import succeeds, the dependencies appear under the Maven panel on the right.

    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>3.1.4</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>3.1.4</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-core</artifactId>
            <version>3.1.4</version>
        </dependency>
        <!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-mapreduce-examples -->
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-examples</artifactId>
            <version>3.1.4</version>
            <scope>test</scope>
        </dependency>
    </dependencies>

(3) Create a package named demo and add two Java classes to it, SortByDateFirst and SortByDateSecond:

----------------------------------------------SortByDateFirst.java----------------------------------------------

package demo;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

import java.io.IOException;

public class SortByDateFirst {
    public static class SplitMapper
            extends Mapper<Object, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        // Each input line has the form "email|date"; emit (email, 1).
        @Override
        public void map(Object key, Text value, Context context
        ) throws IOException, InterruptedException {
            String[] data = value.toString().split("\\|", -1);
            word.set(data[0]); // data[0] is the email address
            context.write(word, one);
        }
    }
    public static class IntSumReducer
            extends Reducer<Text,IntWritable,Text,IntWritable> {
        private IntWritable result = new IntWritable();

        // Sum the ones for each email to get its total visit count.
        @Override
        public void reduce(Text key, Iterable<IntWritable> values,
                           Context context
        ) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length < 2) {
            System.err.println("Usage: SortByDateFirst <in> [<in>...] <out>");
            System.exit(2);
        }
        Job job = Job.getInstance(conf, "sort by count first");
        job.setJarByClass(SortByDateFirst.class);
        job.setMapperClass(SplitMapper.class);
        job.setCombinerClass(IntSumReducer.class); // summing is associative, so the reducer doubles as a combiner
        job.setReducerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        for (int i = 0; i < otherArgs.length - 1; ++i) {
            FileInputFormat.addInputPath(job, new Path(otherArgs[i]));
        }
        FileOutputFormat.setOutputPath(job,
                new Path(otherArgs[otherArgs.length - 1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
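Because the job uses the default TextOutputFormat, each line that SortByDateFirst writes is the email address, a tab character, and the visit count. That tab-separated format is exactly what SortByDateSecond's mapper splits on.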

 ---------------------------------------------SortByDateSecond.java----------------------------------------------

package demo;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

import java.io.IOException;

public class SortByDateSecond {
    public static class SplitMapper
            extends Mapper<Object, Text, IntWritable ,Text> {

        private IntWritable count = new IntWritable(1);
        private Text word = new Text();

        // Each input line is the first job's output: "email<TAB>count".
        // Emit (count, email) so the shuffle sorts the records by count.
        @Override
        public void map(Object key, Text value, Context context
        ) throws IOException, InterruptedException {
            String[] data = value.toString().split("\t", -1);
            word.set(data[0]);                    // email address
            count.set(Integer.parseInt(data[1])); // visit count
            context.write(count, word);
        }
    }
    public static class ReverseReducer
            extends Reducer<IntWritable,Text,Text,IntWritable> {

        // Keys arrive sorted by count (ascending by default); emit each
        // (count, email) pair back as (email, count).
        @Override
        public void reduce(IntWritable key, Iterable<Text> values,
                           Context context
        ) throws IOException, InterruptedException {
            for (Text val : values) {
                context.write(val,key);
            }
        }
    }
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length < 2) {
            System.err.println("Usage: SortByDateSecond <in> [<in>...] <out>");
            System.exit(2);
        }
        Job job = Job.getInstance(conf, "sort by count second");
        job.setJarByClass(SortByDateSecond.class);
        job.setMapperClass(SplitMapper.class);
        // No combiner here: ReverseReducer's output types (Text, IntWritable)
        // differ from the map output types (IntWritable, Text).
        job.setReducerClass(ReverseReducer.class);
        job.setMapOutputKeyClass(IntWritable.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        for (int i = 0; i < otherArgs.length - 1; ++i) {
            FileInputFormat.addInputPath(job, new Path(otherArgs[i]));
        }
        FileOutputFormat.setOutputPath(job,
                new Path(otherArgs[otherArgs.length - 1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
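One caveat: the shuffle sorts IntWritable keys in ascending order, so the least active users come first. If you want the most active users first, a minimal sketch (not part of the original code) is to add a nested comparator class like the one below to SortByDateSecond, import WritableComparable and WritableComparator from org.apache.hadoop.io, and register it in main() with job.setSortComparatorClass(DescendingIntComparator.class):

    public static class DescendingIntComparator extends WritableComparator {
        protected DescendingIntComparator() {
            super(IntWritable.class, true); // true: instantiate keys for deserialization
        }

        @Override
        @SuppressWarnings({"rawtypes", "unchecked"})
        public int compare(WritableComparable a, WritableComparable b) {
            return -a.compareTo(b); // flip the natural ascending order
        }
    }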

2. Build and package

(1) Go to File --> Project Structure --> Artifacts and add a JAR artifact.

(2) Then run Build --> Build Artifacts --> countbydate --> build; the JAR is written to the out directory.
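Alternatively, since this is a Maven project, the JAR can also be built on the command line; it is then written to target/ instead of out:

Command: mvn clean package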

3. Upload the JAR and the data

Before uploading the JAR, I created a new directory to hold this job's data and JAR:

Command: mkdir ~/sc00
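Then copy the data file and the built JAR into that directory (the source paths below are placeholders; adjust them to where your files actually are):

Command: cp /path/to/SortByCount.jar /path/to/email_log_with_date.txt ~/sc00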

4. Run the programs

(1) Start the Hadoop cluster.

(2) Create an input directory in HDFS, upload the data, and run the first step:

Command: hdfs dfs -mkdir /scinput

Command: hdfs dfs -put email_log_with_date.txt /scinput

Command: yarn jar SortByCount.jar demo.SortByDateFirst /scinput /scresultsFirst

The result of the first step:
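You can inspect the intermediate result with hdfs dfs -cat /scresultsFirst/part-r-00000. Each line is email<TAB>count, for example (hypothetical values):

    a@test.com	3
    b@test.com	7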

(3) Second step: sort by the visit counts

Command: yarn jar SortByCount.jar demo.SortByDateSecond /scresultsFirst /scresultsSecond

Result:
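The final output contains the same (email, count) pairs, now ordered by visit count: ascending by default, or descending if you register a comparator like the sketch shown after SortByDateSecond above.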

The implementation workflow of MapReduce Case 2 can be used as a reference:

MapReduce案例2_Countbydate_仄言2997的博客-CSDN博客
