MapReduce in Practice: Word Count, File Merge, and Sort

Count how many times each word appears in the input files.
Example input:
file1.txt

Hello World

file2.txt

Bye World

Output:

Bye	1
Hello	1
World	2
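
Before setting up the Hadoop job, the expected result can be sanity-checked with a minimal single-machine sketch (plain Java, no Hadoop involved; the class name LocalWordCount and the hard-coded input lines are only for illustration). A TreeMap stands in for the sorted, per-key grouping that the shuffle performs:

import java.util.StringTokenizer;
import java.util.TreeMap;

public class LocalWordCount {
    public static void main(String[] args) {
        String[] lines = {"Hello World", "Bye World"}; // the two example lines above
        TreeMap<String, Integer> counts = new TreeMap<>(); // sorted keys, like the shuffle output
        for (String line : lines) {
            StringTokenizer itr = new StringTokenizer(line); // same tokenization as the mapper
            while (itr.hasMoreTokens()) {
                counts.merge(itr.nextToken(), 1, Integer::sum); // add 1 per occurrence
            }
        }
        counts.forEach((word, n) -> System.out.println(word + "\t" + n));
    }
}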

Input

  1. Create the input files
cd /usr
mkdir file #create a working directory
cd file
mkdir WordCount
cd WordCount
echo "Hello World">file1.txt
echo "Bye World">file2.txt #file1.txt and file2.txt are the input files


  2. Upload the input files to the HDFS input directory
hdfs dfs -mkdir -p /user/root #create the root user's home directory if it does not exist
hadoop fs -mkdir WordCount #create a working directory (any name will do)
hadoop fs -mkdir WordCount/input #input directory
hadoop fs -ls WordCount #list the directory
hadoop fs -put /usr/file/WordCount/file*.txt WordCount/input #upload the files
hadoop fs -ls WordCount/input
#hadoop fs -rm -r WordCount/output #delete the output directory if it already exists


Source Code

  1. Source code
cd /usr/file/WordCount
vi WordCount.java

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCount {

    public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
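            // split the line into whitespace-separated tokens and emit (token, 1) for each one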
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }

    public static class IntSumReducer
            extends Reducer<Text, IntWritable, Text, IntWritable> {
        private IntWritable result = new IntWritable();

        public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
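            // sum all the 1s emitted for this word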
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        if (args.length != 2) {
            System.err.println("usage: WordCount <in> <out>");
            System.exit(2);
        }
        Job job = Job.getInstance(conf,"WordCount");
        job.setJarByClass(WordCount.class);
        job.setMapperClass(WordCount.TokenizerMapper.class);
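        // the reducer also serves as a combiner, pre-aggregating counts on the map side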
        job.setCombinerClass(WordCount.IntSumReducer.class);
        job.setReducerClass(WordCount.IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}

  2. Compile and package
javac WordCount.java -cp $(hadoop classpath)
jar -cvf WordCount.jar *.class


Output

#hadoop jar <path to jar> <main class> <arg1: input path> <arg2: output path>
hadoop jar /usr/file/WordCount/WordCount.jar WordCount WordCount/input WordCount/output
#view the output
hadoop fs -cat WordCount/output/*

Appendix: Restart Steps

exit #leave the docker container
shutdown -r now #reboot the host
systemctl start docker #start the docker service
docker start hadoop1 #start the containers
docker start hadoop2
docker start hadoop3
docker exec -it hadoop1 bash #enter the master node
$HADOOP_HOME/sbin/start-all.sh #start the cluster

Merge


Merge the input files and remove duplicate entries.
Example input:
file1.txt

20150101 x
20150102 y
20150103 x
20150104 y
20150105 z
20150106 x

file2.txt

20150101 y
20150102 y
20150103 x
20150104 z
20150105 y

Output:

20150101 x
20150101 y
20150102 y
20150103 x
20150104 y
20150104 z
20150105 y
20150105 z
20150106 x
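
For reference, the same result can be reproduced on a single machine with a TreeSet, which keeps exactly one copy of each line in sorted order; this is the behavior the MapReduce job below gets from the shuffle's grouping by key (plain-Java sketch; the class name LocalMerge and the hard-coded lines are only for illustration):

import java.util.TreeSet;

public class LocalMerge {
    public static void main(String[] args) {
        // the lines of file1.txt and file2.txt from the example above
        String[] lines = {
            "20150101 x", "20150102 y", "20150103 x", "20150104 y", "20150105 z", "20150106 x",
            "20150101 y", "20150102 y", "20150103 x", "20150104 z", "20150105 y"
        };
        TreeSet<String> merged = new TreeSet<>(); // de-duplicates and sorts the lines
        for (String line : lines) {
            merged.add(line);
        }
        merged.forEach(System.out::println);
    }
}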

Input

cd /usr/file #create this directory first if it does not exist
mkdir Merge
cd Merge
vi file1.txt #paste the file1.txt contents shown above
vi file2.txt
hadoop fs -mkdir Merge
hadoop fs -mkdir Merge/input
hadoop fs -put /usr/file/Merge/file*.txt Merge/input 
hadoop fs -ls Merge/input



Source Code

vi Merge.java
javac Merge.java -cp $(hadoop classpath)
jar -cvf Merge.jar *.class

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import java.io.IOException;

public class Merge {

    public static class Map extends Mapper<Object, Text, Text, Text> {
        private static Text text = new Text();

        public void map(Object key, Text value, Context content) throws IOException, InterruptedException {
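            // emit the whole input line as the key; duplicate keys collapse into one group during the shuffle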
            text = value;
            content.write(text, new Text(""));
        }
    }

    public static class Reduce extends Reducer<Text, Text, Text, Text> {
        public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
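            // each distinct line arrives exactly once; write it back with an empty value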
            context.write(key, new Text(""));
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        if (args.length != 2) {
            System.err.println("usage: Merge <in> <out>");
            System.exit(2);
        }
        Job job = Job.getInstance(conf,"Merge");
        job.setJarByClass(Merge.class);
        job.setMapperClass(Merge.Map.class);
        job.setReducerClass(Merge.Reduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}


Output

hadoop jar /usr/file/Merge/Merge.jar Merge Merge/input Merge/output
hadoop fs -cat Merge/output/*


Sort


Read the integers from all the input files, sort them in ascending order, and write the result to a new file.
Input:
file1.txt

33
37
12
40

file2.txt

4
16
39
5

file3.txt

1
45
25

Output:
Each output line contains two integers: the first is the rank of the second integer in ascending order, and the second is the original integer.

1 1
2 4
3 5
4 12
5 16
6 25
7 33
8 37
9 39
10 40
11 45
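
To make the output format concrete, here is a minimal plain-Java sketch (no Hadoop; the class name LocalSort and the hard-coded values are only for illustration) that produces the same rank/value pairs:

import java.util.Arrays;

public class LocalSort {
    public static void main(String[] args) {
        int[] values = {33, 37, 12, 40, 4, 16, 39, 5, 1, 45, 25}; // the example values above
        Arrays.sort(values); // ascending order
        int rank = 1;
        for (int v : values) {
            System.out.println(rank + " " + v); // rank, then the original value
            rank++;
        }
    }
}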

Input

cd /usr/file #create this directory first if it does not exist
mkdir Sort
cd Sort
vi file1.txt #paste the file1.txt contents shown above
vi file2.txt
vi file3.txt
hadoop fs -mkdir Sort
hadoop fs -mkdir Sort/input
hadoop fs -put /usr/file/Sort/file*.txt Sort/input 
hadoop fs -ls Sort/input


Source Code

vi Sort.java
javac Sort.java -cp $(hadoop classpath)
jar -cvf Sort.jar *.class

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class Sort {
    public static class Map extends Mapper<Object,Text,IntWritable,IntWritable>{
        private static IntWritable data=new IntWritable();
        public void map(Object key,Text value,Context context) throws IOException, InterruptedException{
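            // emit the parsed integer as the key; the shuffle sorts keys in ascending order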
            String line=value.toString();
            data.set(Integer.parseInt(line));
            context.write(data, new IntWritable(1));
        }
    }
    public static class Reduce extends Reducer<IntWritable,IntWritable,IntWritable,IntWritable>{
        private static IntWritable linenum=new IntWritable(1);
        public void reduce(IntWritable key,Iterable <IntWritable>values,Context context) throws IOException, InterruptedException{
            for(IntWritable num:values){
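                // one output line per occurrence, so duplicate values receive consecutive ranks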
                context.write(linenum, key);
                linenum=new IntWritable(linenum.get()+1);
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        if (args.length != 2) {
            System.err.println("usage: Sort <in> <out>");
            System.exit(2);
        }
        Job job = Job.getInstance(conf,"Sort");
        job.setJarByClass(Sort.class);
        job.setMapperClass(Sort.Map.class);
        job.setReducerClass(Sort.Reduce.class);
        job.setOutputKeyClass(IntWritable.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}

Output

hadoop jar /usr/file/Sort/Sort.jar Sort Sort/input Sort/output
hadoop fs -cat Sort/output/*