Requirements
- Count the number of occurrences of every word in a text file.
- Sort the words by their occurrence counts, in descending order.
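For example, given a hypothetical input line "hello you hello me hello you", the programs below would print:

hello appears 3 times
you appears 2 times
me appears 1 times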
Java Implementation
import java.util.Arrays;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.api.java.function.VoidFunction;

import scala.Tuple2;

/**
 * Word count with the results sorted by word frequency.
 * Note: written against the Spark 1.x Java API, where FlatMapFunction.call
 * returns an Iterable (Spark 2.x changed the return type to Iterator).
 */
public class SortWordCount {
    public static void main(String[] args) {
        SparkConf sparkConf = new SparkConf().setAppName("SortWordCountJava").setMaster("local");
        JavaSparkContext javaSparkContext = new JavaSparkContext(sparkConf);
        JavaRDD<String> linesRDD = javaSparkContext.textFile("E:\\testdata\\wordcount\\input\\1.txt");
        // Split each line into words
        JavaRDD<String> wordsRDD = linesRDD.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public Iterable<String> call(String s) throws Exception {
                return Arrays.asList(s.split(" "));
            }
        });
        // Map each word to a (word, 1) pair
        JavaPairRDD<String, Integer> pairs = wordsRDD.mapToPair(new PairFunction<String, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(String s) throws Exception {
                return new Tuple2<String, Integer>(s, 1);
            }
        });
        // Sum the counts per word
        JavaPairRDD<String, Integer> wordsNum = pairs.reduceByKey(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer integer, Integer integer2) throws Exception {
                return integer + integer2;
            }
        });
        // At this point we have each word's occurrence count.
        // The new requirement, however, is to sort by that count in descending order.
        // The elements of wordsNum look like (hello, 3), (you, 2); to sort by count,
        // we first have to flip them into (3, hello), (2, you).
        // Swap key and value
        JavaPairRDD<Integer, String> numsWord = wordsNum.mapToPair(new PairFunction<Tuple2<String, Integer>, Integer, String>() {
            @Override
            public Tuple2<Integer, String> call(Tuple2<String, Integer> stringIntegerTuple2) throws Exception {
                return new Tuple2<Integer, String>(stringIntegerTuple2._2, stringIntegerTuple2._1);
            }
        });
        // Sort by key, i.e. by count; false means descending
        JavaPairRDD<Integer, String> sortedNumsToWord = numsWord.sortByKey(false);
        // Swap value and key back to (word, count)
        JavaPairRDD<String, Integer> sortedWordsNum = sortedNumsToWord.mapToPair(new PairFunction<Tuple2<Integer, String>, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(Tuple2<Integer, String> integerStringTuple2) throws Exception {
                return new Tuple2<String, Integer>(integerStringTuple2._2, integerStringTuple2._1);
            }
        });
        // We now have the word counts sorted by frequency; print them.
        // (foreach runs on the executors; with a local master the output shows up
        // in this console, but on a cluster the print order across partitions is
        // not guaranteed.)
        sortedWordsNum.foreach(new VoidFunction<Tuple2<String, Integer>>() {
            @Override
            public void call(Tuple2<String, Integer> stringIntegerTuple2) throws Exception {
                System.out.println(stringIntegerTuple2._1 + " appears " + stringIntegerTuple2._2 + " times");
            }
        });
        javaSparkContext.close();
    }
}
Scala Implementation
import org.apache.spark.{SparkConf, SparkContext}

object SortWordCount {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("SortWordCountScala").setMaster("local")
    val context = new SparkContext(conf)
    val linesRDD = context.textFile("E:\\testdata\\wordcount\\input\\1.txt")
    // Split lines into words, then count each word
    val wordsRDD = linesRDD.flatMap(line => line.split(" "))
    val wordNumRDD = wordsRDD.map(word => (word, 1))
    val reduceWordNumRDD = wordNumRDD.reduceByKey(_ + _)
    // Flip (word, count) to (count, word) so we can sort by count
    val reverseWordNum = reduceWordNumRDD.map(wordNum => (wordNum._2, wordNum._1))
    // false = descending
    val sortedWordNum = reverseWordNum.sortByKey(false)
    // Flip back to (word, count)
    val resultWordNum = sortedWordNum.map(wordNum => (wordNum._2, wordNum._1))
    resultWordNum.foreach(wordNum => println(wordNum._1 + " appears " + wordNum._2 + " times"))
    context.stop()
  }
}
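As an aside, the swap-sort-swap sequence above is only needed because sortByKey can sort only by key. On reasonably recent Spark versions, RDD.sortBy accepts an arbitrary key function, so the (word, count) pairs can be sorted by count directly. A minimal sketch of that variant (not part of the original program), reusing reduceWordNumRDD from the Scala version above:

// Sort the (word, count) pairs by their count, descending, in one step
val sortedByCount = reduceWordNumRDD.sortBy(wordNum => wordNum._2, ascending = false)
sortedByCount.foreach(wordNum => println(wordNum._1 + " appears " + wordNum._2 + " times"))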