Scala program
package com.doit.spark.day01

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object WordCount {
  def main(args: Array[String]): Unit = {
    //create a SparkConf with the application name
    val conf = new SparkConf().setAppName("WordCount")
    //the SparkContext is the entry point used to create the source RDDs
    val sc: SparkContext = new SparkContext(conf)
    //create the source RDD (lazy)
    val lines: RDD[String] = sc.textFile(args(0))
    //transformations begin (lazy)
    //split each line into words and flatten
    val words: RDD[String] = lines.flatMap(_.split(" "))
    //pair each word with 1 as a new tuple
    val wordAndOne: RDD[(String, Int)] = words.map((_, 1))
    //aggregate the counts by key
    val reduced: RDD[(String, Int)] = wordAndOne.reduceByKey(_ + _)
    //sort by count in descending order
    val sorted: RDD[(String, Int)] = reduced.sortBy(_._2, false)
    //transformations end
    //an action operator triggers job execution
    //save the result to HDFS
    sorted.saveAsTextFile(args(1))
    //release resources
    sc.stop()
  }
}
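The program above expects an input path in args(0) and an output path in args(1), and gets its master from the cluster at submission time. For a quick test without a cluster, the following is a minimal sketch of a local-mode variant; the object name WordCountLocal, the local[*] master, and the in-memory sample data are assumptions for illustration, not part of the original job.

package com.doit.spark.day01

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

//hypothetical local-mode variant for testing; not part of the original job
object WordCountLocal {
  def main(args: Array[String]): Unit = {
    //local[*] runs Spark inside this JVM using all available cores (assumption: testing only)
    val conf = new SparkConf().setAppName("WordCountLocal").setMaster("local[*]")
    val sc = new SparkContext(conf)
    //replace textFile with an in-memory collection so no HDFS paths are needed
    val lines: RDD[String] = sc.parallelize(Seq("spark spark flink", "spark hadoop"))
    //same pipeline as above: split, pair with 1, aggregate, sort descending
    val sorted = lines
      .flatMap(_.split(" "))
      .map((_, 1))
      .reduceByKey(_ + _)
      .sortBy(_._2, false)
    //collect is also an action; print instead of writing to HDFS
    sorted.collect().foreach(println)
    sc.stop()
  }
}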
Java program
package com.doit.spark.day01;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;

import java.util.Arrays;
import java.util.Iterator;

/**
 * Author: 多易教育-胡磊
 * Date: 2020/8/4
 * Description:
 */
public class JavaWordCount {
    public static void main(String[] args) {
        SparkConf sparkConf = new SparkConf().setAppName("JavaWordCount");
        //create the JavaSparkContext
        JavaSparkContext jsc = new JavaSparkContext(sparkConf);
        //create an RDD from the input file via the JavaSparkContext
        JavaRDD<String> lines = jsc.textFile(args[0]);
        //call transformations
        //split each line into words and flatten
        JavaRDD<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public Iterator<String> call(String s) throws Exception {
                return Arrays.asList(s.split(" ")).iterator();
            }
        });
        //pair each word with 1
        JavaPairRDD<String, Integer> wordAndOne = words.mapToPair(new PairFunction<String, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(String s) throws Exception {
                return Tuple2.apply(s, 1);
            }
        });
        //aggregate the counts by key
        JavaPairRDD<String, Integer> reduced = wordAndOne.reduceByKey(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer v1, Integer v2) throws Exception {
                return v1 + v2;
            }
        });
        //to sort by count, first swap each (k, v) pair to (v, k)
        JavaPairRDD<Integer, String> swapped = reduced.mapToPair(new PairFunction<Tuple2<String, Integer>, Integer, String>() {
            @Override
            public Tuple2<Integer, String> call(Tuple2<String, Integer> tuple2) throws Exception {
                return tuple2.swap();
            }
        });
        //then sort by key (the count) in descending order
        JavaPairRDD<Integer, String> sorted = swapped.sortByKey(false);
        //swap (v, k) back to (k, v)
        JavaPairRDD<String, Integer> result = sorted.mapToPair(new PairFunction<Tuple2<Integer, String>, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(Tuple2<Integer, String> integerStringTuple2) throws Exception {
                return integerStringTuple2.swap();
            }
        });
        //trigger the action and save the result to HDFS
        result.saveAsTextFile(args[1]);
        //release resources
        jsc.stop();
    }
}
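A note on the swap-sort-swap pattern: the Scala version sorts by value in one call with sortBy(_._2, false), but the Java pair-RDD API exposes sortByKey, which sorts by key only. The common idiom, used above, is therefore to swap each (word, count) pair to (count, word), sort the keys in descending order, and swap back.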
Java lambda program
package com.doit.spark.day01;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

import java.util.Arrays;

/**
 * Author: 多易教育-胡磊
 * Date: 2020/8/4
 * Description:
 */
public class LambdaWordCount {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("LambdaWordCount");
        JavaSparkContext lsc = new JavaSparkContext(conf);
        //create a JavaRDD via the JavaSparkContext
        JavaRDD<String> lines = lsc.textFile(args[0]);
        //split each line into words and flatten
        JavaRDD<String> words = lines.flatMap(line -> Arrays.stream(line.split(" ")).iterator());
        //pair each word with 1
        JavaPairRDD<String, Integer> wordAndOne = words.mapToPair(w -> Tuple2.apply(w, 1));
        //aggregate the counts by key
        JavaPairRDD<String, Integer> reduced = wordAndOne.reduceByKey((a, b) -> a + b);
        //sort by count in descending order: swap to (count, word), sortByKey, then swap back
        JavaPairRDD<String, Integer> sorted = reduced.mapToPair(tp -> tp.swap()).sortByKey(false).mapToPair(tp -> tp.swap());
        //save the result to HDFS
        sorted.saveAsTextFile(args[1]);
        //release resources
        lsc.stop();
    }
}
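All three programs produce the same result. saveAsTextFile writes each element's toString, so the part files contain lines such as (spark,3) (hypothetical counts for illustration). Note that the job fails if the output directory given in args[1] already exists.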