Spark算子常规训练一

本文通过四个基于 Spark Java API 的算子实例——map 统计单词总长度、mapToPair 统计单词出现次数、flatMap 拆分并排序单词、flatMapToPair 拆分单词并统计词频——帮助读者理解 Spark 的 RDD 转换与行动操作。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

应用场景一:统计单词总长度,以及类似场景

/**
 * Counts the total length of all words, and similar scenarios.
 * Demonstrates the {@code map} transformation.
 *
 * @author ccc
 */
public class HelloWorldMap implements Serializable {

    public static void main(String[] args) {
        SparkConf sparkConf = new SparkConf().setAppName("HelloWorldMap").setMaster("local");
        // try-with-resources stops the SparkContext even if the job fails
        // (JavaSparkContext implements Closeable; close() delegates to stop())
        try (JavaSparkContext sc = new JavaSparkContext(sparkConf)) {
            // Create the RDD; the sample file contains the elements "abc", "a"
            JavaRDD<String> lines = sc.textFile("/Users/ccc/Documents/data.txt");
            // Map each word to its length: "abc" -> 3, "a" -> 1
            JavaRDD<Integer> lineLengths = lines.map(line -> line.length());
            // Cache because the RDD is consumed twice (foreach + reduce)
            lineLengths.persist(StorageLevel.MEMORY_ONLY());
            lineLengths.foreach(lineLength -> System.out.println(lineLength));
            // Sum all lengths; for the sample data: 3 + 1 = 4
            Integer totalLength = lineLengths.reduce(Integer::sum);
            System.out.println(totalLength);
        }
    }
}

应用场景二:统计所有单词以及对应个数,获取统计结果,以及类似场景

/**
 * Counts every word and its number of occurrences, then collects the result
 * to the driver. Demonstrates the {@code mapToPair} transformation.
 *
 * @author ccc
 */
public class HelloWorldMapToPair implements Serializable {

    public static void main(String[] args) {
        SparkConf sparkConf = new SparkConf().setAppName("HelloWorldMapToPair").setMaster("local");
        // try-with-resources stops the SparkContext even if the job fails
        try (JavaSparkContext sc = new JavaSparkContext(sparkConf)) {
            // Sample elements: "a", "a", "b"
            // mapToPair   -> ("a",1), ("a",1), ("b",1)
            // reduceByKey -> ("a",2), ("b",1)
            JavaRDD<String> lines = sc.textFile("/Users/ccc/Documents/data.txt");
            JavaPairRDD<String, Integer> pairs = lines.mapToPair(line -> new Tuple2<>(line, 1));
            JavaPairRDD<String, Integer> counts = pairs.reduceByKey(Integer::sum);
            // Print on the executors (output location depends on deployment)
            counts.foreach(count -> System.out.println(count._1() + ":" + count._2()));
            // collect() pulls the key-sorted result back to the driver
            List<Tuple2<String, Integer>> tuple2List = counts.sortByKey().collect();
            tuple2List.forEach(tuple2 -> System.out.println(tuple2._1() + ":" + tuple2._2()));
        }
    }
}

应用场景三:统计所有单词,以及类似场景

/**
 * Splits lines into individual words and prints them sorted, and similar
 * scenarios. Demonstrates the {@code flatMap} transformation.
 *
 * @author ccc
 */
public class HelloWorldFlatMap implements Serializable {

    public static void main(String[] args) {
        SparkConf sparkConf = new SparkConf().setAppName("HelloWorldFlatMap").setMaster("local");
        // try-with-resources stops the SparkContext even if the job fails
        try (JavaSparkContext sc = new JavaSparkContext(sparkConf)) {
            // Sample elements: "a b", "c", "d"
            JavaRDD<String> lines = sc.textFile("/Users/ccc/Documents/data2.txt");
            // Flatten each line into words: ["a b","c","d"] -> ("a","b","c","d")
            JavaRDD<String> flatMapRDD = lines.flatMap((FlatMapFunction<String, String>) line -> Arrays.asList(line.split(" ")).iterator());
            // Sort ascending (true) across 3 partitions
            JavaRDD<String> sortRDD = flatMapRDD.sortBy((Function<String, String>) word -> word, true, 3);
            sortRDD.foreach((VoidFunction<String>) line -> System.out.println(line));
        }
    }
}

应用场景四:统计所有单词,获取统计结果,以及类似场景

/**
 * Splits lines into words, counts the occurrences of each word, and collects
 * the result to the driver. Demonstrates the {@code flatMapToPair}
 * transformation.
 *
 * @author ccc
 */
public class HelloWorldFlatMapToPair implements Serializable {

    public static void main(String[] args) {
        SparkConf sparkConf = new SparkConf().setAppName("HelloWorldFlatMapToPair").setMaster("local");
        // try-with-resources stops the SparkContext even if the job fails
        try (JavaSparkContext sc = new JavaSparkContext(sparkConf)) {
            // Sample elements: "hello a", "a", "b", "c"
            // flatMapToPair -> ("hello",1), ("a",1), ("a",1), ("b",1), ("c",1)
            // reduceByKey   -> ("hello",1), ("a",2), ("b",1), ("c",1)
            JavaRDD<String> lines = sc.textFile("/Users/ccc/Documents/data1.txt");
            JavaPairRDD<String, Integer> pairs = lines.flatMapToPair((PairFlatMapFunction<String, String, Integer>) line -> {
                // One (word, 1) pair per whitespace-separated token
                List<Tuple2<String, Integer>> wordPairs = new ArrayList<>();
                for (String word : line.split(" ")) {
                    wordPairs.add(new Tuple2<>(word, 1));
                }
                return wordPairs.iterator();
            });
            JavaPairRDD<String, Integer> counts = pairs.reduceByKey(Integer::sum);
            // Print on the executors (output location depends on deployment)
            counts.foreach(count -> System.out.println(count._1() + ":" + count._2()));
            // collect() pulls the key-sorted result back to the driver;
            // use the _1()/_2() accessors consistently rather than raw fields
            List<Tuple2<String, Integer>> tuple2List = counts.sortByKey().collect();
            tuple2List.forEach(tuple2 -> System.out.println(tuple2._1() + ":" + tuple2._2()));
        }
    }
}

更多内容请关注微信公众号: “大数据开发与学习茶馆”
大数据开发与学习茶馆

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值