Understanding Spark's aggregateByKey Operator [Java, code only]

package com.bjsxt;
import java.util.Arrays;
import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.VoidFunction;
import scala.Tuple2;
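/*
 * aggregateByKey(zeroValue, seqFunc, combFunc) in a nutshell:
 *   - zeroValue: the initial accumulator for each key, applied once per partition;
 *   - seqFunc:   folds a key's values into the accumulator inside one partition;
 *   - combFunc:  merges the per-partition accumulators of the same key.
 * Like reduceByKey, it pre-aggregates on the map side before the shuffle, but
 * the accumulator type is allowed to differ from the value type.
 */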
public class AggregateByKey {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("test").setMaster("local");
        JavaSparkContext sc = new JavaSparkContext(conf);
        List<Tuple2<String, Integer>> list = Arrays.asList(
                new Tuple2<>("zhangsan", 10),
                new Tuple2<>("lisi", 11),
                new Tuple2<>("zhangsan", 12),
                // NOTE: the source post is truncated from here on; this last
                // tuple and the rest of main() are a reconstructed sketch of a
                // typical aggregateByKey demo, not the author's original code.
                new Tuple2<>("zhangsan", 13));
        // Two partitions, so the seq function runs separately in each partition
        // and the comb function then merges the per-partition results per key.
        JavaPairRDD<String, Integer> pairRDD = sc.parallelizePairs(list, 2);
        JavaPairRDD<String, Integer> result = pairRDD.aggregateByKey(
                0, // zero value: the initial per-key accumulator in each partition
                new Function2<Integer, Integer, Integer>() {
                    @Override
                    public Integer call(Integer acc, Integer value) {
                        return Math.max(acc, value); // seqFunc: max of a key's values within one partition
                    }
                },
                new Function2<Integer, Integer, Integer>() {
                    @Override
                    public Integer call(Integer v1, Integer v2) {
                        return v1 + v2; // combFunc: sum the per-partition maxima across partitions
                    }
                });
        result.foreach(new VoidFunction<Tuple2<String, Integer>>() {
            @Override
            public void call(Tuple2<String, Integer> tuple) {
                System.out.println(tuple);
            }
        });
        sc.stop();
    }
}
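Assuming the reconstructed tail above (the original listing breaks off mid-list), parallelizePairs splits the four tuples into two partitions: (zhangsan,10),(lisi,11) and (zhangsan,12),(zhangsan,13). The seq function takes each key's maximum inside a partition (zhangsan: 10 and 13, lisi: 11), and the comb function sums those partial results, so the program prints (lisi,11) and (zhangsan,23); output order is not guaranteed.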