Spark Advanced Operators (2)

import org.apache.spark.{SparkConf, SparkContext}

/**
  * @author zoujc
  * @date 2018/11/1
  */
object SparkTest2 {
   def main(args: Array[String]): Unit = {
      val conf = new SparkConf().setAppName("SparkTest2").setMaster("local[2]")
      val sc = new SparkContext(conf)

      /**
        * combineByKey used this way has the same effect as reduceByKey.
        * 1st argument (createCombiner): takes the first value seen for a key in a
        *   partition and turns it into the initial accumulator.
        * 2nd argument (mergeValue): folds further values of that key into the
        *   partition-local accumulator.
        * 3rd argument (mergeCombiners): merges the per-partition accumulators.
        * For (hello,1)(hello,1)(good,1) --> (hello,(1,1)), (good,(1)):
        *   x is the first 1 seen for hello and the 1 seen for good.
        */
      val rdd1 = sc.parallelize(List(("hello", 1), ("hello", 1), ("good", 1)))
      val rdd2 = rdd1.combineByKey(x => x, (a: Int, b: Int) => a + b, (m: Int, n: Int) => m + n)
//    println(rdd1.collect().toBuffer)    ArrayBuffer((hello,1), (hello,1), (good,1))
//    println(rdd2.collect().toBuffer)    ArrayBuffer((hello,2), (good,1))

      // With createCombiner changed to x + 10, the first value of each key in each
      // partition starts from 10 + 1 = 11. "hello" lands in both partitions here, so
      // mergeCombiners yields 11 + 11 = 22, while "good" appears once and stays at 11.
      val rdd3 = rdd1.combineByKey(x => x + 10, (a: Int, b: Int) => a + b, (m: Int, n: Int) => m + n)
//    println(rdd3.collect().toBuffer)    ArrayBuffer((hello,22), (good,11))
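      // A hedged check of the partition layout (assuming the default 2 partitions
      // that local[2] gives this parallelize). createCombiner runs once per key
      // *per partition*, so "hello", which lands in both partitions, picks up the
      // +10 twice: (10 + 1) + (10 + 1) = 22.
      val layout = rdd1.mapPartitionsWithIndex((i, it) => it.map(kv => s"[part $i] $kv"))
//    println(layout.collect().toBuffer)
      // expected: ArrayBuffer([part 0] (hello,1), [part 1] (hello,1), [part 1] (good,1))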

      val rdd4 = sc.parallelize(List("dog", "cat", "gnu", "salmon", "rabbit", "turkey", "wolf", "bear", "bee"), 3)
      val rdd5 = sc.parallelize(List(1, 1, 2, 2, 2, 1, 2, 2, 2), 3)

      // zip pairs the two RDDs element by element into an RDD of tuples (not a Map)
      val rdd6 = rdd5.zip(rdd4)
//    println(rdd6.collect().toBuffer)    ArrayBuffer((1,dog), (1,cat), (2,gnu), (2,salmon), (2,rabbit), (1,turkey), (2,wolf), (2,bear), (2,bee))
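      // Hedged note: zip requires both RDDs to have the same number of partitions
      // and the same number of elements in each partition; a mismatch fails at runtime:
      // sc.parallelize(1 to 3, 3).zip(sc.parallelize(1 to 4, 3))   // SparkException once an action runs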

      // group the words whose key is 1 together, and those whose key is 2 together
      val rdd7 = rdd6.combineByKey(List(_), (x: List[String], y: String) => x :+ y, (m: List[String], n: List[String]) => m ++ n) // for lists, ::: and ++ are equivalent
//    println(rdd7.collect().toBuffer)    ArrayBuffer((1,List(dog, cat, turkey)), (2,List(gnu, salmon, rabbit, wolf, bear, bee)))
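      // A hedged equivalence: for plain grouping, this combineByKey does the same job
      // as the sketch below; combineByKey just makes the list building explicit.
      // rdd6.groupByKey().mapValues(_.toList)   // same (key, List) pairs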

      val rdd8 = sc.parallelize(List(1,2,3,4,5,6,7,8,9),2)
      val func1 = (index: Int, iter:Iterator[Int]) => {
         iter.toList.map(x => s"[PartID: $index, val: $x]").iterator
      }
      val res1 = rdd8.mapPartitionsWithIndex(func1)
//    println(res1.collect().toBuffer)
      //ArrayBuffer([PartID: 0, val: 1], [PartID: 0, val: 2], [PartID: 0, val: 3], [PartID: 0, val: 4],
      // [PartID: 1, val: 5], [PartID: 1, val: 6], [PartID: 1, val: 7], [PartID: 1, val: 8], [PartID: 1, val: 9])

      // repartition always shuffles; here the partition count goes from 2 to 3
      val rdd9 = rdd8.repartition(3)
      val res2 = rdd9.mapPartitionsWithIndex(func1)
//    println(res2.collect().toBuffer)
//    ArrayBuffer([PartID: 0, val: 3], [PartID: 0, val: 7],
      // [PartID: 1, val: 1], [PartID: 1, val: 4], [PartID: 1, val: 5], [PartID: 1, val: 8],
      // [PartID: 2, val: 2], [PartID: 2, val: 6], [PartID: 2, val: 9])

      // coalesce: with shuffle = false (the default) it can only reduce the partition
      // count, never increase it; with shuffle = true the data is reshuffled and the
      // count follows the argument. repartition(n) is just coalesce(n, shuffle = true).
      val rdd10 = rdd8.coalesce(3, shuffle = true)
//    println(rdd10.partitions.length)    3
      val res3 = rdd10.mapPartitionsWithIndex(func1)
//    println(res3.collect().toBuffer)
      //  ArrayBuffer([PartID: 0, val: 3], [PartID: 0, val: 7],
      // [PartID: 1, val: 1], [PartID: 1, val: 4], [PartID: 1, val: 5], [PartID: 1, val: 8],
      // [PartID: 2, val: 2], [PartID: 2, val: 6], [PartID: 2, val: 9])
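      // Hedged sketch: with shuffle = false, coalesce can only shrink the partition
      // count; asking for more partitions silently keeps the old count.
      val narrowed = rdd8.coalesce(1)     // 2 partitions -> 1, no shuffle
      val unchanged = rdd8.coalesce(3)    // stays at 2: cannot grow without a shuffle
//    println(narrowed.partitions.length)     1
//    println(unchanged.partitions.length)    2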

      // collectAsMap: collects a pair RDD into a local Map on the driver
      val rdd11 = sc.parallelize(List(("a", 1), ("b", 2)))
      val res4 = rdd11.collectAsMap
//    println(rdd11.collect().toBuffer)
//    println(res4)   Map(b -> 2, a -> 1)
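      // Hedged note: collectAsMap keeps a single value per key, so duplicate keys
      // silently lose data:
      // sc.parallelize(List(("a", 1), ("a", 9))).collectAsMap   // only one entry for "a" survives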

      // countByKey: counts how many times each key occurs
      val rdd12 = sc.parallelize(List(("a", 1), ("b", 2), ("b", 2), ("c", 2), ("c", 1)))
      val res5 = rdd12.countByKey
//    println(res5)   Map(b -> 2, a -> 1, c -> 2)
      // countByValue: counts how many times each element (the whole (k, v) pair) occurs
      val res6 = rdd12.countByValue
//    println(res6)   Map((b,2) -> 2, (c,2) -> 1, (a,1) -> 1, (c,1) -> 1)

      // filterByRange: on a key-value RDD, keeps the elements whose key falls in the given range
      val rdd13 = sc.parallelize(List(("e", 5), ("c", 3), ("d", 4), ("c", 2), ("a", 1), ("b", 6)))
      val res7 = rdd13.filterByRange("b","d")
//    println(res7.collect().toBuffer)    ArrayBuffer((c,3), (d,4), (c,2), (b,6))
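      // Both bounds are inclusive. filterByRange comes from OrderedRDDFunctions; on a
      // range-partitioned RDD (e.g. after sortByKey) it can skip whole partitions
      // instead of scanning everything. A hedged sketch:
      val res7Sorted = rdd13.sortByKey().filterByRange("b", "d")
//    println(res7Sorted.collect().toBuffer)    expected: ArrayBuffer((b,6), (c,3), (c,2), (d,4)) (order of equal keys may vary)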

      // flatMapValues: flat-maps each value while keeping its key
      val rdd14 = sc.parallelize(List(("a", "1 2"), ("b", "3 4")))
      val res8 = rdd14.flatMapValues(_.split(" ")).collect()
//    println(res8.toBuffer)  ArrayBuffer((a,1), (a,2), (b,3), (b,4))

      //foldByKey
      val rdd15 = sc.parallelize(List("dog", "wolf", "cat", "bear", "ok"), 2)
      val rdd16 = rdd15.map(x => (x.length,x))
      // _ + _ concatenates the strings here; " " is the zero value
      val rdd17 = rdd16.foldByKey(" ")(_ + _)
//    println(rdd16.collect().toBuffer)   ArrayBuffer((3,dog), (4,wolf), (3,cat), (4,bear), (2,ok))
//    println(rdd17.collect().toBuffer)   ArrayBuffer((4, wolf bear), (2, ok), (3, dog cat))
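      // foldByKey applies the zero value once per key per partition, which is why the
      // " " above shows up in the concatenated strings. With a neutral numeric zero it
      // acts as a per-key sum (a hedged sketch reusing rdd12 from above):
//    println(rdd12.foldByKey(0)(_ + _).collect().toBuffer)    expected: ArrayBuffer((a,1), (b,4), (c,3)) (order may vary)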

      // foreachPartition is an action: it returns Unit and does not produce a new RDD
      val rdd18 = sc.parallelize(List(1, 2, 3, 4, 5, 6, 7, 8, 9), 3)
      rdd18.foreachPartition(part => println(part.reduce(_ + _)))    // prints 6, 15, 24 (order may vary)
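      // Typical use of foreachPartition: set up one expensive resource per partition
      // rather than per element. A hedged sketch; openConnection/send/close are
      // hypothetical placeholders, not a real API:
      rdd18.foreachPartition { part =>
         // val conn = openConnection()          // hypothetical: one connection per partition
         // part.foreach(x => conn.send(x))      // hypothetical per-element write
         // conn.close()
         println(s"partition sum: ${part.sum}")  // runnable stand-in
      }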

      // keyBy: uses the result of the supplied function as the key
      val rdd19 = sc.parallelize(List("dog", "salmon", "salmon", "rat", "elephant"), 3)
      // key each word by its length
      val res10 = rdd19.keyBy(_.length).collect()
//    println(res10.toBuffer)     ArrayBuffer((3,dog), (6,salmon), (6,salmon), (3,rat), (8,elephant))
      // key each word by its first letter
      val res11 = rdd19.keyBy(_ (0)).collect()
//    println(res11.toBuffer)     ArrayBuffer((d,dog), (s,salmon), (s,salmon), (r,rat), (e,elephant))

      // keys / values: extract just the keys or just the values of a pair RDD
      val rdd20 = sc.parallelize(List("dog", "tiger", "lion", "cat", "panther", "eagle"), 2)
      val rdd21 = rdd20.map(x => (x.length,x))
      val res12 = rdd21.keys
      val res13 = rdd21.values
//    println(res12.collect().toBuffer)   ArrayBuffer(3, 5, 4, 3, 7, 5)
//    println(res13.collect().toBuffer)   ArrayBuffer(dog, tiger, lion, cat, panther, eagle)
      sc.stop()
   }
}