/**
 * Demonstrates Scala's higher-order collection functions (map, flatMap,
 * filter, reduce, fold, scan) across Array, List, and Map. All results are
 * printed to stdout; the program produces no other side effects.
 */
object TopLevelFunction {

  def main(args: Array[String]): Unit = {
    // --- map: defined on every collection type ---
    // Array: String * Int is StringOps repetition, so each word is doubled.
    Array("spark", "scala", "hadoop", "flink").map(_ * 2).foreach(println)
    // List of pairs: three equivalent ways to project a tuple component.
    List((1, "spark"), (2, "hadoop"), (3, "hive")).map(pair => pair._1).foreach(println)
    List((1, "spark"), (2, "hadoop"), (3, "hive")).map(_._1).foreach(println)
    List((1, "spark"), (2, "hadoop"), (3, "hive")).map(_._2).foreach(println)
    // Map: mapping entries to their keys (result intentionally discarded — demo only).
    Map("spark" -> 1, "hadoop" -> 2, "flume" -> 3).map(_._1)
    println("=" * 40)

    // --- flatMap: equivalent to map followed by flatten ---
    List(List(1, 2, 3), List(2, 3, 4)).flatMap(identity).foreach(n => print(n + " "))
    println()
    println("=" * 40)

    // --- filter: keep only the elements satisfying a predicate ---
    Array(2, 1, 5, 3, 7, 5, 9).filter(_ > 4).foreach(n => print(n + " "))
    println()
    List("List", "Set", "Array").filter(_.length > 3).foreach(s => print(s + " "))
    println()
    Map("spark" -> 3, "hive" -> 5, "hadoop" -> 7).filter(_._2 > 3).foreach(e => print(e + " "))
    println()
    println("=" * 40)

    // --- reduce: combine all elements pairwise into a single value ---
    println(Array(1, 2, 3, 4, 5).reduce(_ + _))
    println(List("hadoop", "spark", "hive").reduce(_ + _))
    // The println((acc, elem)) calls trace each combining step as a tuple.
    Array(1, 2, 3, 4, 5).reduce { (acc, elem) =>
      println((acc, elem))
      acc + elem
    }
    Array(1, 2, 3, 4, 5).reduceLeft { (acc, elem) =>
      println((acc, elem))
      acc + elem
    }
    // reduceRight combines from the right, so the traced pairs differ.
    Array(1, 2, 3, 4, 5).reduceRight { (elem, acc) =>
      println((elem, acc))
      elem + acc
    }
    println("=" * 40)

    // --- fold: like reduce, but seeded with an explicit initial value ---
    Array(1, 2, 3, 4, 5).foldLeft(0) { (acc, elem) =>
      println((acc, elem))
      acc + elem
    }
    println("*" * 20)
    Array(1, 2, 3, 4, 5).foldRight(10) { (elem, acc) =>
      println((elem, acc))
      elem + acc
    }
    println(Array(1, 2, 3, 4, 5).foldLeft(0)(_ + _))
    println(Array(1, 2, 3, 4, 5).foldRight(10)(_ + _))
    println("=" * 40)

    // --- scan: like fold, but keeps every intermediate result ---
    // scanLeft walks from the left; the outcome of each step is collected
    // into a new array (including the initial seed).
    val leftScan = Array(1, 2, 3, 4, 5).scanLeft(0) { (acc, elem) =>
      println((acc, elem))
      acc + elem
    }
    println("*" * 20)
    // true: the scan over an Int array yields an Array[Int].
    println(leftScan.isInstanceOf[Array[Int]])
    leftScan.foreach(n => print(n + " "))
    println()
    println("*" * 20)
    // scanRight walks from the right, again keeping every intermediate value.
    val rightScan = Array(1, 2, 3, 4, 5).scanRight(0) { (elem, acc) =>
      println((elem, acc))
      elem + acc
    }
    rightScan.foreach(n => print(n + " "))
  }
}
Scala Higher-Order Functions
Latest recommended article published 2024-10-16 16:44:58