RDD (Resilient Distributed Dataset): a logical dataset, not a physical copy of the data.
Operators (illustrated in the sketch after the list below)
Transformation (Lazy)
map
flatMap
groupByKey
reduceByKey
persist
cache
Action:
reduce
collect
saveAsTextFile
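A minimal sketch of the transformation/action split, assuming a local SparkContext; the app name, master URL, and output path are illustrative, not from the original notes:

import org.apache.spark.{SparkConf, SparkContext}

object LazyDemo {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("LazyDemo").setMaster("local[2]"))
    val nums = sc.parallelize(1 to 10)
    // Transformations are lazy: only the lineage is recorded here, nothing runs yet.
    val doubled = nums.map(_ * 2)
    doubled.cache()                                   // cache/persist only mark the RDD for caching
    // Actions trigger execution of the whole lineage.
    println(doubled.reduce(_ + _))                    // reduce: runs the job and fills the cache
    doubled.saveAsTextFile("/tmp/lazy-demo-output")   // saveAsTextFile: hypothetical output path
    sc.stop()
  }
}

The first action (reduce) materializes doubled and caches it; saveAsTextFile then reuses the cached data instead of recomputing the map.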
Narrow dependency: no shuffle needed.
Wide dependency: a shuffle is needed.
Stage division: walk the DAG until a wide dependency (shuffle) is hit, cut there and close off a stage; a new stage starts after the shuffle (see the sketch below).
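A minimal sketch of where the stage boundary falls, again assuming a local run and a hypothetical input path: flatMap/map are narrow dependencies and stay in one stage, while reduceByKey shuffles and opens a new one.

import org.apache.spark.{SparkConf, SparkContext}

object StageDemo {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("StageDemo").setMaster("local[2]"))
    val pairs = sc.textFile("/tmp/stage-demo-input")  // hypothetical input path
      .flatMap(_.split(" "))                          // narrow dependency: same stage
      .map(word => (word, 1))                         // narrow dependency: same stage
    val counts = pairs.reduceByKey(_ + _)             // wide dependency: shuffle -> stage boundary
    // toDebugString prints the lineage; the ShuffledRDD marks where the new stage begins.
    println(counts.toDebugString)
    counts.collect()                                  // action: runs the job as two stages
    sc.stop()
  }
}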
Spark Streaming
package spark.example
import org.apache.spark.{HashPartitioner, SparkConf}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Seconds, StreamingContext}
object StreamingWordCount {
  def main(args: Array[String]): Unit = {
    val host = args(0)
    val port = args(1)
    val output = args(2)
    val config = new SparkConf().setAppName("StreamingWordCount")
    val ssc = new StreamingContext(config, Seconds(60))
    // updateStateByKey update function: for each key, add this batch's new counts to the previous running total.
    val updateFunc = (it: Iterator[(String, Seq[Int], Option[Int])]) => {
      it.map { case (word, newCounts, state) => (word, newCounts.sum + state.getOrElse(0)) }
    }
    // Get the input DStream from a socket source
    val lines = ssc.socketTextStream(host, port.toInt, StorageLevel.MEMORY_AND_DISK_SER)
    val words = lines.flatMap(x => x.split(' ')).persist(StorageLevel.MEMORY_AND_DISK_SER_2)
    // Checkpointing is required for stateful operations such as updateStateByKey
    ssc.checkpoint("hdfs://master:9000/checkpoint")
    val wordCount = words.map(x => (x, 1))
      .reduceByKey(_ + _)
      .updateStateByKey(updateFunc, new HashPartitioner(ssc.sparkContext.defaultParallelism), true)
    wordCount.print()
    wordCount.saveAsTextFiles(output, "wordcount_updateStateByKey")
    // saveAsTextFiles writes a new output directory for every batch interval, each holding the
    // cumulative totals so far. For example, for the word "a":
    //   9:00 (batch 1) total 10 -> file 1
    //   9:05 (batch 2) total 12 -> file 2 (another small file)
    //   hdfs://.../result/file1  a: 10
    //   hdfs://.../result/file2  a: 12
    //   hdfs://.../result/file3  a: 15
    //   hdfs://.../result/file4  a: 25
    //   hdfs://.../result/file5  a: 30
    // So every batch leaves yet another small file on HDFS.
    // Alternative: use foreachRDD to work on each batch's RDD directly (see the sketch after this program)
    /*
    words.foreachRDD(rdd => {
      val wordCount1 = rdd.map(x => (x, 1)).reduceByKey(_ + _)
    })
    */
    ssc.start()
    ssc.awaitTermination()
  }
}
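Following the foreachRDD hint above, a sketch of that alternative, to be placed before ssc.start() inside main; `words` and `output` are the values already defined there, while skipping empty batches and suffixing the path with the batch time are assumptions, not part of the original notes:

    // Sketch only: compute and save counts per batch instead of a running total.
    words.foreachRDD((rdd, time) => {
      val batchCounts = rdd.map(x => (x, 1)).reduceByKey(_ + _)
      if (!batchCounts.isEmpty()) {                    // skip empty batches to avoid empty output dirs
        batchCounts.saveAsTextFile(s"$output/wordcount-${time.milliseconds}")
      }
    })

Unlike updateStateByKey, this produces per-batch counts rather than a running total, and no checkpoint directory is needed for it.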