/**
 * Spark: processing data from a local file
 * Building a DataFrame from a case class + RDD
 */
The full code is as follows:
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.functions._

object SparkFile {
  // One input record: shop id, transaction date, transaction volume (all read as strings)
  case class Record(shopId: String, date: String, volume: String)
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder()
      .master("local[*]").appName("spark_sql_01")
      .getOrCreate()
    val sc = spark.sparkContext
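    // Read the local file into an RDD[Record]; each line is expected to be "shopId,date,volume".
    // The second argument (5) is the minimum number of partitions for the text file.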
    val rdd: RDD[Record] = sc.textFile("file:///C:\\Users\\Administrator\\study\\spark\\Spark_Mysql\\file\\***.txt", 5)
      .mapPartitions(p => {
        p.map(line => {
          val ps = line.split(",")
          Record(ps(0), ps(1), ps(2))
        })
      })
    // val frame: DataFrame = spark.createDataFrame(rdd)
    // frame.show()
    // println(frame.count()) // 300,000 rows in total
    import spark.implicits._
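    // Regex capturing year, month and day from a date assumed to look like "yyyy-MM-dd HH:mm:ss"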
    val rx = "(.*?)-(.*?)-(.*?) .*"
    spark.createDataFrame(rdd)
      .select($"shopId".cast("Int"),
        regexp_extract($"date", rx, 1).as("year"),
        regexp_extract($"date", rx, 2).as("month"),
        regexp_extract($"date", rx, 3).as("day"),
        $"volume".cast("Float")
      )
      // Daily aggregation: total volume and number of transactions per shop per day
      .groupBy($"shopId", $"year", $"month", $"day")
      .agg(sum($"volume").as("sumVolume"),
        count($"volume").as("cntVolume"))
      // Monthly aggregation: roll the daily results up to shop/year/month
      .groupBy($"shopId", $"year", $"month")
      .agg(sum($"sumVolume").cast("decimal(10,2)").as("sumVolume"),
        sum($"cntVolume").as("cntVolume"))
      .filter($"sumVolume".geq(100000))
      .sort($"sumVolume".desc, $"cntVolume".asc)
      // .select($"shopId", $"year", $"month", $"sumVolume", $"cntVolume")
      .limit(20)
      .show()
    sc.stop()
    spark.close()
  }
}
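As a side note, once import spark.implicits._ is in scope, an RDD of case class instances can also be turned into a DataFrame with .toDF() instead of spark.createDataFrame(rdd). The sketch below shows this variant; the object name SparkFileToDF and the input path are placeholders, not part of the original code:

import org.apache.spark.sql.SparkSession

object SparkFileToDF {
  case class Record(shopId: String, date: String, volume: String)

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]").appName("spark_sql_toDF")
      .getOrCreate()
    import spark.implicits._

    // toDF() infers the schema (column names and types) from the Record case class fields
    val df = spark.sparkContext
      .textFile("file:///path/to/input.txt") // placeholder path, not the original file
      .map(_.split(","))
      .map(ps => Record(ps(0), ps(1), ps(2)))
      .toDF()

    df.printSchema()
    df.show(5)
    spark.stop()
  }
}

Either approach yields the same schema; createDataFrame(rdd) is just more explicit, while toDF() relies on the implicit encoders brought in by spark.implicits._.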