import org.apache.spark.sql.functions.{row_number, max, broadcast}
import org.apache.spark.sql.expressions.Window
// Sample data: per-hour totals for each category
val df = sc.parallelize(Seq(
(0,"cat26",30.9), (0,"cat13",22.1), (0,"cat95",19.6), (0,"cat105",1.3),
(1,"cat67",28.5), (1,"cat4",26.8), (1,"cat13",12.6), (1,"cat23",5.3),
(2,"cat56",39.6), (2,"cat40",29.7), (2,"cat187",27.9), (2,"cat68",9.8),
(3,"cat8",35.6))).toDF("Hour", "Category", "TotalValue")
// +----+--------+----------+
// |Hour|Category|TotalValue|
// +----+--------+----------+
// |   0|   cat26|      30.9|
// |   0|   cat13|      22.1|
// |   0|   cat95|      19.6|
// |   0|  cat105|       1.3|
// |   1|   cat67|      28.5|
// |   1|    cat4|      26.8|
// |   1|   cat13|      12.6|
// |   1|   cat23|       5.3|
// |   2|   cat56|      39.6|
// |   2|   cat40|      29.7|
// |   2|  cat187|      27.9|
// |   2|   cat68|       9.8|
// |   3|    cat8|      35.6|
// +----+--------+----------+
// Rank categories within each hour by TotalValue, descending
val w = Window.partitionBy($"Hour").orderBy($"TotalValue".desc)
// Keep only the top-ranked row per hour, then drop the helper column
val dfTop = df.withColumn("rn", row_number().over(w)).where($"rn" === 1).drop("rn")
dfTop.show
// +----+--------+----------+
// |Hour|Category|TotalValue|
// +----+--------+----------+
// | 0| cat26| 30.9|
// | 1| cat67| 28.5|
// | 2| cat56| 39.6|
// | 3| cat8| 35.6|
// +----+--------+----------+
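The same query can also be written in plain SQL by registering the DataFrame as a temporary view. A minimal sketch, assuming a spark-shell session where `spark` is the active SparkSession and the view name "events" is only an illustrative choice:

// Register the DataFrame as a temp view so the query can be expressed in SQL
df.createOrReplaceTempView("events")

spark.sql("""
  SELECT Hour, Category, TotalValue
  FROM (
    SELECT *,
           row_number() OVER (PARTITION BY Hour ORDER BY TotalValue DESC) AS rn
    FROM events
  ) ranked
  WHERE rn = 1
""").show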
For other, more efficient approaches, see:
https://stackoverflow.com/questions/33878370/how-to-select-the-first-row-of-each-group
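One of the alternatives discussed in that thread replaces the window with a plain aggregation over a struct, which avoids the per-partition sort of the window approach. A minimal sketch against the same df as above (the name dfTopAgg is just illustrative):

import org.apache.spark.sql.functions.{max, struct}

// Take the max of a (TotalValue, Category) struct per Hour.
// Structs compare field by field, so the max struct carries the highest
// TotalValue together with its Category.
val dfTopAgg = df
  .groupBy($"Hour")
  .agg(max(struct($"TotalValue", $"Category")).as("top"))
  .select($"Hour", $"top.Category", $"top.TotalValue")

dfTopAgg.show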
This post shows how to use Spark SQL window functions and row_number() to efficiently pick out the top-value category within each hour. From the example code, readers can learn how to perform aggregation and row-level ranking in Apache Spark to quickly retrieve the top record of each group.