Flink window function example: word count using a custom AggregateFunction

package window

import org.apache.flink.api.common.functions.AggregateFunction
import org.apache.flink.streaming.api.functions.source.SourceFunction
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.api.scala._

/**
 * Flink streaming word count demonstrating a custom [[AggregateFunction]]
 * applied over a 10-second sliding window (size == slide, so effectively tumbling).
 *
 * @author maoxiangyi
 * @version v1.0
 * @since 2019/6/4
 */
object AggregateWordCount {

  def main(args: Array[String]): Unit = {
    // Set up a local streaming execution environment.
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.createLocalEnvironment()

    // Data source: emits one fixed sentence per second until cancelled.
    env.addSource(new SourceFunction[String] {
      // FIX: the original looped on `while (true)` with an empty cancel(),
      // so the source could never be stopped. Per the SourceFunction contract,
      // cancel() (invoked from another thread) must make run() return — hence
      // a volatile running flag.
      @volatile private var isRunning = true

      override def run(ctx: SourceFunction.SourceContext[String]): Unit = {
        while (isRunning) {
          ctx.collect("hello hadoop hello storm hello spark")
          Thread.sleep(1000)
        }
      }

      override def cancel(): Unit = isRunning = false
    })
      // Computation: split lines into words, pair each with count 1,
      // key by the word, then window and aggregate.
      .flatMap(_.split(" "))
      .map((_, 1))
      .keyBy(_._1)
      .timeWindow(Time.seconds(10), Time.seconds(10))
      .aggregate(new AggregateFunction[(String, Int), (String, Int), (String, Int)] {
        // Accumulator is (word, running count); the empty word marks a fresh one.
        override def createAccumulator(): (String, Int) = ("", 0)

        // Fold a single input element into the accumulator.
        override def add(value: (String, Int), accumulator: (String, Int)): (String, Int) =
          (value._1, accumulator._2 + value._2)

        // The accumulator itself is the per-window result.
        override def getResult(accumulator: (String, Int)): (String, Int) = accumulator

        // Combine two partial accumulators (required e.g. for merging windows).
        override def merge(a: (String, Int), b: (String, Int)): (String, Int) =
          (a._1, a._2 + b._2)
      })
      .print()
      .setParallelism(1)

    env.execute("word count")
  }
}

Reposted from: https://www.cnblogs.com/maoxiangyi/p/10977917.html

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值