Consuming Kafka data with Flink

This article explains in detail how to use Apache Flink to connect to and consume a Kafka data source, covering the key steps of configuring the Flink-Kafka connector, setting the consumer group, processing the data stream, and implementing fault tolerance, to help readers get a handle on the real-time data processing workflow.

The consumer job looks like this. The original listing breaks off right after the bootstrap.servers setting, so everything from that property onward is a minimal completion sketch: the broker address, group id, and topic name are placeholders to swap for your own values, and the word-count flatMap simply puts the FlatMapFunction/Tuple2/Collector imports to work.

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010;
import org.apache.flink.util.Collector;
import java.util.Properties;

public class ReadingToKafka {
    public static void main(String[] args) throws Exception {
        String outPath = "/user/storm/test";
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        //env.getConfig().enableSysoutLogging();
        env.enableCheckpointing(1000); // commit Kafka offsets together with checkpoints
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", "localhost:9092"); // placeholder broker list
        properties.setProperty("group.id", "flink-consumer");          // placeholder consumer group
        DataStream<String> stream = env.addSource(
                new FlinkKafkaConsumer010<>("test", new SimpleStringSchema(), properties)); // placeholder topic
        // Word-count flatMap, matching the FlatMapFunction/Tuple2/Collector imports above
        DataStream<Tuple2<String, Integer>> words = stream
                .flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
                    @Override
                    public void flatMap(String line, Collector<Tuple2<String, Integer>> out) {
                        for (String word : line.split("\\s+")) {
                            out.collect(new Tuple2<>(word, 1));
                        }
                    }
                });
        words.writeAsText(outPath);
        env.execute("Reading from Kafka");
    }
}
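Where the consumer starts reading is worth pinning down explicitly. FlinkKafkaConsumer010 inherits start-position setters from the base Kafka consumer (added around Flink 1.3). The sketch below is a drop-in for the addSource line in the job above and reuses its properties object and placeholder topic name:

// Drop-in replacement for the addSource(...) line in the job above
FlinkKafkaConsumer010<String> consumer =
        new FlinkKafkaConsumer010<>("test", new SimpleStringSchema(), properties);
// Default: resume from the offsets committed for this group in Kafka;
// if none exist, the "auto.offset.reset" property decides where to begin
consumer.setStartFromGroupOffsets();
// Alternatives: replay the whole topic, or read only records arriving from now on
// consumer.setStartFromEarliest();
// consumer.setStartFromLatest();
DataStream<String> stream = env.addSource(consumer);

To smoke-test the job, write a few lines into the topic with Kafka's console producer and check the output files under /user/storm/test.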

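On the fault-tolerance step: enableCheckpointing(1000) already makes the Kafka source replayable, but in practice you normally also fix the checkpointing mode and a restart strategy. Below is a minimal, self-contained sketch using Flink's standard CheckpointConfig and RestartStrategies APIs; the interval, pause, and retry numbers are illustrative choices, not values from the article.

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class CheckpointSetup {
    public static void main(String[] args) throws Exception {
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.enableCheckpointing(1000); // checkpoint every second, as in the job above
        // Exactly-once is the default mode, but stating it makes the guarantee explicit
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Leave at least 500 ms between checkpoints so processing can make progress
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
        // On failure, retry the job 3 times, 10 seconds apart, before giving up
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 10000));
    }
}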