消费示例:
public static void main(String[] args) throws Exception{
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    // Checkpointing configuration — these settings apply to both sources and sinks.
    // Trigger a checkpoint every 4 seconds.
    env.enableCheckpointing(4000);
    // Processing guarantee used for checkpoints.
    env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
    // Abort a checkpoint attempt that runs longer than 10 seconds.
    env.getCheckpointConfig().setCheckpointTimeout(10000);
    // Allow at most one checkpoint to be in flight at a time.
    env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);

    // Kafka consumer connection settings.
    Properties kafkaProps = new Properties();
    kafkaProps.setProperty("bootstrap.servers", "localhost:9092");
    // only required for Kafka 0.8
    kafkaProps.setProperty("zookeeper.connect", "localhost:2181");
    kafkaProps.setProperty("group.id", "test");

    // Consume string records from the "topic" topic and print them to stdout.
    DataStream<String> messages =
            env.addSource(new FlinkKafkaConsumer<>("topic", new SimpleStringSchema(), kafkaProps));
    messages.print();

    env.execute("JavaConnectorConsumerApp");
}
生产示例:
public static void main(String[] args) throws Exception{
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    // Read newline-delimited text from a raw socket as the input stream.
    DataStream<String> lines = env.socketTextStream("192.168.1.176", 9999);

    // Sink that serializes each record as a plain string and writes it to Kafka.
    FlinkKafkaProducer<String> kafkaSink = new FlinkKafkaProducer<String>(
            "localhost:9092",          // broker list
            "my-topic",                // target topic
            new SimpleStringSchema()); // serialization schema

    // versions 0.10+ allow attaching the records' event timestamp when writing them to Kafka;
    // this method is not available for earlier Kafka versions
    kafkaSink.setWriteTimestampToKafka(true);

    lines.addSink(kafkaSink);
    env.execute("JavaConnectorProducerApp");
}