Kafka Streams

Real-time stream output

1. Create a project and add the kafka and kafka-streams dependencies (kafka-streams is the one the code below actually uses; it pulls in kafka-clients transitively):

    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka_2.11</artifactId>
      <version>2.0.0</version>
    </dependency>
    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka-streams</artifactId>
      <version>2.0.0</version>
    </dependency>

2. Code:

package kafka;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;

public class MyStreamDemo {
    public static void main(String[] args) {
        Properties prop = new Properties();
        // Unique identifier for this application; it distinguishes the app from others talking to the same Kafka cluster
        prop.put(StreamsConfig.APPLICATION_ID_CONFIG,"mystream");
        // Host/port pairs used to establish the initial connection to the Kafka cluster
        prop.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG,"192.168.232.211:9092");
        // Default serializers/deserializers (serdes) for record keys and values
        prop.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        prop.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG,Serdes.String().getClass());
        // The StreamsBuilder defines the application's processing logic as a topology of connected processor nodes
        StreamsBuilder builder = new StreamsBuilder();
        // Pipe every record from the mystreamin topic straight into the mystreamout topic
        builder.stream("mystreamin").to("mystreamout");
        // Build the Topology
        Topology topo = builder.build();
        // Create the Kafka Streams client from the topology and the configuration
        final KafkaStreams streams = new KafkaStreams(topo, prop);
        final CountDownLatch latch = new CountDownLatch(1);
        // Attach a shutdown handler to catch Ctrl-C and close the streams client cleanly
        Runtime.getRuntime().addShutdownHook(new Thread("stream"){
            public void run(){
                streams.close();
                latch.countDown();
            }
        });
        streams.start();
        try {
            latch.await();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
}
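
The pass-through above relies on the default String serdes set in the properties. As a small variant (a sketch, not part of the original demo), the serdes can also be declared per source and per sink, which becomes necessary once a topic carries non-String data. Note that in 2.0.0 Consumed lives in org.apache.kafka.streams:

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.Consumed;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Produced;

// drop-in replacement for the builder.stream("mystreamin").to("mystreamout") line above:
// the same pass-through, but with the key/value serdes stated explicitly
StreamsBuilder builder = new StreamsBuilder();
builder.stream("mystreamin", Consumed.with(Serdes.String(), Serdes.String()))
        .to("mystreamout", Produced.with(Serdes.String(), Serdes.String()));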

3. Create the topics:

kafka-topics.sh --zookeeper 192.168.232.211:2181 --create --topic mystreamin --partitions 1 --replication-factor 1
kafka-topics.sh --zookeeper 192.168.232.211:2181 --create --topic mystreamout --partitions 1 --replication-factor 1
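
Topics can also be created from Java with the AdminClient API in kafka-clients (a sketch, not part of the original post; the class name CreateTopics is made up here):

package kafka;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
import java.util.Arrays;
import java.util.Properties;

public class CreateTopics {
    public static void main(String[] args) throws Exception {
        Properties prop = new Properties();
        prop.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.232.211:9092");
        try (AdminClient admin = AdminClient.create(prop)) {
            // one partition, replication factor 1, matching the shell commands above
            admin.createTopics(Arrays.asList(
                    new NewTopic("mystreamin", 1, (short) 1),
                    new NewTopic("mystreamout", 1, (short) 1)
            )).all().get(); // block until the broker confirms
        }
    }
}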

4. Output
Producer side:

kafka-console-producer.sh --topic mystreamin --broker-list 192.168.232.211:9092

Consumer side:

kafka-console-consumer.sh --topic mystreamout --bootstrap-server 192.168.232.211:9092 --from-beginning

5. Run it: each line typed at the producer appears verbatim at the consumer.
[screenshot: console producer/consumer output]

Summing numbers

1. Code:

package kafka;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.*;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;

public class SumStreamDemo {
    public static void main(String[] args) {
        Properties prop = new Properties();
        prop.put(StreamsConfig.APPLICATION_ID_CONFIG,"wordcount");
        prop.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG,"192.168.232.211:9092");
        prop.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        prop.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG,Serdes.String().getClass());
        prop.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG,300);// commit interval in ms
        prop.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG,"false");// disable the consumer's own auto-commit; Streams commits offsets itself
        // valid values: earliest, latest, none
        prop.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,"earliest");

        StreamsBuilder builder = new StreamsBuilder();
        KStream<Object, Object> source = builder.stream("suminput");
        // source = [null:4, null:5, null:3]
        KTable<String, String> sum1 = source.map((key, value) ->
                        new KeyValue<String, String>("sum", value.toString())
                // after map: [sum:4, sum:5, sum:3]
        ).groupByKey() // after groupByKey: [sum:(4,5,3)]
                .reduce((x, y) -> {
                    Integer sum = Integer.valueOf(x) + Integer.valueOf(y);
                    System.out.println("x:" + x + " y:" + y + " = " + sum);
                    return sum.toString();
                });
        sum1.toStream().to("sumoutput");
        Topology topo = builder.build();
        KafkaStreams streams = new KafkaStreams(topo,prop);
        CountDownLatch latch = new CountDownLatch(1);
        Runtime.getRuntime().addShutdownHook(new Thread("stream"){
            public void run(){
                streams.close();
                latch.countDown();
            }
        });
        streams.start();
        try {
            latch.await();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }

    }
}
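
The reduce can be exercised without a running cluster via kafka-streams-test-utils (add org.apache.kafka:kafka-streams-test-utils:2.0.0 to the project). This test sketch is not part of the original post and the class name SumStreamTest is made up; it rebuilds the same topology, pipes in 4, 5 and 3, and reads back the running sums:

package kafka;

import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.TopologyTestDriver;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.test.ConsumerRecordFactory;
import java.util.Properties;

public class SumStreamTest {
    public static void main(String[] args) {
        Properties prop = new Properties();
        prop.put(StreamsConfig.APPLICATION_ID_CONFIG, "sumtest");
        prop.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:9092"); // never contacted by the test driver
        prop.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        prop.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        prop.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0); // disable caching so every update is forwarded

        // the same map -> groupByKey -> reduce topology as SumStreamDemo
        StreamsBuilder builder = new StreamsBuilder();
        KStream<Object, Object> source = builder.stream("suminput");
        source.map((key, value) -> new KeyValue<String, String>("sum", value.toString()))
              .groupByKey()
              .reduce((x, y) -> String.valueOf(Integer.valueOf(x) + Integer.valueOf(y)))
              .toStream().to("sumoutput");

        TopologyTestDriver driver = new TopologyTestDriver(builder.build(), prop);
        ConsumerRecordFactory<String, String> factory =
                new ConsumerRecordFactory<>(new StringSerializer(), new StringSerializer());
        driver.pipeInput(factory.create("suminput", null, "4"));
        driver.pipeInput(factory.create("suminput", null, "5"));
        driver.pipeInput(factory.create("suminput", null, "3"));

        // the first record passes through reduce unchanged; each later one is folded in
        ProducerRecord<String, String> rec;
        while ((rec = driver.readOutput("sumoutput", new StringDeserializer(), new StringDeserializer())) != null) {
            System.out.println(rec.key() + " -> " + rec.value()); // sum -> 4, sum -> 9, sum -> 12
        }
        driver.close();
    }
}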

2. Create the topics:

kafka-topics.sh --zookeeper 192.168.232.211:2181 --create --topic suminput --partitions 1 --replication-factor 1
kafka-topics.sh --zookeeper 192.168.232.211:2181 --create --topic sumoutput --partitions 1 --replication-factor 1

3. Output
Producer side:

kafka-console-producer.sh --topic suminput --broker-list 192.168.232.211:9092

Consumer side:

kafka-console-consumer.sh --topic sumoutput --bootstrap-server 192.168.232.211:9092 --from-beginning
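
As a worked example: producing the lines 4, 5 and 3 makes the reduce compute 4, then 4+5=9, then 9+3=12, so the consumer should print 4, 9 and 12 (the console consumer shows values only). The record cache may coalesce intermediate updates before the 300 ms commit interval fires, in which case only the latest sum appears.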

Word count

1. Code:

package kafka;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.*;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;

import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;

public class Word {
    public static void main(String[] args) {
        Properties prop = new Properties();
        prop.put(StreamsConfig.APPLICATION_ID_CONFIG,"wordcount1");
        prop.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG,"192.168.232.211:9092");
        prop.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        prop.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG,Serdes.String().getClass());
        prop.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG,3000);//间隔
        prop.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG,"false");//是否自动提交
        //earliest latest none
        prop.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,"earliest");

        StreamsBuilder builder = new StreamsBuilder();
        KStream<Object, Object> source = builder.stream("wordin");
        //[null:hello world,null:hello java,null:hello scala]
//        source.foreach((key,value)->{
//            System.out.println(key + ":" + value);
//        });
        KTable<String, Long> count1 = source.flatMapValues((x) ->
                {
//                    System.out.println(x.toString()); // at this point the record null:hello world has been reduced to just its value, hello world
                    String[] split = x.toString().split("\\s+");
                    List<String> list = Arrays.asList(split);
                    return list;
                }
                //[null hello,null world,null hello]
        ).map((key, value) -> {
            return new KeyValue<String, String>(value, "1");
            //hello:1,world:1,hello:1
        }).groupByKey() //hello(1,1),world(1)
                .count();
        //hello:2 world:1
//        count1.toStream().foreach((key,value)->{
//            System.out.println("key:"+key+","+"value"+value);
//        });
        count1.toStream().map((key,value)->{
            return new KeyValue<String,String>(key,key+value.toString());
        }).to("wordout");

        Topology topo = builder.build();
        KafkaStreams streams = new KafkaStreams(topo,prop);
        CountDownLatch latch = new CountDownLatch(1);
        Runtime.getRuntime().addShutdownHook(new Thread("stream"){
            public void run(){
                streams.close();
                latch.countDown();
            }
        });
        streams.start();
        try {
            latch.await();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
}
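
Because the default value serde is String, the code above stringifies the Long count by concatenating it onto the key before writing to wordout (so the consumer sees e.g. hello2). A variant worth knowing (a sketch, not from the original code): Produced lets you override the serdes at the sink, so the count can stay a Long:

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.kstream.Produced;

// drop-in replacement for the count1.toStream().map(...).to("wordout") chain in Word:
// keep the <String, Long> records and declare the Long value serde at the sink
count1.toStream().to("wordout", Produced.with(Serdes.String(), Serdes.Long()));

The console consumer would then need a matching deserializer to print the counts, e.g. --value-deserializer org.apache.kafka.common.serialization.LongDeserializer.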

2. Create the topics:

kafka-topics.sh --zookeeper 192.168.232.211:2181 --create --topic wordin --partitions 1 --replication-factor 1
kafka-topics.sh --zookeeper 192.168.232.211:2181 --create --topic wordout --partitions 1 --replication-factor 1

3. Output
Produce lines to wordin and consume from wordout as in the earlier examples; each word is emitted with its running count appended (e.g. hello2).
[screenshot: console output of the word counts]
