Integrating Spring with Kafka
Notes:
Start ZooKeeper first.
Then start Kafka.
This demo assumes a topic named test1, which must be created before running it:
bin/kafka-topics.sh --create --zookeeper 192.168.124.200:2181 --replication-factor 1 --partitions 1 --topic test1
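To confirm the topic exists before running the demo, list the topics registered in the same ZooKeeper; test1 should appear in the output:
bin/kafka-topics.sh --list --zookeeper 192.168.124.200:2181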
1. Maven dependencies (pom.xml)
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
<version>2.1.6.RELEASE</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>1.1.0</version>
</dependency>
2. Producer configuration (kafka-producer.xml)
<bean id="producerProperties" class="java.util.HashMap">
<constructor-arg>
<map>
<!-- Kafka broker address list; may point to a cluster -->
<entry key="bootstrap.servers" value="192.168.124.200:9092,192.168.124.201:9092" />
<!-- Number of send retries; retrying can cause the broker to receive duplicate messages -->
<entry key="retries" value="10" />
<!-- Upper bound, in bytes, on the size of each batch of records (not a message count) -->
<entry key="batch.size" value="1638" />
<!-- How long the producer waits for additional records before sending a batch; default 0 ms -->
<entry key="linger.ms" value="1" />
<!-- Total memory the producer may use to buffer records; if records are produced faster than they can be delivered, the producer blocks or throws an exception -->
<entry key="buffer.memory" value="33554432" />
<!-- Acknowledgments the producer requires before a request is considered complete; "all" waits for the full set of in-sync replicas -->
<entry key="acks" value="all" />
<entry key="key.serializer"
value="org.apache.kafka.common.serialization.StringSerializer" />
<entry key="value.serializer"
value="org.apache.kafka.common.serialization.StringSerializer" />
</map>
</constructor-arg>
</bean>
<!-- producerFactory bean used to create the KafkaTemplate -->
<bean id="producerFactory"
class="org.springframework.kafka.core.DefaultKafkaProducerFactory">
<constructor-arg>
<ref bean="producerProperties" />
</constructor-arg>
</bean>
<!-- KafkaTemplate bean; inject it wherever messages need to be sent and use its send methods -->
<bean id="kafkaTemplate" class="org.springframework.kafka.core.KafkaTemplate">
<constructor-arg ref="producerFactory" />
<!-- default topic used by sendDefault() -->
<property name="defaultTopic" value="test1" />
</bean>
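KafkaTemplate.send() is asynchronous and returns a ListenableFuture, so a callback can be registered to observe the result of each send. A minimal sketch, assuming the kafkaTemplate bean above has been injected as in section 4 (the topic name, message, and log output are illustrative):

// imports assumed: org.springframework.kafka.support.SendResult,
//                  org.springframework.util.concurrent.ListenableFutureCallback
kafkaTemplate.send("test1", "hello kafka").addCallback(
        new ListenableFutureCallback<SendResult<String, String>>() {
            @Override
            public void onSuccess(SendResult<String, String> result) {
                // RecordMetadata carries the broker-assigned partition and offset
                System.out.println("sent, offset = " + result.getRecordMetadata().offset());
            }
            @Override
            public void onFailure(Throwable ex) {
                System.err.println("send failed: " + ex.getMessage());
            }
        });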
3. Consumer configuration (kafka-consumer.xml)
<bean id="consumerProperties" class="java.util.HashMap">
<constructor-arg>
<map>
<!-- Kafka broker address list -->
<entry key="bootstrap.servers" value="192.168.124.200:9092,192.168.124.201:9092" />
<!-- Consumer group ID; consumers sharing the same group.id belong to one group -->
<entry key="group.id" value="order-beta" />
<!-- When true, the consumer periodically commits its current offsets back to Kafka; after a restart it resumes from the last committed offsets -->
<entry key="enable.auto.commit" value="true" />
<!-- If no heartbeat reaches the broker within this window, the consumer is considered dead and its partitions are rebalanced -->
<entry key="session.timeout.ms" value="15000" />
<entry key="key.deserializer"
value="org.apache.kafka.common.serialization.StringDeserializer" />
<entry key="value.deserializer"
value="org.apache.kafka.common.serialization.StringDeserializer" />
</map>
</constructor-arg>
</bean>
<!-- bean for the concrete listener class -->
<bean id="messageListenerConsumerService"
class="com.ctid.rsc.business.service.kafka.KafkaConsumerListener" />
<!-- consumerFactory bean -->
<bean id="consumerFactory"
class="org.springframework.kafka.core.DefaultKafkaConsumerFactory">
<constructor-arg>
<ref bean="consumerProperties" />
</constructor-arg>
</bean>
<bean id="containerProperties"
class="org.springframework.kafka.listener.config.ContainerProperties">
<constructor-arg value="test1" />
<property name="messageListener" ref="messageListernerConsumerService" />
</bean>
<bean id="messageListenerContainer"
class="org.springframework.kafka.listener.KafkaMessageListenerContainer"
init-method="doStart">
<constructor-arg ref="consumerFactory" />
<constructor-arg ref="containerProperties" />
</bean>
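Because KafkaMessageListenerContainer implements Spring's SmartLifecycle, init-method="doStart" only covers the initial start; the container can also be stopped and restarted programmatically wherever the bean is injected. A minimal sketch (the surrounding method name is illustrative):

@Resource
private KafkaMessageListenerContainer<String, String> messageListenerContainer;

public void restartConsumer() {
    // stop() halts polling; start() resumes from the last committed offsets
    if (messageListenerContainer.isRunning()) {
        messageListenerContainer.stop();
    }
    messageListenerContainer.start();
}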
4. Producer Action
import javax.annotation.Resource;
import org.apache.struts2.convention.annotation.Action;
import org.apache.struts2.convention.annotation.Result;
import org.springframework.kafka.core.KafkaTemplate;

@Action(value = "kafkaAction", results = { @Result(name = "success", type = "json") })
public class KafkaProducerServer extends AjaxBaseAction {

    // generic types match the String serializers configured above
    @Resource
    private KafkaTemplate<String, String> kafkaTemplate;

    public String execute() {
        for (int i = 0; i < 3; i++) {
            System.out.println("kafka producing message " + i);
            kafkaTemplate.sendDefault("kafka producing message " + i);
        }
        return SUCCESS;
    }
}
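sendDefault() is asynchronous and returns a ListenableFuture. If the action should only report success once the broker has confirmed the write, the future can be blocked on; a minimal sketch (the 10-second timeout is an arbitrary choice, and the checked exceptions thrown by get() must be handled or declared):

// inside the loop above: block until the broker acknowledges the record
kafkaTemplate.sendDefault("kafka producing message " + i)
        .get(10, java.util.concurrent.TimeUnit.SECONDS);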
5. Consumer listener
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.listener.MessageListener;

public class KafkaConsumerListener implements MessageListener<String, String> {

    @Override
    public void onMessage(ConsumerRecord<String, String> consumerRecord) {
        // the record value is the payload the producer sent
        System.out.println("kafka consumed: " + consumerRecord.value());
    }
}
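Since the listener is a plain class, it can also be exercised without a running broker by constructing a ConsumerRecord by hand; a minimal sketch (the topic, partition, and offset are arbitrary, and the key is null because the producer above sends value-only records):

KafkaConsumerListener listener = new KafkaConsumerListener();
listener.onMessage(new ConsumerRecord<String, String>("test1", 0, 0L, null, "hello"));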
6. Test result screenshot