Kafka consume-transform-produce: a transactional consistency example (consumer reads, producer sends)

```java
package zktest.zktest;
 
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.*;
 
public class ConsumerTranProducerTest {
 
    public static void main(String[] args) throws InterruptedException {

        // Consumer configuration: manual offset commit + read_committed isolation,
        // so only records from committed transactions are consumed.
        String groupId = "wwaaadd1fw";
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "*****");
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        properties.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        properties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1");
        // SASL/PLAIN authentication
        properties.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
        properties.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
        properties.put("sasl.jaas.config",
                "org.apache.kafka.common.security.plain.PlainLoginModule required username='admin' password='kafka';");

        // Producer configuration: transactional + idempotent, so that forwarded
        // records and consumed offsets are committed atomically.
        Properties props = new Properties();
        props.put("bootstrap.servers", "10.28.8.60:17002");
        props.put("acks", "all");
        props.put("retries", 2);
        props.put("batch.size", 16384);
        props.put("linger.ms", 10);
        props.put("buffer.memory", 33554432);
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Optional: custom partitioner selection strategy
        props.put("partitioner.class", "zktest.zktest.SimplePartitioner");
        props.put("transactional.id", "transactional-id");
        props.put("enable.idempotence", "true");
        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
        props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
        props.put("sasl.jaas.config",
                "org.apache.kafka.common.security.plain.PlainLoginModule required username='admin' password='kafka';");

        // Initialize the producer and consumer
        KafkaProducer<String, String> producer = new KafkaProducer<>(props);
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        consumer.subscribe(Arrays.asList("topic-name18"));

        // Initialize transactions: registers the transactional.id with the
        // transaction coordinator and fences any previous producer instance.
        producer.initTransactions();

        while (true) {
            ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofMillis(5000));
            if (!consumerRecords.isEmpty()) {
                Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
                // Begin the transaction
                producer.beginTransaction();
                try {
                    for (TopicPartition partition : consumerRecords.partitions()) {
                        List<ConsumerRecord<String, String>> partitionRecords = consumerRecords.records(partition);
                        for (ConsumerRecord<String, String> record : partitionRecords) {
                            // Do some logical processing, then forward the result.
                            ProducerRecord<String, String> producerRecord =
                                    new ProducerRecord<>("topic-name17", record.key(), record.value());
                            System.out.println("partition: " + record.partition()
                                    + ", offset: " + record.offset()
                                    + ", key: " + record.key()
                                    + ", topic: " + record.topic());
                            // consume-transform-produce
                            producer.send(producerRecord, new Callback() {
                                public void onCompletion(RecordMetadata metadata, Exception e) {
                                    if (e != null) {
                                        e.printStackTrace();
                                    } else {
                                        System.out.println("Sent record to " + metadata.topic()
                                                + ", partition " + metadata.partition()
                                                + ", offset " + metadata.offset());
                                    }
                                }
                            });
                        }
                        long lastConsumedOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
                        // The committed offset is the next offset to consume, hence +1.
                        offsets.put(partition, new OffsetAndMetadata(lastConsumedOffset + 1));
                    }
                    // Commit the consumed offsets within the transaction; the group id
                    // must match the consumer's group.id, or the offsets land in the
                    // wrong group and records are re-consumed after a restart.
                    producer.sendOffsetsToTransaction(offsets, groupId);
                    // Commit the transaction: the forwarded records and the offsets
                    // become visible to read_committed consumers atomically.
                    producer.commitTransaction();
                } catch (ProducerFencedException e) {
                    // Fatal: another producer with the same transactional.id took over.
                    // The producer cannot abort; it must be closed.
                    producer.close();
                    break;
                } catch (KafkaException e) {
                    // Recoverable: abort so the records can be re-consumed and retried.
                    producer.abortTransaction();
                }
            }
        }
    }
}
```
### How Kafka Achieves Eventual Consistency

Kafka's eventual consistency rests on several features of its distributed architecture, which together ensure that data passed between producers, consumers, and brokers reaches the expected consistent state.

#### Partitioned Multi-Replica Architecture

Kafka protects data durability with a partitioned, multi-replica design: when a message is written to a partition, it is replicated to multiple replicas[^1]. Even if some nodes fail, availability and durability are unaffected as long as surviving replicas can serve requests. Under normal operation all replicas converge on the latest data; in failure scenarios (network partitions or hardware loss), a new leader replica is elected and the partition continues to process requests. (A topic-creation sketch follows at the end of this section.)

#### Acknowledgment (acks) Mechanism

To further secure data in transit, Kafka offers a flexible acknowledgment option: the producer `acks` setting[^3]. It controls when the producer considers a record successfully committed to the broker:

- `acks=0`: the producer does not wait for any response from the server;
- `acks=1`: the producer returns as soon as the leader has written the new record;
- `acks=all` (or `-1`): the producer waits until every follower in the ISR (in-sync replicas) set has replicated the record.

Choosing the right mode trades durability against performance: stricter settings lower the risk of records being lost in transit and redundantly resent, at the cost of throughput and latency, so the best balance depends on the workload's priorities. (A producer sketch follows below.)

#### Transaction Support

For more complex scenarios, where actions must be coordinated across batches or even across topics to achieve global atomicity, Kafka's built-in Transactions API provides a complete solution on top of the consume-transform-produce model[^2]. It lets an application treat a series of tightly coupled operations as one indivisible unit that either executes completely or rolls back completely, leaving no intermediate state behind, which is what exactly-once semantics (EOS) means at the business level. The Java example at the top of this article demonstrates this pattern.

#### Offset Management & Commit Protocol

Finally, members of a consumer group track their own progress through offset management and the commit protocol[^4]. After reading a batch of new events, a consumer instance reports and stores the corresponding offsets, so that after a restart it resumes from where it left off instead of reprocessing from the beginning, saving time and resources. (A manual-commit sketch follows below.)

For reference, a minimal (non-transactional) producer using the `kafka-python` client:

```python
from kafka import KafkaProducer

producer = KafkaProducer(bootstrap_servers='localhost:9092')

def send_message(topic, key, value):
    # send() is asynchronous; get() blocks until the broker acknowledges.
    future = producer.send(topic, key=key.encode('utf-8'), value=value.encode('utf-8'))
    result = future.get(timeout=60)
    print(f'Message sent successfully to {topic}: {result}')

send_message("test-topic", "key1", "value1")
```
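To make the replication section concrete, here is a minimal sketch that creates a replicated topic with the Java `AdminClient`. The broker address, topic name, and counts (`localhost:9092`, `replicated-topic`, 3 partitions, replication factor 3) are illustrative assumptions, not values from the example above:

```java
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

import java.util.Collections;
import java.util.Properties;

public class CreateReplicatedTopic {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Hypothetical broker address; replace with your cluster.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (AdminClient admin = AdminClient.create(props)) {
            // 3 partitions, replication factor 3: each partition is stored on
            // three brokers, so a single broker failure loses no data.
            NewTopic topic = new NewTopic("replicated-topic", 3, (short) 3);
            admin.createTopics(Collections.singleton(topic)).all().get();
        }
    }
}
```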
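Next, a minimal producer sketch showing where the `acks` trade-off is configured; the broker address and topic are again placeholders:

```java
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Properties;

public class AcksDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        // acks=0  : no broker acknowledgement; fastest, records may be lost
        // acks=1  : leader-only acknowledgement; lost if the leader fails before replication
        // acks=all: all in-sync replicas must persist the record (strongest guarantee)
        props.put(ProducerConfig.ACKS_CONFIG, "all");

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("demo-topic", "key", "value"));
        }
    }
}
```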
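And finally, a minimal sketch of manual offset management with `commitSync()`, assuming a hypothetical `demo-group` and `demo-topic`. Committing only after processing yields at-least-once delivery; the transactional example at the top of this article upgrades that to exactly-once:

```java
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

public class ManualCommitDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "demo-group");
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singleton("demo-topic"));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("offset=%d value=%s%n", record.offset(), record.value());
                }
                // Commit only after processing, so a crash between poll() and
                // commitSync() re-delivers the batch (at-least-once).
                consumer.commitSync();
            }
        }
    }
}
```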