@Override
public void registerUnicast(@Nonnull String topic, @Nonnull String groupId, @Nonnull EventHandler handler,
                            @Nonnull ExecutorService executorService,
                            PartitionAssignorMode partitionAssignorMode) {
    // Resolve the effective assignor mode for this topic/group, then delegate
    // to the common registration path.
    PartitionAssignorMode resolvedMode = getPartitionAssignorMode(topic, groupId, partitionAssignorMode);
    register(topic, groupId, handler, executorService, resolvedMode);
}
/**
 * Registers a unicast consumer whose handler is wrapped with dead-letter-queue
 * retry semantics. If {@code dlqConfig} is {@code null} the handler is
 * registered without DLQ wrapping (the parameter is not @Nonnull, so guard it).
 */
public void registerUnicast(@Nonnull String topic, @Nonnull String groupId, @Nonnull EventHandler handler,
                            @Nonnull ExecutorService executorService,
                            PartitionAssignorMode partitionAssignorMode, DLQConfig dlqConfig) {
    partitionAssignorMode = getPartitionAssignorMode(topic, groupId, partitionAssignorMode);
    // No DLQ configuration supplied: fall back to plain registration instead of NPE-ing
    // inside the wrapper on the first failed event.
    if (dlqConfig == null) {
        register(topic, groupId, handler, executorService, partitionAssignorMode);
        return;
    }
    EventHandler wrappedHandler = new DLQEventHandlerWrapper(handler, dlqConfig, this);
    register(topic, groupId, wrappedHandler, executorService, partitionAssignorMode);
}
@Data
@Configuration
@ConfigurationProperties(prefix = "eventcenter.deadletter")
public class DLQConfig {
    /** Suffix appended to a source topic to derive its dead-letter topic name. */
    public static final String DLQ_TOPIC_SUFFIX = "_dlq_topic";
    /** Whether the dead-letter queue is enabled (disabled by default). */
    private boolean enabled = false;
    /** Maximum retry attempts before an event is dead-lettered (default 3). */
    private int maxRetries = 3;
    /**
     * Legacy fixed dead-letter topic name, kept only for backward compatibility.
     * Policy change: the DLQ topic is no longer user-configurable — use
     * {@link #getDlqTopicFor(String)}, which derives it from the source topic
     * as "&lt;originalTopic&gt;_dlq_topic".
     */
    private String dlqTopic = "dlq_topic";
    /** Retry backoff strategy (exponential backoff by default). */
    private RetryStrategy retryStrategy = new ExponentialBackoffRetryStrategy();

    /**
     * Derives the dead-letter topic for the given source topic. The result is
     * always "&lt;originalTopic&gt;_dlq_topic" and cannot be overridden via
     * configuration.
     *
     * @param originalTopic the source topic the failing event came from
     * @return the derived dead-letter topic name
     */
    public String getDlqTopicFor(String originalTopic) {
        return originalTopic + DLQ_TOPIC_SUFFIX;
    }
}
@Slf4j
@RequiredArgsConstructor
public class DLQEventHandlerWrapper implements EventHandler {
    /** The wrapped business event handler. */
    private final EventHandler delegate;
    /** Dead-letter queue configuration. */
    private final DLQConfig dlqConfig;
    /** Event center used to publish dead letters. */
    private final KafkaEventCenter eventCenter;

    /**
     * Invokes the delegate handler; on failure retries up to
     * {@code dlqConfig.getMaxRetries()} times using the configured backoff
     * strategy and, once retries are exhausted, forwards the event to the
     * dead-letter topic. If the retry loop is interrupted, the event is NOT
     * dead-lettered and the log says so explicitly.
     */
    @Override
    public void handleEvent(Event event) {
        if (!dlqConfig.isEnabled()) {
            // DLQ disabled: run the original handler directly.
            delegate.handleEvent(event);
            return;
        }
        int retryCount = 0;
        boolean success = false;
        boolean deadLettered = false;
        while (retryCount < dlqConfig.getMaxRetries()) {
            try {
                delegate.handleEvent(event); // run the original handler
                success = true;
                break;
            } catch (Exception e) {
                retryCount++;
                log.warn("Event handle failed (attempt {}/{}): {}",
                        retryCount, dlqConfig.getMaxRetries(), e.getMessage());
                if (retryCount >= dlqConfig.getMaxRetries()) {
                    sendToDLQ(event, retryCount); // retries exhausted -> dead-letter
                    deadLettered = true;
                    break;
                }
                // Compute the next backoff delay (exponential by default).
                long delay = dlqConfig.getRetryStrategy().getNextDelay(retryCount);
                try {
                    sendDelay(event, delay); // placeholder for a real delay queue
                    Thread.sleep(delay); // temporary blocking backoff; replace with async delay later
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                    log.error("Retry interrupted for event", ie);
                    break;
                }
            }
        }
        if (!success) {
            if (deadLettered) {
                log.error("Event failed after {} retries, sent to DLQ", retryCount);
            } else {
                // Interrupted before retries were exhausted: do NOT claim the
                // event reached the DLQ — it did not.
                log.error("Event processing aborted after {} attempt(s); event was NOT sent to DLQ", retryCount);
            }
        }
    }

    /**
     * Publishes the failed event to the dead-letter topic, recording how many
     * attempts were made.
     *
     * NOTE(review): to also expose the ORIGINAL topic/partition/offset on the
     * dead letter, the Event must carry its consumer metadata (e.g. via Kafka
     * record headers populated by the consumer) — handleEvent(Event) alone does
     * not provide them. TODO: confirm whether the Event/EventV2 API exposes
     * consumer-record metadata and attach it here.
     */
    private void sendToDLQ(BaseEvent event, int retryCount) {
        String dlqTopic = dlqConfig.getDlqTopic();
        try {
            // Pick the serializer by event type, mirroring the normal send path.
            SerializeEnum serializeEnum = (event instanceof EventV2)
                    ? SerializeEnum.KRYO
                    : SerializeEnum.JSON;
            eventCenter.send(dlqTopic, event, serializeEnum);
            log.info("Event sent to DLQ topic: {} (after {} failed attempts)", dlqTopic, retryCount);
        } catch (Exception e) {
            log.error("Failed to send event to DLQ topic: {}", dlqTopic, e);
        }
    }

    /**
     * Placeholder for a real asynchronous retry delay (e.g. a Redis/ZooKeeper
     * backed delay queue or a scheduler). Currently only logs the intent; the
     * actual waiting is done by Thread.sleep in handleEvent.
     */
    private void sendDelay(BaseEvent event, long delay) {
        log.info("Scheduled delay of {}ms for event", delay);
        // TODO: store the event in a delay queue and re-dispatch on expiry.
    }
}
public interface RetryStrategy {
/**
 * Returns the delay before the next retry, in milliseconds.
 *
 * @param retryAttempt the current retry attempt (1-based)
 * @return the delay in milliseconds
 */
long getNextDelay(int retryAttempt);
}
/**
 * Exponential-backoff retry strategy: the delay doubles on every attempt,
 * starting from {@code initialDelay} (default 1000 ms).
 */
public class ExponentialBackoffRetryStrategy implements RetryStrategy {
    /** Delay before the first retry, in milliseconds. */
    private final long initialDelay;

    /** Creates a strategy with a 1-second initial delay. */
    public ExponentialBackoffRetryStrategy() {
        this(1000L);
    }

    /**
     * @param initialDelay delay before the first retry, in milliseconds
     */
    public ExponentialBackoffRetryStrategy(long initialDelay) {
        this.initialDelay = initialDelay;
    }

    /**
     * Returns {@code initialDelay * 2^(retryAttempt - 1)}.
     * Attempts below 1 are clamped to 1 (the old Math.pow form silently
     * returned 0 for them), and the exponent is capped so the long
     * multiplication cannot overflow into a negative delay.
     */
    @Override
    public long getNextDelay(int retryAttempt) {
        int exponent = Math.max(0, retryAttempt - 1);
        if (exponent >= 62) {
            return Long.MAX_VALUE; // 2^62 already exceeds any sane backoff
        }
        return initialDelay * (1L << exponent);
    }
}
通过对registerUnicast方法重载实现了死信逻辑,请根据以下测试调用的代码和输出的日志做出一定修改:1.如何获取死信消息的原目标,原偏移量,重试次数;2.在死信配置中,不允许自定义修改死信topic名称,而是改为默认值,默认原topic_dlq_topic;调用代码及日志输出如下:eventcenter:
deadletter:
enabled: true # 启用死信队列
max-retries: 2 # 最大重试2次(方便测试观察)
dlq-topic: "vms_dlq_hello-topic" # 死信队列Topic名称
retry-strategy: # 重试策略(指数退避,初始1秒,倍数2)
initial-interval-ms: 1000
multiplier: 2
kafka:
producer:
servers: 192.168.203.128:9092 # 对应KafkaProducerProperties的servers属性(原spring.kafka.bootstrap-servers)
max-in-flight-requests-per-connection: 5
enable-idempotence: false
retries: 2147483647 # Integer.MAX_VALUE
delivery-timeout-ms: 120000
batch-size: 4096
linger: 1
buffer-memory: 33554432
acks: "all"
compression-type: "none"
max-request-size: 1048576
consumer:
# 必填:Kafka服务地址(多个地址用逗号分隔)
servers: 192.168.203.128:9092 # 根据实际集群调整
dlq-consumer:
group-id: "dlq-demo-group" # 死信消费者组
enable-auto-commit: true # 自动提交偏移量(方便测试)
# 消费间隔相关(默认300000ms=5分钟)
max-poll-interval: 300000 # 两次poll操作的最大间隔,超时则消费者会被移出组
# 会话超时(默认45000ms=45秒)
session-timeout: 45000 # 与Broker的心跳超时时间,超时则触发重平衡
# 单次拉取最大消息数(默认200条)
max-poll-records: 200 # 根据业务处理能力调整,提高吞吐量可增大此值
# 拉取超时时间(默认500ms)
max-poll-timeout: 500 # 等待足够消息的最大时间,超时则返回已拉取的消息
# 是否自动提交偏移量(默认false)
enable-auto-commit: false # 建议手动提交以保证消费幂等性
# 偏移量重置策略(默认"latest")
auto-offset-reset: latest # 可选值:latest(从最新)、earliest(从最早)、none(无偏移则报错)
# 偏移量提交重试次数(默认3次)
commit-retries: 3 # 手动提交失败时的重试次数
# 是否异步提交偏移量(默认false)
async-commits: false # 异步提交可能更快,但无法保证顺序
# 同步提交超时时间(默认60秒)
sync-commits-timeout: 60 # 同步提交时等待Broker响应的最大时间(秒)
# 分片消息最大超时时间(默认120000ms=2分钟)
max-timeout-for-message-slice: 120000 # 分片消息处理的最大允许耗时
# 接收缓冲区大小(默认-1,使用系统默认)
receive-buffer: -1 # 网络接收缓冲区大小(字节),-1表示使用系统默认
# 最小拉取数据量(默认1字节)
fetch-min-bytes: 1 # Broker返回的最小数据量,不足则等待
# 最大等待时间(默认500ms)
fetch-max-wait-ms: 500 # 配合fetch-min-bytes,等待数据的最大时间
# 初次消费延迟(默认0秒)
initial-poll-wait-time: 0 # Bean初始化后首次poll的延迟时间(秒)
# 需要延迟消费的Topic列表(默认空)
delay-consume-topics: # 与initial-poll-wait-time配合,对指定Topic延迟消费
@SpringBootApplication(exclude = {DataSourceAutoConfiguration.class, DataSourceTransactionManagerAutoConfiguration.class})
@ComponentScan(basePackages = {
"com.tplink.nbu.demo.basicspringboot", // package containing this application class
"com.tplink.smb.eventcenter.port.kafka.deadletter" // package containing DLQConfig
})
public class KafkaDemoApp implements CommandLineRunner {
@Autowired
private KafkaEventCenter eventCenter;
@Autowired
private DLQConfig deadLetterConfig;
public static void main(String[] args) {
SpringApplication.run(KafkaDemoApp.class, args);
}
@Override
public void run(String... args) throws Exception {
// Register a DLQ-enabled consumer whose handler always fails.
registerDLQConsumer();
// Register a listener on the dead-letter topic to verify dead letters arrive.
registerDLQListener();
// Send a test message that triggers the failure/retry/DLQ flow.
sendHelloMessage();
}
/**
 * Registers a DLQ-enabled consumer whose handler always throws,
 * simulating a consistently failing business handler.
 */
private void registerDLQConsumer() {
// Handler that always throws to simulate a processing failure.
EventHandler failingHandler = event -> {
System.out.println("尝试处理消息: " + event.getMessage());
throw new RuntimeException("模拟业务处理失败"); // deliberate failure
};
// Use the DLQ-aware registerUnicast overload.
eventCenter.registerUnicast(
"hello-topic", // source topic
"demo-group", // consumer group id
failingHandler, // always-failing handler
ForkJoinPool.commonPool(), // executor
PartitionAssignorMode.COOPERATIVE_STICKY, // partition assignment strategy
deadLetterConfig // DLQ configuration (injected)
);
}
/**
 * Registers a plain consumer on the dead-letter topic to verify that
 * failed events are actually published there.
 */
private void registerDLQListener() {
eventCenter.registerUnicast(
deadLetterConfig.getDlqTopic(), // dead-letter topic
"dlq-demo-group", // consumer group id (matches dlq-consumer.group-id in config)
event -> System.out.println("收到死信消息: " + event.getMessage()), // dead-letter handler
ForkJoinPool.commonPool() // executor
);
}
/**
 * Sends one test message to the source topic to start the flow.
 */
private void sendHelloMessage() {
Event event = new Event("key1", "Hello Kafka! 这是一条会触发死信的消息");
eventCenter.send("hello-topic", event); // publish to the source topic
}
}2025-09-28 11:03:52.183 INFO 32724 --- [ main] c.t.n.demo.basicspringboot.KafkaDemoApp : Starting KafkaDemoApp using Java 1.8.0_462-462 on 18088363-BG with PID 32724 (D:\r\idmdemo\target\classes started by admin in D:\r\idmdemo)
2025-09-28 11:03:52.184 INFO 32724 --- [ main] c.t.n.demo.basicspringboot.KafkaDemoApp : No active profile set, falling back to 1 default profile: "default"
2025-09-28 11:03:52.800 INFO 32724 --- [ main] o.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat initialized with port(s): 8080 (http)
2025-09-28 11:03:52.806 INFO 32724 --- [ main] o.apache.catalina.core.StandardService : Starting service [Tomcat]
2025-09-28 11:03:52.806 INFO 32724 --- [ main] org.apache.catalina.core.StandardEngine : Starting Servlet engine: [Apache Tomcat/9.0.71]
2025-09-28 11:03:52.866 INFO 32724 --- [ main] o.a.c.c.C.[Tomcat].[localhost].[/] : Initializing Spring embedded WebApplicationContext
2025-09-28 11:03:52.866 INFO 32724 --- [ main] w.s.c.ServletWebServerApplicationContext : Root WebApplicationContext: initialization completed in 660 ms
2025-09-28 11:03:53.289 INFO 32724 --- [ main] o.s.b.a.e.web.EndpointLinksResolver : Exposing 1 endpoint(s) beneath base path '/actuator'
2025-09-28 11:03:53.316 INFO 32724 --- [ main] o.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat started on port(s): 8080 (http) with context path ''
2025-09-28 11:03:53.324 INFO 32724 --- [ main] c.t.n.demo.basicspringboot.KafkaDemoApp : Started KafkaDemoApp in 1.362 seconds (JVM running for 1.659)
2025-09-28 11:03:53.343 INFO 32724 --- [ main] c.t.s.e.port.kafka.KafkaEventCenter : start to register topic: hello-topic, groupId: demo-group
2025-09-28 11:03:53.348 INFO 32724 --- [ main] c.t.s.e.port.kafka.KafkaEventCenter : start to register topic: vms_dlq_hello-topic, groupId: dlq-demo-group
2025-09-28 11:03:53.360 INFO 32724 --- [_dlq-demo-group] o.a.k.clients.consumer.ConsumerConfig : ConsumerConfig values:
allow.auto.create.topics = true
auto.commit.interval.ms = 5000
auto.offset.reset = latest
bootstrap.servers = [192.168.203.128:9092]
check.crcs = true
client.dns.lookup = use_all_dns_ips
client.id = consumer_10.13.35.30_3f58662d-1bfd-406d-984a-de87f469d74c
client.rack =
connections.max.idle.ms = 540000
default.api.timeout.ms = 60000
enable.auto.commit = false
exclude.internal.topics = true
fetch.max.bytes = 52428800
fetch.max.wait.ms = 500
fetch.min.bytes = 1
group.id = dlq-demo-group
group.instance.id = null
heartbeat.interval.ms = 3000
interceptor.classes = []
internal.leave.group.on.close = true
internal.throw.on.fetch.stable.offset.unsupported = false
isolation.level = read_uncommitted
key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
max.partition.fetch.bytes = 1048576
max.poll.interval.ms = 300000
max.poll.records = 200
metadata.max.age.ms = 300000
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
partition.assignment.strategy = [org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
receive.buffer.bytes = -1
reconnect.backoff.max.ms = 1000
reconnect.backoff.ms = 50
request.timeout.ms = 30000
retry.backoff.ms = 100
sasl.client.callback.handler.class = null
sasl.jaas.config = null
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.login.callback.handler.class = null
sasl.login.class = null
sasl.login.refresh.buffer.seconds = 300
sasl.login.refresh.min.period.seconds = 60
sasl.login.refresh.window.factor = 0.8
sasl.login.refresh.window.jitter = 0.05
sasl.mechanism = GSSAPI
security.protocol = PLAINTEXT
security.providers = null
send.buffer.bytes = 131072
session.timeout.ms = 45000
socket.connection.setup.timeout.max.ms = 30000
socket.connection.setup.timeout.ms = 10000
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2]
ssl.endpoint.identification.algorithm = https
ssl.engine.factory.class = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.certificate.chain = null
ssl.keystore.key = null
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLSv1.2
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.certificates = null
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
value.deserializer = class org.apache.kafka.common.serialization.ByteArrayDeserializer
2025-09-28 11:03:53.360 INFO 32724 --- [opic_demo-group] o.a.k.clients.consumer.ConsumerConfig : ConsumerConfig values:
allow.auto.create.topics = true
auto.commit.interval.ms = 5000
auto.offset.reset = latest
bootstrap.servers = [192.168.203.128:9092]
check.crcs = true
client.dns.lookup = use_all_dns_ips
client.id = consumer_10.13.35.30_b94d67dd-1a75-47e8-bf08-7fad2391ba98
client.rack =
connections.max.idle.ms = 540000
default.api.timeout.ms = 60000
enable.auto.commit = false
exclude.internal.topics = true
fetch.max.bytes = 52428800
fetch.max.wait.ms = 500
fetch.min.bytes = 1
group.id = demo-group
group.instance.id = null
heartbeat.interval.ms = 3000
interceptor.classes = []
internal.leave.group.on.close = true
internal.throw.on.fetch.stable.offset.unsupported = false
isolation.level = read_uncommitted
key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
max.partition.fetch.bytes = 1048576
max.poll.interval.ms = 300000
max.poll.records = 200
metadata.max.age.ms = 300000
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
partition.assignment.strategy = [org.apache.kafka.clients.consumer.CooperativeStickyAssignor]
receive.buffer.bytes = -1
reconnect.backoff.max.ms = 1000
reconnect.backoff.ms = 50
request.timeout.ms = 30000
retry.backoff.ms = 100
sasl.client.callback.handler.class = null
sasl.jaas.config = null
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.login.callback.handler.class = null
sasl.login.class = null
sasl.login.refresh.buffer.seconds = 300
sasl.login.refresh.min.period.seconds = 60
sasl.login.refresh.window.factor = 0.8
sasl.login.refresh.window.jitter = 0.05
sasl.mechanism = GSSAPI
security.protocol = PLAINTEXT
security.providers = null
send.buffer.bytes = 131072
session.timeout.ms = 45000
socket.connection.setup.timeout.max.ms = 30000
socket.connection.setup.timeout.ms = 10000
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2]
ssl.endpoint.identification.algorithm = https
ssl.engine.factory.class = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.certificate.chain = null
ssl.keystore.key = null
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLSv1.2
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.certificates = null
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
value.deserializer = class org.apache.kafka.common.serialization.ByteArrayDeserializer
2025-09-28 11:03:53.371 INFO 32724 --- [ main] o.a.k.clients.producer.ProducerConfig : ProducerConfig values:
acks = -1
batch.size = 4096
bootstrap.servers = [192.168.203.128:9092]
buffer.memory = 33554432
client.dns.lookup = use_all_dns_ips
client.id = producer-1
compression.type = none
connections.max.idle.ms = 540000
delivery.timeout.ms = 120000
enable.idempotence = false
interceptor.classes = []
internal.auto.downgrade.txn.commit = false
key.serializer = class org.apache.kafka.common.serialization.StringSerializer
linger.ms = 1
max.block.ms = 60000
max.in.flight.requests.per.connection = 5
max.request.size = 1048576
metadata.max.age.ms = 300000
metadata.max.idle.ms = 300000
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
partitioner.class = class org.apache.kafka.clients.producer.internals.DefaultPartitioner
receive.buffer.bytes = 32768
reconnect.backoff.max.ms = 1000
reconnect.backoff.ms = 50
request.timeout.ms = 30000
retries = 2147483647
retry.backoff.ms = 100
sasl.client.callback.handler.class = null
sasl.jaas.config = null
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.login.callback.handler.class = null
sasl.login.class = null
sasl.login.refresh.buffer.seconds = 300
sasl.login.refresh.min.period.seconds = 60
sasl.login.refresh.window.factor = 0.8
sasl.login.refresh.window.jitter = 0.05
sasl.mechanism = GSSAPI
security.protocol = PLAINTEXT
security.providers = null
send.buffer.bytes = 131072
socket.connection.setup.timeout.max.ms = 30000
socket.connection.setup.timeout.ms = 10000
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2]
ssl.endpoint.identification.algorithm = https
ssl.engine.factory.class = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.certificate.chain = null
ssl.keystore.key = null
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLSv1.2
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.certificates = null
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
transaction.timeout.ms = 60000
transactional.id = null
value.serializer = class org.apache.kafka.common.serialization.ByteArraySerializer
2025-09-28 11:03:53.394 INFO 32724 --- [ main] o.a.kafka.common.utils.AppInfoParser : Kafka version: 2.8.0
2025-09-28 11:03:53.394 INFO 32724 --- [ main] o.a.kafka.common.utils.AppInfoParser : Kafka commitId: ebb1d6e21cc92130
2025-09-28 11:03:53.394 INFO 32724 --- [ main] o.a.kafka.common.utils.AppInfoParser : Kafka startTimeMs: 1759028633393
2025-09-28 11:03:53.403 INFO 32724 --- [opic_demo-group] o.a.kafka.common.utils.AppInfoParser : Kafka version: 2.8.0
2025-09-28 11:03:53.403 INFO 32724 --- [opic_demo-group] o.a.kafka.common.utils.AppInfoParser : Kafka commitId: ebb1d6e21cc92130
2025-09-28 11:03:53.403 INFO 32724 --- [opic_demo-group] o.a.kafka.common.utils.AppInfoParser : Kafka startTimeMs: 1759028633403
2025-09-28 11:03:53.404 INFO 32724 --- [opic_demo-group] c.t.s.e.p.k.consumer.KafkaConsumerTask : start to consumer kafka topic: hello-topic
2025-09-28 11:03:53.404 INFO 32724 --- [opic_demo-group] c.t.s.e.p.k.c.AbstractTaskService : KafkaConsumerTask is running! topic:hello-topic
2025-09-28 11:03:53.404 INFO 32724 --- [_dlq-demo-group] o.a.kafka.common.utils.AppInfoParser : Kafka version: 2.8.0
2025-09-28 11:03:53.404 INFO 32724 --- [_dlq-demo-group] o.a.kafka.common.utils.AppInfoParser : Kafka commitId: ebb1d6e21cc92130
2025-09-28 11:03:53.404 INFO 32724 --- [_dlq-demo-group] o.a.kafka.common.utils.AppInfoParser : Kafka startTimeMs: 1759028633403
2025-09-28 11:03:53.404 INFO 32724 --- [_dlq-demo-group] c.t.s.e.p.k.consumer.KafkaConsumerTask : start to consumer kafka topic: vms_dlq_hello-topic
2025-09-28 11:03:53.404 INFO 32724 --- [_dlq-demo-group] c.t.s.e.p.k.c.AbstractTaskService : KafkaConsumerTask is running! topic:vms_dlq_hello-topic
2025-09-28 11:03:53.404 INFO 32724 --- [opic_demo-group] o.a.k.clients.consumer.KafkaConsumer : [Consumer clientId=consumer_10.13.35.30_b94d67dd-1a75-47e8-bf08-7fad2391ba98, groupId=demo-group] Subscribed to topic(s): hello-topic
2025-09-28 11:03:53.404 INFO 32724 --- [_dlq-demo-group] o.a.k.clients.consumer.KafkaConsumer : [Consumer clientId=consumer_10.13.35.30_3f58662d-1bfd-406d-984a-de87f469d74c, groupId=dlq-demo-group] Subscribed to topic(s): vms_dlq_hello-topic
2025-09-28 11:03:53.566 INFO 32724 --- [ad | producer-1] org.apache.kafka.clients.Metadata : [Producer clientId=producer-1] Cluster ID: ZzP7spDrRwuJk27muhJ29g
2025-09-28 11:03:53.566 INFO 32724 --- [_dlq-demo-group] org.apache.kafka.clients.Metadata : [Consumer clientId=consumer_10.13.35.30_3f58662d-1bfd-406d-984a-de87f469d74c, groupId=dlq-demo-group] Cluster ID: ZzP7spDrRwuJk27muhJ29g
2025-09-28 11:03:53.566 INFO 32724 --- [opic_demo-group] org.apache.kafka.clients.Metadata : [Consumer clientId=consumer_10.13.35.30_b94d67dd-1a75-47e8-bf08-7fad2391ba98, groupId=demo-group] Cluster ID: ZzP7spDrRwuJk27muhJ29g
2025-09-28 11:03:53.567 INFO 32724 --- [_dlq-demo-group] o.a.k.c.c.internals.AbstractCoordinator : [Consumer clientId=consumer_10.13.35.30_3f58662d-1bfd-406d-984a-de87f469d74c, groupId=dlq-demo-group] Discovered group coordinator admin1-virtual-machine:9092 (id: 2147483647 rack: null)
2025-09-28 11:03:53.567 INFO 32724 --- [opic_demo-group] o.a.k.c.c.internals.AbstractCoordinator : [Consumer clientId=consumer_10.13.35.30_b94d67dd-1a75-47e8-bf08-7fad2391ba98, groupId=demo-group] Discovered group coordinator admin1-virtual-machine:9092 (id: 2147483647 rack: null)
2025-09-28 11:03:53.999 INFO 32724 --- [_dlq-demo-group] o.a.k.c.c.internals.AbstractCoordinator : [Consumer clientId=consumer_10.13.35.30_3f58662d-1bfd-406d-984a-de87f469d74c, groupId=dlq-demo-group] (Re-)joining group
2025-09-28 11:03:53.999 INFO 32724 --- [opic_demo-group] o.a.k.c.c.internals.AbstractCoordinator : [Consumer clientId=consumer_10.13.35.30_b94d67dd-1a75-47e8-bf08-7fad2391ba98, groupId=demo-group] (Re-)joining group
2025-09-28 11:03:54.020 INFO 32724 --- [_dlq-demo-group] o.a.k.c.c.internals.AbstractCoordinator : [Consumer clientId=consumer_10.13.35.30_3f58662d-1bfd-406d-984a-de87f469d74c, groupId=dlq-demo-group] (Re-)joining group
2025-09-28 11:03:54.020 INFO 32724 --- [opic_demo-group] o.a.k.c.c.internals.AbstractCoordinator : [Consumer clientId=consumer_10.13.35.30_b94d67dd-1a75-47e8-bf08-7fad2391ba98, groupId=demo-group] (Re-)joining group
2025-09-28 11:03:54.031 INFO 32724 --- [opic_demo-group] o.a.k.c.c.internals.AbstractCoordinator : [Consumer clientId=consumer_10.13.35.30_b94d67dd-1a75-47e8-bf08-7fad2391ba98, groupId=demo-group] Successfully joined group with generation Generation{generationId=37, memberId='consumer_10.13.35.30_b94d67dd-1a75-47e8-bf08-7fad2391ba98-bd8ebd21-6acd-4fc0-9ded-a788e288a241', protocol='cooperative-sticky'}
2025-09-28 11:03:54.031 INFO 32724 --- [_dlq-demo-group] o.a.k.c.c.internals.AbstractCoordinator : [Consumer clientId=consumer_10.13.35.30_3f58662d-1bfd-406d-984a-de87f469d74c, groupId=dlq-demo-group] Successfully joined group with generation Generation{generationId=36, memberId='consumer_10.13.35.30_3f58662d-1bfd-406d-984a-de87f469d74c-7bd934c9-75d8-4437-9353-b13d1e4d0124', protocol='cooperative-sticky'}
2025-09-28 11:03:54.033 INFO 32724 --- [_dlq-demo-group] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer_10.13.35.30_3f58662d-1bfd-406d-984a-de87f469d74c, groupId=dlq-demo-group] Finished assignment for group at generation 36: {consumer_10.13.35.30_3f58662d-1bfd-406d-984a-de87f469d74c-7bd934c9-75d8-4437-9353-b13d1e4d0124=Assignment(partitions=[vms_dlq_hello-topic-0])}
2025-09-28 11:03:54.033 INFO 32724 --- [opic_demo-group] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer_10.13.35.30_b94d67dd-1a75-47e8-bf08-7fad2391ba98, groupId=demo-group] Finished assignment for group at generation 37: {consumer_10.13.35.30_b94d67dd-1a75-47e8-bf08-7fad2391ba98-bd8ebd21-6acd-4fc0-9ded-a788e288a241=Assignment(partitions=[hello-topic-0])}
2025-09-28 11:03:54.044 INFO 32724 --- [_dlq-demo-group] o.a.k.c.c.internals.AbstractCoordinator : [Consumer clientId=consumer_10.13.35.30_3f58662d-1bfd-406d-984a-de87f469d74c, groupId=dlq-demo-group] Successfully synced group in generation Generation{generationId=36, memberId='consumer_10.13.35.30_3f58662d-1bfd-406d-984a-de87f469d74c-7bd934c9-75d8-4437-9353-b13d1e4d0124', protocol='cooperative-sticky'}
2025-09-28 11:03:54.044 INFO 32724 --- [opic_demo-group] o.a.k.c.c.internals.AbstractCoordinator : [Consumer clientId=consumer_10.13.35.30_b94d67dd-1a75-47e8-bf08-7fad2391ba98, groupId=demo-group] Successfully synced group in generation Generation{generationId=37, memberId='consumer_10.13.35.30_b94d67dd-1a75-47e8-bf08-7fad2391ba98-bd8ebd21-6acd-4fc0-9ded-a788e288a241', protocol='cooperative-sticky'}
2025-09-28 11:03:54.044 INFO 32724 --- [_dlq-demo-group] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer_10.13.35.30_3f58662d-1bfd-406d-984a-de87f469d74c, groupId=dlq-demo-group] Updating assignment with
Assigned partitions: [vms_dlq_hello-topic-0]
Current owned partitions: []
Added partitions (assigned - owned): [vms_dlq_hello-topic-0]
Revoked partitions (owned - assigned): []
2025-09-28 11:03:54.044 INFO 32724 --- [opic_demo-group] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer_10.13.35.30_b94d67dd-1a75-47e8-bf08-7fad2391ba98, groupId=demo-group] Updating assignment with
Assigned partitions: [hello-topic-0]
Current owned partitions: []
Added partitions (assigned - owned): [hello-topic-0]
Revoked partitions (owned - assigned): []
2025-09-28 11:03:54.044 INFO 32724 --- [_dlq-demo-group] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer_10.13.35.30_3f58662d-1bfd-406d-984a-de87f469d74c, groupId=dlq-demo-group] Notifying assignor about the new Assignment(partitions=[vms_dlq_hello-topic-0])
2025-09-28 11:03:54.044 INFO 32724 --- [opic_demo-group] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer_10.13.35.30_b94d67dd-1a75-47e8-bf08-7fad2391ba98, groupId=demo-group] Notifying assignor about the new Assignment(partitions=[hello-topic-0])
2025-09-28 11:03:54.045 INFO 32724 --- [opic_demo-group] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer_10.13.35.30_b94d67dd-1a75-47e8-bf08-7fad2391ba98, groupId=demo-group] Adding newly assigned partitions: hello-topic-0
2025-09-28 11:03:54.045 INFO 32724 --- [_dlq-demo-group] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer_10.13.35.30_3f58662d-1bfd-406d-984a-de87f469d74c, groupId=dlq-demo-group] Adding newly assigned partitions: vms_dlq_hello-topic-0
2025-09-28 11:03:54.045 INFO 32724 --- [opic_demo-group] com.tplink.smb.eventcenter.api.Handler : ending rebalance!
2025-09-28 11:03:54.045 INFO 32724 --- [_dlq-demo-group] com.tplink.smb.eventcenter.api.Handler : ending rebalance!
2025-09-28 11:03:54.050 INFO 32724 --- [opic_demo-group] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer_10.13.35.30_b94d67dd-1a75-47e8-bf08-7fad2391ba98, groupId=demo-group] Setting offset for partition hello-topic-0 to the committed offset FetchPosition{offset=20, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[admin1-virtual-machine:9092 (id: 0 rack: null)], epoch=absent}}
2025-09-28 11:03:54.050 INFO 32724 --- [_dlq-demo-group] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer_10.13.35.30_3f58662d-1bfd-406d-984a-de87f469d74c, groupId=dlq-demo-group] Setting offset for partition vms_dlq_hello-topic-0 to the committed offset FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[admin1-virtual-machine:9092 (id: 0 rack: null)], epoch=absent}}
尝试处理消息: Hello Kafka! 这是一条会触发死信的消息
2025-09-28 11:03:54.090 WARN 32724 --- [nPool-worker-22] c.t.s.e.p.k.d.DLQEventHandlerWrapper : Eventhandle failed (attempt 1/2): 模拟业务处理失败
2025-09-28 11:03:54.090 INFO 32724 --- [nPool-worker-22] c.t.s.e.p.k.d.DLQEventHandlerWrapper : Scheduled delay of 1000ms for event
尝试处理消息: Hello Kafka! 这是一条会触发死信的消息
2025-09-28 11:03:55.091 WARN 32724 --- [nPool-worker-22] c.t.s.e.p.k.d.DLQEventHandlerWrapper : Eventhandle failed (attempt 2/2): 模拟业务处理失败
2025-09-28 11:03:55.092 INFO 32724 --- [nPool-worker-22] c.t.s.e.port.kafka.KafkaEventCenter : not implement yet
2025-09-28 11:03:55.093 INFO 32724 --- [nPool-worker-22] c.t.s.e.p.k.d.DLQEventHandlerWrapper : Event sent to DLQ topic: vms_dlq_hello-topic
2025-09-28 11:03:55.093 ERROR 32724 --- [nPool-worker-22] c.t.s.e.p.k.d.DLQEventHandlerWrapper : Event failed after 2 retries, sent to DLQ;