public void forwardToTargetTopic(DelayEvent<Event> delayEvent) {
EventFuture eventFuture = new EventFuture() {
@Override
public void onSuccess(EventCenterSendResult eventCenterSendResult) {
}
@Override
public void onFailure(Throwable throwable) {
}
};
try {
send(
delayEvent.getTopic(),
delayEvent.getKey(),
delayEvent.getPartition(),
delayEvent.getEvent(),
eventFuture
);
log.info("延迟事件已转发,targetTopic: {}", delayEvent.getTopic());
} catch (Exception e) {
log.error("转发延迟事件失败,targetTopic: {}", delayEvent.getTopic(), e);
}
}
/**
 * Forwards the original event to the target Topic (JSON serialization).
 *
 * @param delayEvent the wrapped delay event
 */
public void forwardToTargetTopicWithJSON(DelayEvent delayEvent) {
GenericEventFuture genericEventFuture = new GenericEventFuture() {
@Override
public void onSuccess(GenericEventCenterSendResult genericEventCenterSendResult) {
}
@Override
public void onFailure(Throwable throwable) {
}
};
try {
sendGenerically(
delayEvent.getTopic(),
delayEvent.getKey(),
delayEvent.getPartition(),
delayEvent.getEvent(),
genericEventFuture
);
log.info("延迟事件已转发,targetTopic: {}", delayEvent.getTopic());
} catch (Exception e) {
log.error("转发延迟事件失败,targetTopic: {}", delayEvent.getTopic(), e);
}
}
/**
 * Forwards the original event to the target Topic (Kryo serialization).
 *
 * @param delayEventV2 the wrapped delay event
 */
public void forwardToTargetTopicWithKryo(DelayEvent<EventV2> delayEventV2) {
GenericEventFuture genericEventFuture = new GenericEventFuture() {
@Override
public void onSuccess(GenericEventCenterSendResult genericEventCenterSendResult) {
}
@Override
public void onFailure(Throwable throwable) {
}
};
try {
sendWithKryo(
delayEventV2.getTopic(),
delayEventV2.getKey(),
delayEventV2.getPartition(),
delayEventV2.getEvent(),
genericEventFuture
);
log.info("延迟事件已转发,targetTopic: {}", delayEventV2.getTopic());
} catch (Exception e) {
log.error("转发延迟事件失败,targetTopic: {}", delayEventV2.getTopic(), e);
}
}

@Slf4j
@RequiredArgsConstructor
public class DelayHandler implements GenericEventHandler<DelayEvent> {
private final KafkaEventCenter eventCenter;
private final ScheduledExecutorService delayScheduler;
private final Map<String, AtomicInteger> pauseCounts = new ConcurrentHashMap<>();
@Override
public void handleEvent(DelayEvent delayEvent) {
long currentTime = System.currentTimeMillis();
long expirationTime = delayEvent.getExpirationTime();
if (expirationTime <= currentTime) {
if (delayEvent.getSerializeEnum() == null) {
eventCenter.forwardToTargetTopic(delayEvent);
} else if (delayEvent.getSerializeEnum() == SerializeEnum.JSON) {
eventCenter.forwardToTargetTopicWithJSON(delayEvent);
} else if (delayEvent.getSerializeEnum() == SerializeEnum.KRYO) {
eventCenter.forwardToTargetTopicWithKryo(delayEvent);
}
} else {
String targetTopic = "delay_topic_level_" + delayEvent.getLevel();
pauseTargetTopic(targetTopic);
long delayMs = expirationTime - currentTime;
// Schedule the delayed task
if (delayEvent.getSerializeEnum() == null) {
delayScheduler.schedule(() -> {
log.info("延迟任务执行:targetTopic={}, currentTime={}", targetTopic, System.currentTimeMillis());
resumeTargetTopic(targetTopic); // 恢复topic
eventCenter.forwardToTargetTopic(delayEvent); // 转发消息
}, delayMs, TimeUnit.MILLISECONDS);
} else if (delayEvent.getSerializeEnum() == SerializeEnum.JSON) {
delayScheduler.schedule(() -> {
log.info("延迟任务执行:targetTopic={}, currentTime={}", targetTopic, System.currentTimeMillis());
resumeTargetTopic(targetTopic); // 恢复topic
eventCenter.forwardToTargetTopicWithJSON(delayEvent); // 转发消息
}, delayMs, TimeUnit.MILLISECONDS);
} else if (delayEvent.getSerializeEnum() == SerializeEnum.KRYO) {
delayScheduler.schedule(() -> {
log.info("延迟任务执行:targetTopic={}, currentTime={}", targetTopic, System.currentTimeMillis());
resumeTargetTopic(targetTopic); // 恢复topic
eventCenter.forwardToTargetTopicWithKYRO(delayEvent); // 转发消息
}, delayMs, TimeUnit.MILLISECONDS);
}
}
}
// Pause logic: increment the count; only the first reference actually pauses the topic
private void pauseTargetTopic(String targetTopic) {
int count = pauseCounts.computeIfAbsent(targetTopic, k -> new AtomicInteger(0)).incrementAndGet();
if (count == 1) {
try {
eventCenter.pauseTopicGenerically(targetTopic);
} catch (Exception e) {
log.error("暂停topic失败:{}", targetTopic, e);
pauseCounts.remove(targetTopic);
}
}
}
// Resume logic: decrement the count; resume the topic when the count reaches 0
private void resumeTargetTopic(String targetTopic) {
AtomicInteger count = pauseCounts.get(targetTopic);
if (count != null && count.decrementAndGet() == 0) {
try {
eventCenter.resumeTopicGenerically(targetTopic);
pauseCounts.remove(targetTopic);
} catch (Exception e) {
log.error("恢复topic失败:{}", targetTopic, e);
}
}
}
}

@Data
@Slf4j
@ToString
@AllArgsConstructor
@NoArgsConstructor(force = true)
public class DelayEvent<T> {
private final String topic;
private final String key;
private final Integer partition;
private final T event;
private final long level;
private final long expirationTime;
private final SerializeEnum serializeEnum;
}
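For orientation, a minimal construction of such a delay event (the argument order follows the @AllArgsConstructor field order above; the topic, key, and payload values are illustrative only):

// Illustrative sketch: a level-1 delay event that should be forwarded to the
// business topic one second from now via the default (non-JSON/Kryo) path.
DelayEvent<Event> delayEvent = new DelayEvent<>(
        "hello-unicast-dlq-no-partition",   // topic to forward to once expired
        "key",                              // Kafka message key
        null,                               // partition (null lets the producer decide)
        new Event("key", "payload"),        // wrapped business event
        1L,                                 // delay level
        System.currentTimeMillis() + 1000L, // expirationTime in epoch millis
        null                                // serializeEnum (null selects forwardToTargetTopic)
);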
@SpringBootApplication(exclude = {DataSourceAutoConfiguration.class, DataSourceTransactionManagerAutoConfiguration.class})
@ComponentScan(basePackages = {
"com.tplink.nbu.demo.basicspringboot",
"com.tplink.smb.eventcenter.port.kafka.deadletter",
"com.tplink.smb.eventcenter.api.config"
})
public class DelayDeadTest implements CommandLineRunner {
@Autowired
private KafkaEventCenter eventCenter;
@Autowired
private DLQConfig deadLetterConfig;
// Main Topic used by the tests (one per overloaded registration method)
String mainTopic = "hello-unicast-dlq-no-partition"; // registerUnicast(DLQ, no Partition)
public static void main(String[] args) {
SpringApplication.run(DelayDeadTest.class, args);
}
@Override
public void run(String... args) throws Exception {
eventCenter.registerDelayConsumer();
registerDLQConsumers(); // register all DLQ-enabled consumers (simulated consumption failure)
registerDLQListeners(); // register listeners for all dead-letter Topics
sendTestMessagesToAllTopics(); // send test messages to trigger dead letters
}
/**
 * Registers all consumers that use a dead-letter queue.
 */
private void registerDLQConsumers() {
// Handler that simulates a consumption failure (shared by all tests)
EventHandler failingHandler = event -> {
System.out.println("Message: " + event.getMessage());
throw new RuntimeException("[Consumption failed] simulated business exception");
};
// -------------------------- 1. registerUnicast(DLQ, no PartitionAssignorMode) --------------------------
eventCenter.registerUnicast(
mainTopic, // main Topic
"group-unicast-dlq-no-part", // consumer group (manually specified)
failingHandler,
ForkJoinPool.commonPool(), // thread pool
deadLetterConfig // dead-letter config
);
}
/**
 * Registers listeners for every dead-letter Topic (DLQ topic naming rule: main Topic + "_dlq_topic").
 */
private void registerDLQListeners() {
String dlqTopic = mainTopic + "_dlq_topic"; // dead-letter Topic (matches the naming rule in the business code)
eventCenter.registerUnicast(
dlqTopic,
"dlq-group-" + mainTopic, // 死信消费者组(唯一即可)
event -> System.out.println("[死信捕获] 死信Topic: " + dlqTopic + ", 消息: " + event.getMessage()),
ForkJoinPool.commonPool()
);
}
/**
 * Sends a test message to the main Topic (to trigger a consumption failure and thus a dead letter).
 */
private void sendTestMessagesToAllTopics() {
Event event = new Event(
"key",
"测试消息(主Topic: " + mainTopic + ")- 会触发死信"
);
eventCenter.send(mainTopic, event);
System.out.println("[消息发送] 主Topic: " + mainTopic + ", 内容: " + event.getMessage());
}
}

Console output from the run:

2025-10-15 16:23:38.846 INFO 26080 --- [lq-no-partition] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer_10.13.35.30_6e96b661-9818-46d0-8686-f07821d20fad, groupId=dlq-group-hello-unicast-dlq-no-partition] Setting offset for partition hello-unicast-dlq-no-partition_dlq_topic-0 to the committed offset FetchPosition{offset=26027, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[admin1-virtual-machine:9092 (id: 0 rack: null)], epoch=absent}}
2025-10-15 16:23:38.846 INFO 26080 --- [r_group_level_5] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer_10.13.35.30_49557bac-f7f7-4157-bafb-e9a5c41700ff, groupId=delay_consumer_group_level_5] Setting offset for partition delay_topic_level_5-0 to the committed offset FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[admin1-virtual-machine:9092 (id: 0 rack: null)], epoch=absent}}
2025-10-15 16:23:38.846 INFO 26080 --- [r_group_level_1] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer_10.13.35.30_8c9c4162-45cc-43d8-a0e1-fbac2bea3ed3, groupId=delay_consumer_group_level_1] Setting offset for partition delay_topic_level_1-0 to the committed offset FetchPosition{offset=14780, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[admin1-virtual-machine:9092 (id: 0 rack: null)], epoch=absent}}
2025-10-15 16:23:38.846 INFO 26080 --- [ast-dlq-no-part] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer_10.13.35.30_adf360e0-bbe0-42b2-ae34-304c4ddc12ee, groupId=group-unicast-dlq-no-part] Setting offset for partition hello-unicast-dlq-no-partition-0 to the committed offset FetchPosition{offset=26044, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[admin1-virtual-machine:9092 (id: 0 rack: null)], epoch=absent}}
2025-10-15 16:23:38.846 INFO 26080 --- [_group_level_13] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer_10.13.35.30_f5900c11-9147-4c1d-920c-d472d289d6a8, groupId=delay_consumer_group_level_13] Setting offset for partition delay_topic_level_13-0 to the committed offset FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[admin1-virtual-machine:9092 (id: 0 rack: null)], epoch=absent}}
2025-10-15 16:23:38.846 INFO 26080 --- [_group_level_16] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer_10.13.35.30_8fd2a0d0-acb9-4823-84ce-c973e3afa8d4, groupId=delay_consumer_group_level_16] Setting offset for partition delay_topic_level_16-0 to the committed offset FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[admin1-virtual-machine:9092 (id: 0 rack: null)], epoch=absent}}
2025-10-15 16:23:38.846 INFO 26080 --- [r_group_level_3] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer_10.13.35.30_0488ce2f-d7d2-4872-ad3e-7092bbfc0445, groupId=delay_consumer_group_level_3] Setting offset for partition delay_topic_level_3-0 to the committed offset FetchPosition{offset=44, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[admin1-virtual-machine:9092 (id: 0 rack: null)], epoch=absent}}
2025-10-15 16:23:38.846 INFO 26080 --- [r_group_level_7] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer_10.13.35.30_1367ffe6-8d3b-4c7a-8ff2-e35fa05c1e00, groupId=delay_consumer_group_level_7] Setting offset for partition delay_topic_level_7-0 to the committed offset FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[admin1-virtual-machine:9092 (id: 0 rack: null)], epoch=absent}}
2025-10-15 16:23:38.846 INFO 26080 --- [r_group_level_2] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer_10.13.35.30_e852eb13-5c3b-49e3-ad70-0c562840ec22, groupId=delay_consumer_group_level_2] Setting offset for partition delay_topic_level_2-0 to the committed offset FetchPosition{offset=15183, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[admin1-virtual-machine:9092 (id: 0 rack: null)], epoch=absent}}
2025-10-15 16:23:38.846 INFO 26080 --- [r_group_level_9] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer_10.13.35.30_58bd1156-d633-414d-a1a8-bc3165477918, groupId=delay_consumer_group_level_9] Setting offset for partition delay_topic_level_9-0 to the committed offset FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[admin1-virtual-machine:9092 (id: 0 rack: null)], epoch=absent}}
2025-10-15 16:23:38.846 INFO 26080 --- [r_group_level_6] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer_10.13.35.30_4b5c02b7-373b-4321-b6f9-bc713c9eaaf5, groupId=delay_consumer_group_level_6] Setting offset for partition delay_topic_level_6-0 to the committed offset FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[admin1-virtual-machine:9092 (id: 0 rack: null)], epoch=absent}}
2025-10-15 16:23:38.846 INFO 26080 --- [_group_level_14] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer_10.13.35.30_5fb9f9a9-b2f5-414a-ab46-890223735f85, groupId=delay_consumer_group_level_14] Setting offset for partition delay_topic_level_14-0 to the committed offset FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[admin1-virtual-machine:9092 (id: 0 rack: null)], epoch=absent}}
2025-10-15 16:23:38.846 INFO 26080 --- [r_group_level_8] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer_10.13.35.30_1e3367cf-7cc9-4e81-81a3-4512c428c480, groupId=delay_consumer_group_level_8] Setting offset for partition delay_topic_level_8-0 to the committed offset FetchPosition{offset=0, offsetEpoch=Optional.empty, currentLeader=LeaderAndEpoch{leader=Optional[admin1-virtual-machine:9092 (id: 0 rack: null)], epoch=absent}}
Message: Test message (main Topic: hello-unicast-dlq-no-partition) - will trigger a dead letter
2025-10-15 16:23:38.880 INFO 26080 --- [nPool-worker-22] c.t.s.e.port.kafka.KafkaEventCenter : Delay level=1, expirationTime=1760516619880
2025-10-15 16:23:38.883 INFO 26080 --- [nPool-worker-22] c.t.s.e.p.k.d.DLQEventHandlerWrapper : Event will be retried after a 1000ms delay
2025-10-15 16:23:38.896 INFO 26080 --- [r_group_level_1] c.t.s.e.p.k.c.GenericKafkaConsumerTask : Pause applied successfully, topic: delay_topic_level_1
2025-10-15 16:23:39.885 INFO 26080 --- [pool-2-thread-1] c.t.s.e.port.kafka.delay.DelayHandler : Delay task executing: targetTopic=delay_topic_level_1, currentTime=1760516619885
2025-10-15 16:23:39.900 INFO 26080 --- [r_group_level_1] c.t.s.e.p.k.c.GenericKafkaConsumerTask : Resume applied successfully, topic: delay_topic_level_1
2025-10-15 16:23:39.906 ERROR 26080 --- [pool-2-thread-1] c.t.s.e.port.kafka.KafkaEventCenter : Failed to forward delayed event, targetTopic: hello-unicast-dlq-no-partition
java.lang.ClassCastException: java.util.LinkedHashMap cannot be cast to com.tplink.smb.eventcenter.api.Event
at com.tplink.smb.eventcenter.port.kafka.KafkaEventCenter.forwardToTargetTopic(KafkaEventCenter.java:2478) ~[eventcenter.kafka-1.4.5002-test-SNAPSHOT.jar:1.4.5002-test-SNAPSHOT]
at com.tplink.smb.eventcenter.port.kafka.delay.DelayHandler.lambda$handleEvent$0(DelayHandler.java:55) [eventcenter.kafka-1.4.5002-test-SNAPSHOT.jar:1.4.5002-test-SNAPSHOT]
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) ~[na:1.8.0_462-462]
at java.util.concurrent.FutureTask.run(FutureTask.java:266) ~[na:1.8.0_462-462]
at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$201(ScheduledThreadPoolExecutor.java:180) ~[na:1.8.0_462-462]
at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:293) ~[na:1.8.0_462-462]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) ~[na:1.8.0_462-462]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) ~[na:1.8.0_462-462]
Please analyze and resolve this.
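For reference: the ClassCastException shows that when the DelayEvent was consumed back from delay_topic_level_1, its generic event field had been deserialized as a java.util.LinkedHashMap rather than as Event, because DelayEvent<T> carries no concrete type information for T at deserialization time; the cast inside forwardToTargetTopic (KafkaEventCenter.java:2478) then fails. A minimal sketch of one way to recover the concrete type before forwarding, assuming the payload was deserialized with Jackson (the toEvent helper below is hypothetical, not part of the event-center API):

import com.fasterxml.jackson.databind.ObjectMapper;

// Hypothetical helper: re-maps an untyped payload (e.g. the LinkedHashMap that
// Jackson produces for DelayEvent<T>) onto the concrete Event class before it
// is handed to the typed send(...) call.
private static final ObjectMapper MAPPER = new ObjectMapper();

private Event toEvent(Object payload) {
    if (payload instanceof Event) {
        return (Event) payload; // already the expected type
    }
    return MAPPER.convertValue(payload, Event.class); // LinkedHashMap -> Event
}

Alternatively, registering the delay consumer with the concrete payload type, or serializing type information alongside the event, would avoid the intermediate Map altogether; whether that is possible depends on the event-center API.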