Can someone take a look at this error? While running SharerInternalControllerTest#batchDeleteDeviceActivities, the log fills with Kafka consumer poll-timeout warnings and CommitFailedException rebalance errors, a ClassCastException is logged for device dev4, and the test finally fails with a Mockito NeverWantedButInvoked error:
11-03-2025 09:21:15.498 WARN [kafka-coordinator-heartbeat-thread | tapocare.local] [TID: N/A] o.a.k.c.c.i.AbstractCoordinator: [Consumer clientId=consumer-46, groupId=tapocare.local] This member will leave the group because consumer poll timeout has expired. This means the time between subsequent calls to poll() was longer than the configured max.poll.interval.ms, which typically implies that the poll loop is spending too much time processing messages. You can address this either by increasing max.poll.interval.ms or by reducing the maximum size of batches returned in poll() with max.poll.records.
[The same poll-timeout warning is logged at 09:21:15.500 for consumer-19 and consumer-10.]
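These warnings (and the CommitFailedException rebalance errors further down) all point at the same two knobs: max.poll.interval.ms and max.poll.records. The STAT line later in the log shows the batchDeleteActivities call took exeMs 673914, roughly 11 minutes on the main thread, which is far beyond the default max.poll.interval.ms of 300000 ms, so every consumer in the group was evicted while the test was busy. A minimal sketch of where those two properties would be set on a Spring Kafka consumer factory, assuming a plain DefaultKafkaConsumerFactory; the class name, bootstrap address, and values below are illustrative, not the project's actual configuration:

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;

public class TunedConsumerFactory {

    // Illustrative consumer factory: only the two properties named in the warning are the point here.
    public static ConsumerFactory<String, String> consumerFactory() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "tapocare.local");
        // Allow more time between poll() calls before the member is kicked out of the group (default 300000 ms)...
        props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 600_000);
        // ...or hand the listener fewer records per poll() so each batch finishes sooner (default 500).
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 100);
        return new DefaultKafkaConsumerFactory<>(props, new StringDeserializer(), new StringDeserializer());
    }
}

In a test profile the same two properties could also be supplied through spring.kafka.consumer.properties, but either way the underlying issue is that a single poll's worth of processing has to finish inside max.poll.interval.ms.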
11-03-2025 09:21:15.514 ERROR [main] [TID: N/A] c.t.n.c.a.c.i.SharerInternalController: Delete failed for device dev4: class com.tplink.storage.index.proto.deviceactivity.BatchDeleteDeviceActivitiesRequest cannot be cast to class com.tplink.nbu.cloudstorage.tapocare.openapi.proto.service.BatchDeleteDeviceActivitiesRequest (com.tplink.storage.index.proto.deviceactivity.BatchDeleteDeviceActivitiesRequest and com.tplink.nbu.cloudstorage.tapocare.openapi.proto.service.BatchDeleteDeviceActivitiesRequest are in unnamed module of loader 'app')
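The ClassCastException for dev4 is a separate problem from the Kafka noise: two generated classes share the simple name BatchDeleteDeviceActivitiesRequest but live in different packages (com.tplink.storage.index.proto.deviceactivity versus com.tplink.nbu.cloudstorage.tapocare.openapi.proto.service), so an object built as one can never be cast to the other, even though both sit in the unnamed module of the same 'app' class loader. A self-contained stand-in that reproduces the same failure mode; the two nested classes are placeholders, not the project's proto classes:

public class SameNameCastDemo {

    // Placeholder for com.tplink.storage.index.proto.deviceactivity.BatchDeleteDeviceActivitiesRequest
    static class StorageIndexRequest {}

    // Placeholder for com.tplink.nbu.cloudstorage.tapocare.openapi.proto.service.BatchDeleteDeviceActivitiesRequest
    static class OpenapiRequest {}

    public static void main(String[] args) {
        Object built = new StorageIndexRequest();        // what the caller actually constructed
        OpenapiRequest needed = (OpenapiRequest) built;   // throws ClassCastException, as in the ERROR above
        System.out.println(needed);
    }
}

Presumably whatever builds the request for SharerInternalController is constructing the storage.index variant where the openapi variant is expected; fixing that import (or adding an explicit conversion between the two protos) is what would remove the "Delete failed for device dev4" line.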
11-03-2025 09:21:15.668 WARN [main] [TID: N/A] c.t.n.c.a.u.HttpRequestUtil: empty client ip
11-03-2025 09:21:15.731 ERROR [org.springframework.kafka.KafkaListenerEndpointContainer#23-0-C-1] [TID: N/A] o.a.k.c.c.i.ConsumerCoordinator: [Consumer clientId=consumer-28, groupId=tapocare.local] User provided listener org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer$ListenerConsumerRebalanceListener failed on partition assignment
org.apache.kafka.clients.consumer.CommitFailedException: Commit cannot be completed since the group has already rebalanced and assigned the partitions to another member. This means that the time between subsequent calls to poll() was longer than the configured max.poll.interval.ms, which typically implies that the poll loop is spending too much time message processing. You can address this either by increasing max.poll.interval.ms or by reducing the maximum size of batches returned in poll() with max.poll.records.
at org.apache.kafka.clients.consumer.internals.ConsumerCoordinator.sendOffsetCommitRequest(ConsumerCoordinator.java:820) ~[kafka-clients-2.3.1.jar:?]
at org.apache.kafka.clients.consumer.internals.ConsumerCoordinator.commitOffsetsSync(ConsumerCoordinator.java:692) ~[kafka-clients-2.3.1.jar:?]
at org.apache.kafka.clients.consumer.KafkaConsumer.commitSync(KafkaConsumer.java:1454) ~[kafka-clients-2.3.1.jar:?]
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer$ListenerConsumerRebalanceListener.commitCurrentOffsets(KafkaMessageListenerContainer.java:2468) ~[spring-kafka-2.3.11.RELEASE.jar:2.3.11.RELEASE]
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer$ListenerConsumerRebalanceListener.collectAndCommitIfNecessary(KafkaMessageListenerContainer.java:2422) ~[spring-kafka-2.3.11.RELEASE.jar:2.3.11.RELEASE]
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer$ListenerConsumerRebalanceListener.onPartitionsAssigned(KafkaMessageListenerContainer.java:2385) ~[spring-kafka-2.3.11.RELEASE.jar:2.3.11.RELEASE]
at org.apache.kafka.clients.consumer.internals.ConsumerCoordinator.onJoinComplete(ConsumerCoordinator.java:285) [kafka-clients-2.3.1.jar:?]
at org.apache.kafka.clients.consumer.internals.AbstractCoordinator.joinGroupIfNeeded(AbstractCoordinator.java:424) [kafka-clients-2.3.1.jar:?]
at org.apache.kafka.clients.consumer.internals.AbstractCoordinator.ensureActiveGroup(AbstractCoordinator.java:358) [kafka-clients-2.3.1.jar:?]
at org.apache.kafka.clients.consumer.internals.ConsumerCoordinator.poll(ConsumerCoordinator.java:353) [kafka-clients-2.3.1.jar:?]
at org.apache.kafka.clients.consumer.KafkaConsumer.updateAssignmentMetadataIfNeeded(KafkaConsumer.java:1251) [kafka-clients-2.3.1.jar:?]
at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:1216) [kafka-clients-2.3.1.jar:?]
at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:1201) [kafka-clients-2.3.1.jar:?]
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.doPoll(KafkaMessageListenerContainer.java:1108) [spring-kafka-2.3.11.RELEASE.jar:2.3.11.RELEASE]
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.pollAndInvoke(KafkaMessageListenerContainer.java:1064) [spring-kafka-2.3.11.RELEASE.jar:2.3.11.RELEASE]
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.run(KafkaMessageListenerContainer.java:992) [spring-kafka-2.3.11.RELEASE.jar:2.3.11.RELEASE]
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515) [?:?]
at java.util.concurrent.FutureTask.run$$$capture(FutureTask.java:264) [?:?]
at java.util.concurrent.FutureTask.run(FutureTask.java) [?:?]
at java.lang.Thread.run(Thread.java:829) [?:?]
[The identical CommitFailedException stack trace repeats for consumer-37 (09:21:15.731) and consumer-33 (09:21:15.924); only the clientId and listener-container thread differ.]
11-03-2025 09:21:15.955 INFO [main] [TID: N/A] c.t.n.c.a.s.i.StatServiceImpl: [STAT] {"path":"POST /internal/v1/Sharer/batchDeleteActivities","status":200,"exeMs":673914,"requestBody":"{\"owner2Device2EventIdsmap\":{\"owner1\":{\"dev1\":[\"event1\",\"event2\"],\"dev2\":[\"event3\"]},\"owner2\":{\"dev3\":[\"event4\"],\"dev4\":[]}},\"orderServiceType\":null,\"subscriptionMeta\":{\"productLine\":\"TAPO_CARE\",\"orderServiceType\":\"TAPO_CARE\",\"serviceId\":\"TAPO_CARE\",\"serviceName\":\"Tapo Care\",\"openapiClientId\":\"\",\"openApiProductLine\":false},\"destregion\":null}","responseBody":"{\"errorCode\":17200}","params":"{}","clientIp":"BsV8no6a9yLYUT6rbu2PdA==","cipherSuite":"0","appTimeZone":"","statKey":"@ApiStat"}
MockHttpServletRequest:
HTTP Method = POST
Request URI = /internal/v1/Sharer/batchDeleteActivities
Parameters = {}
Headers = [Content-Type:"application/json;charset=UTF-8", Content-Length:"343"]
Body = {"owner2Device2EventIdsmap":{"owner1":{"dev1":["event1","event2"],"dev2":["event3"]},"owner2":{"dev3":["event4"],"dev4":[]}},"orderServiceType":null,"subscriptionMeta":{"productLine":"TAPO_CARE","orderServiceType":"TAPO_CARE","serviceId":"TAPO_CARE","serviceName":"Tapo Care","openapiClientId":"","openApiProductLine":false},"destregion":null}
Session Attrs = {}
Handler:
Type = com.tplink.nbu.cloudstorage.appserver.controller.internal.SharerInternalController
Method = com.tplink.nbu.cloudstorage.appserver.controller.internal.SharerInternalController#batchDeleteDeviceActivities(InternalbatchDeleteDeviceActivitiesReq)
Async:
Async started = false
Async result = null
Resolved Exception:
Type = null
ModelAndView:
View name = null
View = null
Model = null
FlashMap:
Attributes = null
MockHttpServletResponse:
Status = 200
Error message = null
Headers = [Vary:"Origin", "Access-Control-Request-Method", "Access-Control-Request-Headers", X-Frame-Options:"deny", X-Content-Type-Options:"nosniff", Content-Security-Policy:"default-src 'self'", Content-Type:"application/json", Content-Length:"19"]
Content type = application/json
Body = {"errorCode":17200}
Forwarded URL = null
Redirected URL = null
Cookies = []
[The same CommitFailedException stack trace then repeats for consumer-34 (09:21:16.091) and consumer-46 (09:21:16.093), followed by a second MockHttpServletRequest / MockHttpServletResponse dump identical to the one above.]
[Three more identical CommitFailedException stack traces follow at 09:21:16.116 for consumer-7, consumer-13, and consumer-22.]
org.mockito.exceptions.verification.NeverWantedButInvoked:
indexServerApiImpl bean.batchDeleteDeviceActivities(
<SharerInternalControllerTest$$Lambda$3594/0x00000008418db040>,
<any com.tplink.nbu.cloudstorage.appserver.constants.SubscriptionMeta>
);
Never wanted here:
-> at com.tplink.nbu.cloudstorage.appserver.controller.internal.SharerInternalControllerTest.batchDeleteDeviceActivities(SharerInternalControllerTest.java:326)
But invoked here:
-> at com.tplink.nbu.cloudstorage.appserver.controller.internal.SharerInternalController.batchDeleteDeviceActivities(SharerInternalController.java:181)
at com.tplink.nbu.cloudstorage.appserver.controller.internal.SharerInternalControllerTest.batchDeleteDeviceActivities(SharerInternalControllerTest.java:326)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:566)
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:50)
at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:47)
at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
at org.springframework.test.context.junit4.statements.RunBeforeTestExecutionCallbacks.evaluate(RunBeforeTestExecutionCallbacks.java:74)
at org.springframework.test.context.junit4.statements.RunAfterTestExecutionCallbacks.evaluate(RunAfterTestExecutionCallbacks.java:84)
at org.springframework.test.context.junit4.statements.RunBeforeTestMethodCallbacks.evaluate(RunBeforeTestMethodCallbacks.java:75)
at org.springframework.test.context.junit4.statements.RunAfterTestMethodCallbacks.evaluate(RunAfterTestMethodCallbacks.java:86)
at org.springframework.test.context.junit4.statements.SpringRepeat.evaluate(SpringRepeat.java:84)
at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:325)
at org.springframework.test.context.junit4.SpringJUnit4ClassRunner.runChild(SpringJUnit4ClassRunner.java:251)
at org.springframework.test.context.junit4.SpringJUnit4ClassRunner.runChild(SpringJUnit4ClassRunner.java:97)
at org.junit.runners.ParentRunner$3.run(ParentRunner.java:290)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:71)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:288)
at org.junit.runners.ParentRunner.access$000(ParentRunner.java:58)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:268)
at org.springframework.test.context.junit4.statements.RunBeforeTestClassCallbacks.evaluate(RunBeforeTestClassCallbacks.java:61)
at org.springframework.test.context.junit4.statements.RunAfterTestClassCallbacks.evaluate(RunAfterTestClassCallbacks.java:70)
at org.junit.runners.ParentRunner.run(ParentRunner.java:363)
at org.springframework.test.context.junit4.SpringJUnit4ClassRunner.run(SpringJUnit4ClassRunner.java:190)
at org.junit.runner.JUnitCore.run(JUnitCore.java:137)
at com.intellij.junit4.JUnit4IdeaTestRunner.startRunnerWithArgs(JUnit4IdeaTestRunner.java:69)
at com.intellij.rt.junit.IdeaTestRunner$Repeater$1.execute(IdeaTestRunner.java:38)
at com.intellij.rt.execution.junit.TestsRepeater.repeat(TestsRepeater.java:11)
at com.intellij.rt.junit.IdeaTestRunner$Repeater.startRunnerWithArgs(IdeaTestRunner.java:35)
at com.intellij.rt.junit.JUnitStarter.prepareStreamsAndStart(JUnitStarter.java:231)
at com.intellij.rt.junit.JUnitStarter.main(JUnitStarter.java:55)
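The failure that actually ends the test is the Mockito one: the test asserted (SharerInternalControllerTest.java:326) that batchDeleteDeviceActivities must never be called on the indexServerApiImpl mock, but the controller did call it (SharerInternalController.java:181). A self-contained sketch of the same failure shape, using hypothetical stand-in types rather than the project's classes; in the real test the interface is the index-server API bean and the meta type is com.tplink.nbu.cloudstorage.appserver.constants.SubscriptionMeta:

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.argThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;

public class NeverWantedSketch {

    // Hypothetical stand-ins for the real API and its SubscriptionMeta parameter.
    interface IndexServerApi {
        void batchDeleteDeviceActivities(Object request, SubscriptionMeta meta);
    }
    static class SubscriptionMeta {}

    public static void main(String[] args) {
        IndexServerApi indexServerApiImpl = mock(IndexServerApi.class);

        // The code under test calls the mock, the way SharerInternalController.java:181 does:
        indexServerApiImpl.batchDeleteDeviceActivities(new Object(), new SubscriptionMeta());

        // The shape of the assertion behind "Never wanted here": a never() verification with a
        // lambda matcher (the $$Lambda$3594 in the report) plus any(SubscriptionMeta.class).
        // This line throws NeverWantedButInvoked, exactly like the failure above.
        verify(indexServerApiImpl, never()).batchDeleteDeviceActivities(
                argThat(request -> request != null),
                any(SubscriptionMeta.class));
    }
}

Whether the right fix is to stop the controller from delegating in this scenario or to relax the assertion to times(1) depends on what the test at line 326 is meant to prove; the log alone does not say.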