A consumer commits offsets both during normal consumption and before a rebalance operation. To do so, it packages the offset of each partition it consumes into an OffsetCommitRequest and sends it to the GroupCoordinator, which encodes those offsets as messages and appends them to the corresponding __consumer_offsets partition. In GroupMetadataManager, two methods are involved in handling an OffsetCommitRequest: prepareStoreOffsets, which produces a DelayedStore object wrapping the messages to be appended together with a callback function, and store, which appends those messages to __consumer_offsets.
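As a point of reference before reading prepareStoreOffsets: DelayedStore is little more than a holder pairing the pending message set with the callback. The sketch below reflects how the two methods use it and is an approximation, not the authoritative definition.

// Rough sketch of DelayedStore: it carries the messages destined for __consumer_offsets
// together with the callback to run once the log append completes.
// (Field names are assumed from how prepareStoreOffsets and store use the object.)
case class DelayedStore(messageSet: Map[TopicPartition, MessageSet],
                        callback: Map[TopicPartition, PartitionResponse] => Unit)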
def prepareStoreOffsets(groupId: String,
                        consumerId: String,
                        generationId: Int,
                        offsetMetadata: immutable.Map[TopicPartition, OffsetAndMetadata],
                        responseCallback: immutable.Map[TopicPartition, Short] => Unit): DelayedStore = {
  // first filter out partitions with offset metadata size exceeding limit
  // Check the length of the OffsetAndMetadata.metadata field; metadata is empty by default.
  // A consumer can attach extra descriptive information (besides the offset itself) to an
  // OffsetCommitRequest; after parsing, it is stored in the metadata field.
  val filteredOffsetMetadata = offsetMetadata.filter { case (topicPartition, offsetAndMetadata) =>
    validateOffsetMetadataLength(offsetAndMetadata.metadata)
  }

  // construct the message set to append
  val messages = filteredOffsetMetadata.map { case (topicAndPartition, offsetAndMetadata) =>
    // look up the message format (magic value) and timestamp used by the
    // corresponding __consumer_offsets partition
    val (magicValue, timestamp) = getMessageFormatVersionAndTimestamp(partitionFor(groupId))
    // create the message that records the offset; its value holds the data from offsetAndMetadata
    new Message(
      key = GroupMetadataManager.offsetCommitKey(groupId, topicAndPartition.topic, topicAndPartition.partition),
      bytes = GroupMetadataManager.offsetCommitValue(offsetAndMetadata),
      timestamp = timestamp,
      magicValue = magicValue
    )
  }.toSeq

  // determine the __consumer_offsets partition for this consumer group
  val offsetTopicPartition = new TopicPartition(TopicConstants.GROUP_METADATA_TOPIC_NAME, partitionFor(groupId))

  // map the __consumer_offsets partition to the message set to be appended
  val offsetsAndMetadataMessageSet = Map(offsetTopicPartition ->
    new ByteBufferMessageSet(config.offsetsTopicCompressionCodec, messages:_*))

  // set the callback function to insert offsets into cache after log append completed
  def putCacheCallback(responseStatus: Map[TopicPartition, PartitionResponse]) {
    // the append response should only contain the offsets topic partition
    if (responseStatus.size != 1 || !responseStatus.contains(offsetTopicPartition))
      throw new IllegalStateException("Append status %s should only have one partition %s"
        .format(responseStatus, offsetTopicPartition))

    // construct the commit response status and insert
    // the offset and metadata to cache if the append status has no error
    val status = responseStatus(offsetTopicPartition)

    val responseCode =
      if (status.errorCode == Errors.NONE.code) {
        // the append succeeded, so update the corresponding OffsetAndMetadata entries in the offsets cache
        filteredOffsetMetadata.foreach { case (topicAndPartition, offsetAndMetadata) =>
          putOffset(GroupTopicPartition(groupId, topicAndPartition), offsetAndMetadata)
        }
        Errors.NONE.code
      } else {
        debug("Offset commit %s from group %s consumer %s with generation %d failed when appending to log due to %s"
          .format(filteredOffsetMetadata, groupId, consumerId, generationId, Errors.forCode(status.errorCode).exceptionName))

        // transform the log append error code to the corresponding commit status error code
        if (status.errorCode == Errors.UNKNOWN_TOPIC_OR_PARTITION.code)
          Errors.GROUP_COORDINATOR_NOT_AVAILABLE.code
        else if (status.errorCode == Errors.NOT_LEADER_FOR_PARTITION.code)
          Errors.NOT_COORDINATOR_FOR_GROUP.code
        else if (status.errorCode == Errors.MESSAGE_TOO_LARGE.code
          || status.errorCode == Errors.RECORD_LIST_TOO_LARGE.code
          || status.errorCode == Errors.INVALID_FETCH_SIZE.code)
          Errors.INVALID_COMMIT_OFFSET_SIZE.code
        else
          status.errorCode
      }

    // compute the final error codes for the commit response
    val commitStatus = offsetMetadata.map { case (topicAndPartition, offsetAndMetadata) =>
      if (validateOffsetMetadataLength(offsetAndMetadata.metadata))
        (topicAndPartition, responseCode)
      else
        (topicAndPartition, Errors.OFFSET_METADATA_TOO_LARGE.code)
    }

    // finally trigger the callback logic passed from the API layer
    responseCallback(commitStatus)
  }

  DelayedStore(offsetsAndMetadataMessageSet, putCacheCallback)
}
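The DelayedStore returned above is then handed to the second method, store, which passes the message set and putCacheCallback to ReplicaManager to perform the actual log append. The sketch below follows the 0.10.x code path; the exact call sites may differ slightly between versions.

// Sketch of store: it forwards the pending offset messages to ReplicaManager.
def store(delayedAppend: DelayedStore) {
  // append the offset messages to __consumer_offsets; putCacheCallback runs on completion
  replicaManager.appendMessages(
    config.offsetCommitTimeoutMs.toLong,  // how long the append may be delayed
    config.offsetCommitRequiredAcks,      // acks required before the commit is acknowledged
    true,                                 // allow appending to the internal offsets topic
    delayedAppend.messageSet,
    delayedAppend.callback)
}

// In GroupCoordinator the two steps are chained roughly as follows: prepareStoreOffsets is
// invoked while holding the group lock, and the resulting DelayedStore is passed to store()
// outside the lock, so the log append itself does not block other group operations.
val delayedStore = groupManager.prepareStoreOffsets(groupId, memberId, generationId,
  offsetMetadata, responseCallback)
groupManager.store(delayedStore)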