Message Storage
1. Real-Time Updating of the Consume Queue and Index Files
The consume queue files and the message-property index files are both built from the CommitLog files. Once a message submitted by a producer has been stored in the CommitLog, the ConsumeQueue and IndexFile must be updated promptly; otherwise the message cannot be consumed in time, and looking up messages by their properties would suffer noticeable delays. RocketMQ starts a thread, ReputMessageService, to forward CommitLog update events in near real time; the corresponding task handlers then update the ConsumeQueue and IndexFile based on the forwarded messages.

Sequence diagram for building the consume queue and index files:

DefaultMessageStore#start
log.info("[SetReputOffset] maxPhysicalPosInLogicQueue={} clMinOffset={} clMaxOffset={} clConfirmedOffset={}",
maxPhysicalPosInLogicQueue, this.commitLog.getMinOffset(), this.commitLog.getMaxOffset(), this.commitLog.getConfirmOffset());
// set the starting reput offset to the max physical position already built into the logic queues
this.reputMessageService.setReputFromOffset(maxPhysicalPosInLogicQueue);
// start the message dispatch (reput) service thread
this.reputMessageService.start();
ReputMessageService#run
DefaultMessageStore.log.info(this.getServiceName() + " service started");
// while the service is running, try every 1 ms to push new messages to the consume queue and index files
while (!this.isStopped()) {
try {
Thread.sleep(1);
// dispatch messages
this.doReput();
} catch (Exception e) {
DefaultMessageStore.log.warn(this.getServiceName() + " service has exception. ", e);
}
}
DefaultMessageStore.log.info(this.getServiceName() + " service end");
ReputMessageService#doReput
this.reputFromOffset = result.getStartOffset();
for (int readSize = 0; readSize < result.getSize() && doNext; ) {
// iterate over the messages in result, reading one at a time and creating a DispatchRequest for it
DispatchRequest dispatchRequest =
DefaultMessageStore.this.commitLog.checkMessageAndReturnSize(result.getByteBuffer(), false, false);
int size = dispatchRequest.getBufferSize() == -1 ? dispatchRequest.getMsgSize() : dispatchRequest.getBufferSize();
if (dispatchRequest.isSuccess()) {
if (size > 0) {
//---------------------------↓↓↓-------------------------
// dispatch the request
DefaultMessageStore.this.doDispatch(dispatchRequest);
// when a new message arrives, notify the listener to handle it
if (BrokerRole.SLAVE != DefaultMessageStore.this.getMessageStoreConfig().getBrokerRole()
&& DefaultMessageStore.this.brokerConfig.isLongPollingEnable()
&& DefaultMessageStore.this.messageArrivingListener != null) {
// messageArrivingListener is notified of the newly stored message (long polling)
DefaultMessageStore.this.messageArrivingListener.arriving(dispatchRequest.getTopic(),
dispatchRequest.getQueueId(), dispatchRequest.getConsumeQueueOffset() + 1,
dispatchRequest.getTagsCode(), dispatchRequest.getStoreTimestamp(),
dispatchRequest.getBitMap(), dispatchRequest.getPropertiesMap());
}
// advance the reput (dispatch) offset
this.reputFromOffset += size;
readSize += size;
if (DefaultMessageStore.this.getMessageStoreConfig().getBrokerRole() == BrokerRole.SLAVE) {
DefaultMessageStore.this.storeStatsService
.getSinglePutMessageTopicTimesTotal(dispatchRequest.getTopic()).add(1);
DefaultMessageStore.this.storeStatsService
.getSinglePutMessageTopicSizeTotal(dispatchRequest.getTopic())
.add(dispatchRequest.getMsgSize());
}
}
....
}
DispatchRequest
String topic; // message topic
int queueId; // message queue ID
long commitLogOffset; // physical offset of the message in the CommitLog
int msgSize; // message length
long tagsCode; // hash code of the message filter tag
long storeTimestamp; // message store timestamp
long consumeQueueOffset; // logical offset in the consume queue
String keys; // message index keys
boolean success; // whether a complete message was parsed successfully
String uniqKey; // unique message key
int sysFlag; // message system flag
long preparedTransactionOffset; // prepared transaction offset
Map<String, String> propertiesMap; // message properties
byte[] bitMap; // bit map
DefaultMessageStore#doDispatch
for (CommitLogDispatcher dispatcher : this.dispatcherList) {
dispatcher.dispatch(req);
}
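doDispatch simply iterates over dispatcherList; by default this list holds CommitLogDispatcherBuildConsumeQueue and CommitLogDispatcherBuildIndex, which are analyzed in 1.1 and 1.2 below. As a minimal sketch (not part of the RocketMQ source quoted in this article), a custom dispatcher only needs to implement the single-method CommitLogDispatcher interface; the class name and log output here are hypothetical:

import org.apache.rocketmq.store.CommitLogDispatcher;
import org.apache.rocketmq.store.DispatchRequest;

// Hypothetical dispatcher, used only to illustrate the hook that doDispatch() iterates over.
public class CommitLogDispatcherLogging implements CommitLogDispatcher {
    @Override
    public void dispatch(DispatchRequest request) {
        // Invoked once for every message reput from the CommitLog.
        System.out.printf("dispatched topic=%s queueId=%d commitLogOffset=%d size=%d%n",
            request.getTopic(), request.getQueueId(),
            request.getCommitLogOffset(), request.getMsgSize());
    }
}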
1.1. Forwarding to the ConsumeQueue
CommitLogDispatcherBuildConsumeQueue#dispatch
final int tranType = MessageSysFlag.getTransactionValue(request.getSysFlag());
switch (tranType) {
case MessageSysFlag.TRANSACTION_NOT_TYPE:
case MessageSysFlag.TRANSACTION_COMMIT_TYPE:
// dispatch the message to the consume queue
DefaultMessageStore.this.putMessagePositionInfo(request);
break;
case MessageSysFlag.TRANSACTION_PREPARED_TYPE:
case MessageSysFlag.TRANSACTION_ROLLBACK_TYPE:
break;
}
DefaultMessageStore#putMessagePositionInfo
// find the consume queue by the topic and queue ID in the request
ConsumeQueue cq = this.findConsumeQueue(dispatchRequest.getTopic(), dispatchRequest.getQueueId());
// write the message position info into the consume queue
cq.putMessagePositionInfoWrapper(dispatchRequest);
ConsumeQueue#putMessagePositionInfoWrapper
boolean result = this.putMessagePositionInfo(request.getCommitLogOffset(),
    request.getMsgSize(), tagsCode, request.getConsumeQueueOffset());
ConsumeQueue#putMessagePositionInfo
private boolean putMessagePositionInfo(
final long offset, // CommitLog offset of the message
final int size, // message size
final long tagsCode, // hash code of the message tags
final long cqOffset) { // logical offset at which to write into the consume queue
// write the CommitLog offset, message size and tags code into the ByteBuffer in order
this.byteBufferIndex.flip();
this.byteBufferIndex.limit(CQ_STORE_UNIT_SIZE);
this.byteBufferIndex.putLong(offset);
this.byteBufferIndex.putInt(size);
this.byteBufferIndex.putLong(tagsCode);
// compute the expected insert position in the ConsumeQueue file
final long expectLogicOffset = cqOffset * CQ_STORE_UNIT_SIZE;
// get the memory-mapped file
MappedFile mappedFile = this.mappedFileQueue.getLastMappedFile(expectLogicOffset);
if (mappedFile != null) {
// if the file was newly created, fill the blank space before the expected offset
if (mappedFile.isFirstCreateInQueue() && cqOffset != 0 && mappedFile.getWrotePosition() == 0) {
this.minLogicOffset = expectLogicOffset;
this.mappedFileQueue.setFlushedWhere(expectLogicOffset);
this.mappedFileQueue.setCommittedWhere(expectLogicOffset);
this.fillPreBlank(mappedFile, expectLogicOffset);
log.info("fill pre blank space " + mappedFile.getFileName() + " " + expectLogicOffset + " "
+ mappedFile.getWrotePosition());
}
// append the entry to the memory-mapped file (disk flush is asynchronous); the whole process is based on MappedFile
return mappedFile.appendMessage(this.byteBufferIndex.array());
}
}
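Each ConsumeQueue entry written above is a fixed 20-byte unit (CQ_STORE_UNIT_SIZE): an 8-byte CommitLog offset, a 4-byte message size and an 8-byte tags hash code. A minimal standalone sketch of encoding and decoding one such entry (illustrative only, not RocketMQ code):

import java.nio.ByteBuffer;

// Standalone illustration of the 20-byte ConsumeQueue entry layout written by putMessagePositionInfo().
public class ConsumeQueueEntryDemo {
    private static final int CQ_STORE_UNIT_SIZE = 20; // 8 + 4 + 8 bytes

    public static void main(String[] args) {
        // Encode an entry in the same fixed order as putMessagePositionInfo().
        ByteBuffer unit = ByteBuffer.allocate(CQ_STORE_UNIT_SIZE);
        unit.putLong(123456L); // CommitLog physical offset
        unit.putInt(256);      // message size
        unit.putLong(98765L);  // tags hash code
        unit.flip();

        // Decode it back in the same order.
        long commitLogOffset = unit.getLong();
        int size = unit.getInt();
        long tagsCode = unit.getLong();
        System.out.printf("offset=%d size=%d tagsCode=%d%n", commitLogOffset, size, tagsCode);
    }
}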
1.2. Forwarding to the Index File
The core implementation class is IndexService, and the class that wraps an individual index file is IndexFile.
IndexFile
// number of bytes occupied by each hash slot
private static int hashSlotSize = 4;
// number of bytes occupied by each index entry
private static int indexSize = 20;
// used to check whether an index entry is valid
private static int invalidIndex = 0;
// number of hash slots in the IndexFile
private final int hashSlotNum;
// maximum number of index entries in the IndexFile
private final int indexNum;
// the underlying MappedFile
private final MappedFile mappedFile;
// file channel of the mapped file
private final FileChannel fileChannel;
// memory-mapped buffer (page cache)
private final MappedByteBuffer mappedByteBuffer;
// header of the IndexFile
private final IndexHeader indexHeader;
Sequence diagram for dispatching messages to the index file:

CommitLogDispatcherBuildIndex#dispatch
// message indexing is enabled
if (DefaultMessageStore.this.messageStoreConfig.isMessageIndexEnable()) {
// build the index
DefaultMessageStore.this.indexService.buildIndex(request);
}
IndexService#buildIndex
// get or create the index file
IndexFile indexFile = retryGetAndCreateIndexFile();
if (indexFile != null) {
// max physical (CommitLog) offset recorded in the index file
long endPhyOffset = indexFile.getEndPhyOffset();
DispatchRequest msg = req;
// message topic
String topic = msg.getTopic();
// message keys
String keys = msg.getKeys();
// if the message's physical offset is smaller than the max physical offset already in the index file, it is duplicate data, so skip building the index
if (msg.getCommitLogOffset() < endPhyOffset) {
return;
}
final int tranType = MessageSysFlag.getTransactionValue(msg.getSysFlag());
switch (tranType) {
case MessageSysFlag.TRANSACTION_NOT_TYPE:
case MessageSysFlag.TRANSACTION_PREPARED_TYPE:
case MessageSysFlag.TRANSACTION_COMMIT_TYPE:
break;
case MessageSysFlag.TRANSACTION_ROLLBACK_TYPE:
return;
}
// if the unique message key (message ID) is not null, add it to the hash index
if (req.getUniqKey() != null) {
indexFile = putKey(indexFile, msg, buildKey(topic, req.getUniqKey()));
if (indexFile == null) {
log.error("putKey error commitlog {} uniqkey {}", req.getCommitLogOffset(), req.getUniqKey());
return;
}
}
// build the index keys; RocketMQ supports creating multiple indexes for one message, with the keys separated by spaces
if (keys != null && keys.length() > 0) {
String[] keyset = keys.split(MessageConst.KEY_SEPARATOR);
for (int i = 0; i < keyset.length; i++) {
String key = keyset[i];
if (key.length() > 0) {
//-----------↓-----------
indexFile = putKey(indexFile, msg, buildKey(topic, key));
if (indexFile == null) {
log.error("putKey error commitlog {} uniqkey {}", req.getCommitLogOffset(), req.getUniqKey());
return;
}
}
}
}
} else {
log.error("build index error, stop building index");
}
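To make the key handling above concrete, here is a small standalone sketch of how a message's KEYS property turns into several index keys. The space separator mirrors MessageConst.KEY_SEPARATOR, and the "topic#key" form follows IndexService#buildKey (a simple concatenation in the source); the topic and key values are made up:

import java.util.Arrays;

// Illustrative only: how one message with several keys produces several hash-index entries.
public class IndexKeyDemo {
    private static final String KEY_SEPARATOR = " "; // mirrors MessageConst.KEY_SEPARATOR (a single space)

    public static void main(String[] args) {
        String topic = "TopicTest";                       // hypothetical topic
        String keys = "OrderID001 OrderID002 OrderID003"; // user-supplied keys, space separated

        Arrays.stream(keys.split(KEY_SEPARATOR))
            .filter(k -> k.length() > 0)
            .map(k -> topic + "#" + k) // one hash-index entry is built per key
            .forEach(System.out::println);
    }
}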
IndexService#putKey
for (boolean ok = indexFile.putKey(idxKey, msg.getCommitLogOffset(), msg.getStoreTimestamp()); !ok; ) {
log.warn("Index file [" + indexFile.getFileName() + "] is full, trying to create another one");
// retry: get or create another index file
indexFile = retryGetAndCreateIndexFile();
if (null == indexFile) {
return null;
}
ok = indexFile.putKey(idxKey, msg.getCommitLogOffset(), msg.getStoreTimestamp());
}
return indexFile;
IndexFile#putKey
public boolean putKey(final String key,
final long phyOffset, // offset at which the message is stored in the CommitLog
final long storeTimestamp) { // timestamp at which the message was stored in the CommitLog
// if the number of entries stored in this IndexFile is below the maximum, the entry can be stored;
// otherwise storage fails and false is returned
if (this.indexHeader.getIndexCount() < this.indexNum) {
int keyHash = indexKeyHashMethod(key);
int slotPos = keyHash % this.hashSlotNum; // keyHash modulo the number of hash slots gives this key's slot index
int absSlotPos = IndexHeader.INDEX_HEADER_SIZE + slotPos * hashSlotSize;
FileLock fileLock = null;
try {
// fileLock = this.fileChannel.lock(absSlotPos, hashSlotSize,
// false);
int slotValue = this.mappedByteBuffer.getInt(absSlotPos);
if (slotValue <= invalidIndex || slotValue > this.indexHeader.getIndexCount()) {
slotValue = invalidIndex;
}
long timeDiff = storeTimestamp - this.indexHeader.getBeginTimestamp();
timeDiff = timeDiff / 1000;
if (this.indexHeader.getBeginTimestamp() <= 0) {
timeDiff = 0;
} else if (timeDiff > Integer.MAX_VALUE) {
timeDiff = Integer.MAX_VALUE;
} else if (timeDiff < 0) {
timeDiff = 0;
}
int absIndexPos =
IndexHeader.INDEX_HEADER_SIZE + this.hashSlotNum * hashSlotSize
+ this.indexHeader.getIndexCount() * indexSize;
this.mappedByteBuffer.putInt(absIndexPos, keyHash);
this.mappedByteBuffer.putLong(absIndexPos + 4, phyOffset);
this.mappedByteBuffer.putInt(absIndexPos + 4 + 8, (int) timeDiff);
this.mappedByteBuffer.putInt(absIndexPos + 4 + 8 + 4, slotValue);
this.mappedByteBuffer.putInt(absSlotPos, this.indexHeader.getIndexCount());
if (this.indexHeader.getIndexCount() <= 1) {
this.indexHeader.setBeginPhyOffset(phyOffset);
this.indexHeader.setBeginTimestamp(storeTimestamp);
}
if (invalidIndex == slotValue) {
this.indexHeader.incHashSlotCount();
}
this.indexHeader.incIndexCount();
this.indexHeader.setEndPhyOffset(phyOffset);
this.indexHeader.setEndTimestamp(storeTimestamp);
return true;
} catch (Exception e) {
log.error("putKey exception, Key: " + key + " KeyHashCode: " + key.hashCode(), e);
} finally {
if (fileLock != null) {
try {
fileLock.release();
} catch (IOException e) {
log.error("Failed to release the lock", e);
}
}
}
} else {
log.warn("Over index file capacity: index count = " + this.indexHeader.getIndexCount()
+ "; index max num = " + this.indexNum);
}
return false;
}
This touches on the index file's storage format and lookup process, which will not be analyzed in detail here.
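Even so, the slot and entry position arithmetic used by putKey() can be reproduced standalone. A small sketch, assuming IndexHeader.INDEX_HEADER_SIZE is 40 bytes and the commonly used defaults of 5,000,000 hash slots and 20,000,000 entries per file (these numbers are assumptions, not taken from the code above):

// Illustrative calculation of absSlotPos / absIndexPos as computed in IndexFile#putKey().
public class IndexFileLayoutDemo {
    static final int INDEX_HEADER_SIZE = 40; // assumed size of the IndexHeader
    static final int HASH_SLOT_SIZE = 4;     // hashSlotSize above
    static final int INDEX_SIZE = 20;        // indexSize above

    public static void main(String[] args) {
        int hashSlotNum = 5_000_000;                                // assumed default slot count
        int keyHash = Math.abs("TopicTest#OrderID001".hashCode()); // rough stand-in for indexKeyHashMethod()
        int slotPos = keyHash % hashSlotNum;

        // Byte position of the hash slot, as in putKey().
        long absSlotPos = INDEX_HEADER_SIZE + (long) slotPos * HASH_SLOT_SIZE;

        // Byte position of, say, the 10th index entry.
        int indexCount = 10;
        long absIndexPos = INDEX_HEADER_SIZE + (long) hashSlotNum * HASH_SLOT_SIZE
            + (long) indexCount * INDEX_SIZE;

        System.out.printf("slotPos=%d absSlotPos=%d absIndexPos=%d%n", slotPos, absSlotPos, absIndexPos);
    }
}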
2. Consume Queue and Index File Recovery
RocketMQ first stores the complete message in the CommitLog file and then asynchronously generates forwarding tasks to update the ConsumeQueue and Index files. If a message has been stored in the CommitLog successfully but the forwarding task has not yet run when the Broker goes down for some reason, the data in the CommitLog, ConsumeQueue and IndexFile become inconsistent. Without repair, some messages would exist in the CommitLog yet, never having been forwarded to the ConsumeQueue, would never be consumed.
2.1. Loading the Storage Files
Determining whether the last exit was abnormal works as follows: the Broker creates an abort file at startup and deletes it through a JVM shutdown hook on exit. If the abort file still exists at the next startup, the previous Broker exit was abnormal, the CommitLog and ConsumeQueue data may be inconsistent, and the files need to be repaired.
// check whether the last exit was abnormal
boolean lastExitOK = !this.isTempFileExist();
↓↓↓
private boolean isTempFileExist() {
// locate the abort temp file
String fileName = StorePathConfigHelper.getAbortFile(this.messageStoreConfig.getStorePathRootDir());
File file = new File(fileName);
// if it exists, the last exit was abnormal
return file.exists();
}
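A minimal standalone sketch of the abort-file technique described above: create a marker file at startup and delete it in a JVM shutdown hook, so that an abnormal exit leaves the file behind. The file path here is made up and is not the real RocketMQ store path:

import java.io.File;
import java.io.IOException;

// Illustrative only: the "marker file + shutdown hook" pattern used for the abort file.
public class AbortFileDemo {
    public static void main(String[] args) throws IOException {
        File abortFile = new File(System.getProperty("user.home"), "store-demo/abort"); // hypothetical path
        abortFile.getParentFile().mkdirs();

        // If the marker survived, the previous run did not shut down cleanly.
        boolean lastExitOK = !abortFile.exists();
        System.out.println("last shutdown " + (lastExitOK ? "normally" : "abnormally"));

        // Mark this run as "in progress".
        abortFile.createNewFile();

        // A normal shutdown removes the marker; a crash or kill -9 leaves it on disk.
        Runtime.getRuntime().addShutdownHook(new Thread(() -> abortFile.delete()));
    }
}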
DefaultMessageStore#load
// load result
boolean result = true;
try {
// determine whether the last shutdown was normal
boolean lastExitOK = !this.isTempFileExist();
log.info("last shutdown {}", lastExitOK ? "normally" : "abnormally");
// 1. load the CommitLog files
result = result && this.commitLog.load();
// 2. load the ConsumeQueue files
result = result && this.loadConsumeQueue();
if (result) {
// load the checkpoint file under the store directory
this.storeCheckpoint =
new StoreCheckpoint(StorePathConfigHelper.getStoreCheckpoint(this.messageStoreConfig.getStorePathRootDir()));
// 3. load the index files
this.indexService.load(lastExitOK);
// 4. apply a different recovery strategy depending on whether the Broker exited abnormally
this.recover(lastExitOK);
log.info("load over, and the max phy offset = {}", this.getMaxPhyOffset());
if (null != scheduleMessageService) {
result = this.scheduleMessageService.load();
}
}
} catch (Exception e) {
log.error("load exception", e);
result = false;
}
// if loading failed, shut down the MappedFile allocation service thread
if (!result) {
this.allocateMappedFileService.shutdown();
}
return result;
2.1.1. Loading the CommitLog Files
commitLog.load()→mappedFileQueue.load()
MappedFileQueue#load
// open the storage directory
File dir = new File(this.storePath);
// list the files in the directory
File[] ls = dir.listFiles();
if (ls != null) {
return doLoad(Arrays.asList(ls));
}
return true;
MappedFileQueue#doLoad
// sort the files by name
files.sort(Comparator.comparing(File::getName));
// iterate over the file list
for (File file : files) {
// if the file size does not match the configured mapped file size, stop loading
if (file.length() != this.mappedFileSize) {
log.warn(file + "\t" + file.length()
+ " length not matched message store config value, ignore it");
return true;
}
try {
// create the mapped file
MappedFile mappedFile = new MappedFile(file.getPath(), mappedFileSize);
// set the mapped file's pointers
mappedFile.setWrotePosition(this.mappedFileSize);
mappedFile.setFlushedPosition(this.mappedFileSize);
mappedFile.setCommittedPosition(this.mappedFileSize);
// add the mapped file to the queue
this.mappedFiles.add(mappedFile);
log.info("load " + file.getPath() + " OK");
} catch (IOException e) {
log.error("load file " + file + " error", e);
return false;
}
}
return true;
2.1.2. Loading the Consume Queues
DefaultMessageStore#loadConsumeQueue
// consume queue root directory
File dirLogic = new File(StorePathConfigHelper.getStorePathConsumeQueue(this.messageStoreConfig.getStorePathRootDir()));
File[] fileTopicList = dirLogic.listFiles();
if (fileTopicList != null) {
// iterate over the topic directories
for (File fileTopic : fileTopicList) {
// the subdirectory name is the topic name
String topic = fileTopic.getName();
// list the queue directories under the topic
File[] fileQueueIdList = fileTopic.listFiles();
if (fileQueueIdList != null) {
// iterate over the queue directories
for (File fileQueueId : fileQueueIdList) {
int queueId;
try {
queueId = Integer.parseInt(fileQueueId.getName());
} catch (NumberFormatException e) {
continue;
}
// create the consume queue and load it into memory
ConsumeQueue logic = new ConsumeQueue(
topic,
queueId,
StorePathConfigHelper.getStorePathConsumeQueue(this.messageStoreConfig.getStorePathRootDir()),
this.getMessageStoreConfig().getMappedFileSizeConsumeQueue(),
this);
// put it into the consumeQueueTable
this.putConsumeQueue(topic, queueId, logic);
if (!logic.load()) {
return false;
}
}
}
}
}
// log that all consume queues were loaded successfully
log.info("load logics queue all over, OK");
return true;
2.1.3. Loading the Index Files
IndexService#load
// open the index file directory
File dir = new File(this.storePath);
// list the files
File[] files = dir.listFiles();
if (files != null) {
// ascending order
// sort the files
Arrays.sort(files);
// iterate over the files
for (File file : files) {
try {
// create the IndexFile object
IndexFile f = new IndexFile(file.getPath(), this.hashSlotNum, this.indexNum, 0, 0);
// load it
f.load();
// if the last exit was abnormal
if (!lastExitOK) {
// if the index file's end timestamp is later than the checkpoint's index message timestamp
if (f.getEndTimestamp() > this.defaultMessageStore.getStoreCheckpoint()
.getIndexMsgTimestamp()) {
// destroy the file immediately
f.destroy(0);
continue;
}
}
log.info("load index file OK, " + f.getFileName());
// add the valid index file to the list
this.indexFileList.add(f);
} catch (IOException e) {
log.error("load file {} error", file, e);
return false;
} catch (NumberFormatException e) {
log.error("load file {} error", file, e);
}
}
}
return true;
After all files have been loaded, file recovery is performed; a different recovery strategy is applied depending on whether the Broker exited normally.
DefaultMessageStore#recover
long maxPhyOffsetOfConsumeQueue = this.recoverConsumeQueue();
if (lastExitOK) {
// normal recovery
this.commitLog.recoverNormally(maxPhyOffsetOfConsumeQueue);
} else {
// abnormal recovery
this.commitLog.recoverAbnormally(maxPhyOffsetOfConsumeQueue);
}
// record the current logical offset of each consume queue in the CommitLog's topic-queue table
this.recoverTopicQueueTable();
DefaultMessageStore#recoverConsumeQueue
long maxPhysicOffset = -1;
for (ConcurrentMap<Integer, ConsumeQueue> maps : this.consumeQueueTable.values()) {
for (ConsumeQueue logic : maps.values()) {
// recover the consume queue
logic.recover();
if (logic.getMaxPhysicOffset() > maxPhysicOffset) {
maxPhysicOffset = logic.getMaxPhysicOffset();
}
}
}
return maxPhysicOffset;
ConsumeQueue#recover
// get all mapped files of the consume queue
final List<MappedFile> mappedFiles = this.mappedFileQueue.getMappedFiles();
if (!mappedFiles.isEmpty()) {
int index = mappedFiles.size() - 3; // start from the third file from the end
if (index < 0) {
index = 0;
}
// logical size of a ConsumeQueue file
int mappedFileSizeLogics = this.mappedFileSize;
// the mapped file for the ConsumeQueue
MappedFile mappedFile = mappedFiles.get(index);
// the ByteBuffer of the mapped file
ByteBuffer byteBuffer = mappedFile.sliceByteBuffer();
// offset being processed; by default, start from the first entry stored in the ConsumeQueue file
long processOffset = mappedFile.getFileFromOffset();
long mappedFileOffset = 0;
long maxExtAddr = 1;
while (true) {
// validate the entries contained in the ConsumeQueue file one by one
for (int i = 0; i < mappedFileSizeLogics; i += CQ_STORE_UNIT_SIZE) {
// read the entry content
// CommitLog physical offset
long offset = byteBuffer.getLong();
// total message length
int size = byteBuffer.getInt();
// tags hash code
long tagsCode = byteBuffer.getLong();
// the entry is valid if offset >= 0 and size > 0
if (offset >= 0 && size > 0) {
mappedFileOffset = i + CQ_STORE_UNIT_SIZE;
this.maxPhysicOffset = offset + size;
if (isExtAddr(tagsCode)) {
maxExtAddr = tagsCode;
}
} else {
log.info("recover current consume queue file over, " + mappedFile.getFileName() + " "
+ offset + " " + size + " " + tagsCode);
break;
}
}
// if every entry in this ConsumeQueue file is valid, continue validating the next file (index++)
if (mappedFileOffset == mappedFileSizeLogics) {
index++;
// validation finished for all files, exit the loop
if (index >= mappedFiles.size()) {
log.info("recover last consume queue file over, last mapped file "
+ mappedFile.getFileName());
break;
} else {
// get the next mapped file and continue checking
mappedFile = mappedFiles.get(index);
byteBuffer = mappedFile.sliceByteBuffer();
processOffset = mappedFile.getFileFromOffset();
mappedFileOffset = 0;
log.info("recover next consume queue file, " + mappedFile.getFileName());
}
} else {
log.info("recover current consume queue queue over " + mappedFile.getFileName() + " "
+ (processOffset + mappedFileOffset));
break;
}
}
// valid offset of the current ConsumeQueue
processOffset += mappedFileOffset;
// set flushedWhere and committedWhere to the valid offset
this.mappedFileQueue.setFlushedWhere(processOffset);
this.mappedFileQueue.setCommittedWhere(processOffset);
// delete redundant, invalid ConsumeQueue files
this.mappedFileQueue.truncateDirtyFiles(processOffset);
if (isExtReadEnable()) {
this.consumeQueueExt.recover();
log.info("Truncate consume queue extend file by max {}", maxExtAddr);
this.consumeQueueExt.truncateByMaxAddress(maxExtAddr);
}
}
DefaultMessageStore#recoverTopicQueueTable
HashMap<String/* topic-queueid */, Long/* offset */> table = new HashMap<String, Long>(1024);
// minimum offset of the CommitLog
long minPhyOffset = this.commitLog.getMinOffset();
// iterate over the consume queues and record each queue's max logical offset
for (ConcurrentMap<Integer, ConsumeQueue> maps : this.consumeQueueTable.values()) {
for (ConsumeQueue logic : maps.values()) {
String key = logic.getTopic() + "-" + logic.getQueueId();
table.put(key, logic.getMaxOffsetInQueue());
logic.correctMinOffset(minPhyOffset);
}
}
// replace the CommitLog's topic-queue table
this.commitLog.setTopicQueueTable(table);
2.2. Normal Recovery
The CommitLog recovery process is very similar to the ConsumeQueue recovery process.
CommitLog#recoverNormally
boolean checkCRCOnRecover = this.defaultMessageStore.getMessageStoreConfig().isCheckCRCOnRecover();
// get the list of mapped files
final List<MappedFile> mappedFiles = this.mappedFileQueue.getMappedFiles();
if (!mappedFiles.isEmpty()) {
// when the Broker restarts after a normal shutdown, recovery starts from the third file from the end; with fewer than 3 files, it starts from the first file
int index = mappedFiles.size() - 3;
if (index < 0) {
index = 0;
}
MappedFile mappedFile = mappedFiles.get(index);
ByteBuffer byteBuffer = mappedFile.sliceByteBuffer();
long processOffset = mappedFile.getFileFromOffset();
// offset validated so far within the current file
long mappedFileOffset = 0;
while (true) {
// check the message and return a dispatch request
DispatchRequest dispatchRequest = this.checkMessageAndReturnSize(byteBuffer, checkCRCOnRecover);
// message length
int size = dispatchRequest.getMsgSize();
// if the check succeeded and the message length is greater than 0, the message is valid; advance mappedFileOffset by the message length
if (dispatchRequest.isSuccess() && size > 0) {
mappedFileOffset += size;
}
// if the check succeeded and the message length is 0, the end of the file has been reached; if there is a next file, reset processOffset and mappedFileOffset and continue with it, otherwise break out of the loop
else if (dispatchRequest.isSuccess() && size == 0) {
index++;
// all files checked, exit the loop
if (index >= mappedFiles.size()) {
// Current branch can not happen
log.info("recover last 3 physics file over, last mapped file " + mappedFile.getFileName());
break;
} else {
// get and check the next mapped file
mappedFile = mappedFiles.get(index);
byteBuffer = mappedFile.sliceByteBuffer();
processOffset = mappedFile.getFileFromOffset();
// reset the in-file offset
mappedFileOffset = 0;
log.info("recover next physics file, " + mappedFile.getFileName());
}
}
// Intermediate file read error
else if (!dispatchRequest.isSuccess()) {
log.info("recover physics file end, " + mappedFile.getFileName());
break;
}
}
// update the MappedFileQueue's flushedWhere and committedWhere pointers
processOffset += mappedFileOffset;
this.mappedFileQueue.setFlushedWhere(processOffset);
this.mappedFileQueue.setCommittedWhere(processOffset);
// delete redundant, invalid files beyond the valid offset
this.mappedFileQueue.truncateDirtyFiles(processOffset);
// Clear ConsumeQueue redundant data
if (maxPhyOffsetOfConsumeQueue >= processOffset) {
log.warn("maxPhyOffsetOfConsumeQueue({}) >= processOffset({}), truncate dirty logic files", maxPhyOffsetOfConsumeQueue, processOffset);
this.defaultMessageStore.truncateDirtyLogicFiles(processOffset);
}
} else {
// Commitlog case files are deleted
log.warn("The commitlog files are deleted, and delete the consume queue files");
this.mappedFileQueue.setFlushedWhere(0);
this.mappedFileQueue.setCommittedWhere(0);
this.defaultMessageStore.destroyLogics();
MappedFileQueue#truncateDirtyFiles
// files about to be removed
List<MappedFile> willRemoveFiles = new ArrayList<MappedFile>();
// iterate over the mapped files
for (MappedFile file : this.mappedFiles) {
long fileTailOffset = file.getFileFromOffset() + this.mappedFileSize;
// the offset at the end of the file is greater than the target offset
if (fileTailOffset > offset) {
// the target offset is greater than or equal to the file's starting offset (the offset lies within this file)
if (offset >= file.getFileFromOffset()) {
// update wrotePosition, committedPosition and flushedPosition
file.setWrotePosition((int) (offset % this.mappedFileSize));
file.setCommittedPosition((int) (offset % this.mappedFileSize));
file.setFlushedPosition((int) (offset % this.mappedFileSize));
} else {
// the target offset is smaller than the file's starting offset, so this file was created after the valid data; release the memory held by the MappedFile and delete the file
file.destroy(1000);
willRemoveFiles.add(file);
}
}
}
this.deleteExpiredFile(willRemoveFiles);
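A quick standalone illustration of the truncation rule above, with made-up numbers: files entirely below the valid offset are kept, the file containing the offset has its write/commit/flush positions reset, and files created beyond it are destroyed:

// Illustrative only: the decision logic of MappedFileQueue#truncateDirtyFiles with toy values.
public class TruncateDirtyDemo {
    public static void main(String[] args) {
        long mappedFileSize = 1024;               // pretend each mapped file is 1 KB
        long[] fileFromOffsets = {0, 1024, 2048}; // starting offsets of three files
        long validOffset = 1500;                  // processOffset determined during recovery

        for (long from : fileFromOffsets) {
            long tail = from + mappedFileSize;
            if (tail <= validOffset) {
                System.out.println("file@" + from + ": fully valid, keep as-is");
            } else if (validOffset >= from) {
                System.out.println("file@" + from + ": reset positions to " + (validOffset % mappedFileSize));
            } else {
                System.out.println("file@" + from + ": created beyond valid data, destroy");
            }
        }
    }
}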
2.3. Abnormal Recovery
Abnormal-shutdown recovery is implemented in CommitLog#recoverAbnormally. The steps are basically the same as for recovery after a normal shutdown, with two main differences. First, normal recovery starts from the third file from the end by default, whereas abnormal recovery walks backwards from the last file to find the first file in which messages are stored correctly. Second, if the CommitLog directory contains no message files but files exist under the consume queue directory, those consume queue files must be destroyed.
CommitLog#recoverAbnormally
boolean checkCRCOnRecover = this.defaultMessageStore.getMessageStoreConfig().isCheckCRCOnRecover();
final List<MappedFile> mappedFiles = this.mappedFileQueue.getMappedFiles();
if (!mappedFiles.isEmpty()) {
// Looking beginning to recover from which file
// start checking from the last file
int index = mappedFiles.size() - 1;
MappedFile mappedFile = null;
for (; index >= 0; index--) {
mappedFile = mappedFiles.get(index);
// if this file is a valid file to recover from
if (this.isMappedFileMatchedRecover(mappedFile)) {
log.info("recover from this mapped file " + mappedFile.getFileName());
break;
}
}
// if no matching file was found, index-- has run past the first file and index is below 0
if (index < 0) {
// correct the index to 0
index = 0;
// take the first file and start recovery from it
mappedFile = mappedFiles.get(index);
}
ByteBuffer byteBuffer = mappedFile.sliceByteBuffer();
long processOffset = mappedFile.getFileFromOffset();
long mappedFileOffset = 0;
while (true) {
// validate the message
DispatchRequest dispatchRequest = this.checkMessageAndReturnSize(byteBuffer, checkCRCOnRecover);
int size = dispatchRequest.getMsgSize();
// the message passed validation
if (dispatchRequest.isSuccess()) {
// a valid message (size > 0)
if (size > 0) {
mappedFileOffset += size;
// forward the message to the consume queue and index files
if (this.defaultMessageStore.getMessageStoreConfig().isDuplicationEnable()) {
if (dispatchRequest.getCommitLogOffset() < this.defaultMessageStore.getConfirmOffset()) {
this.defaultMessageStore.doDispatch(dispatchRequest);
}
} else {
this.defaultMessageStore.doDispatch(dispatchRequest);
}
}
// reached the end of the file
else if (size == 0) {
index++; // move to the next file
// all files have been traversed
if (index >= mappedFiles.size()) {
// The current branch under normal circumstances should
// not happen
log.info("recover physics file over, last mapped file " + mappedFile.getFileName());
break;
} else {
// get the next mapped file
mappedFile = mappedFiles.get(index);
byteBuffer = mappedFile.sliceByteBuffer();
processOffset = mappedFile.getFileFromOffset();
mappedFileOffset = 0;
log.info("recover next physics file, " + mappedFile.getFileName());
}
}
} else {
log.info("recover physics file end, " + mappedFile.getFileName() + " pos=" + byteBuffer.position());
break;
}
}
// current valid offset
processOffset += mappedFileOffset;
// set flushedWhere and committedWhere to the valid offset
this.mappedFileQueue.setFlushedWhere(processOffset);
this.mappedFileQueue.setCommittedWhere(processOffset);
// truncate redundant data
this.mappedFileQueue.truncateDirtyFiles(processOffset);
// clear redundant ConsumeQueue data
if (maxPhyOffsetOfConsumeQueue >= processOffset) {
log.warn("maxPhyOffsetOfConsumeQueue({}) >= processOffset({}), truncate dirty logic files", maxPhyOffsetOfConsumeQueue, processOffset);
this.defaultMessageStore.truncateDirtyLogicFiles(processOffset);
}
}
// the CommitLog files have been deleted
else {
log.warn("The commitlog files are deleted, and delete the consume queue files");
// reset flushedWhere and committedWhere
this.mappedFileQueue.setFlushedWhere(0);
this.mappedFileQueue.setCommittedWhere(0);
// destroy the consume queue files
this.defaultMessageStore.destroyLogics();
}
This article is intended only for my own study; corrections for any shortcomings or mistakes are welcome!