Kafka cluster management: how the controller is elected

Call chain: KafkaServer.startup() -> kafkaController.startup() -> controllerElector.startup -> elect
def elect: Boolean = {
  val timestamp = SystemTime.milliseconds.toString
  // Build the payload to write into ZooKeeper: a version field,
  // this broker's id, and a timestamp.
  val electString = Json.encode(Map("version" -> 1, "brokerid" -> brokerId, "timestamp" -> timestamp))
  // Look up the current controller's id in ZooKeeper. In the scenario we are
  // walking through, this is the very first broker to start, so no controller
  // exists yet and the lookup finds nothing; in that case getControllerID
  // returns -1.
  leaderId = getControllerID
  /*
   * We can get here during the initial startup and the handleDeleted ZK callback. Because of the potential race condition,
   * it's possible that the controller has already been elected when we get here. This check will prevent the following
   * createEphemeralPath method from getting into an infinite loop if this broker is already the controller.
   */
  if(leaderId != -1) {
    debug("Broker %d has been elected as leader, so stopping the election process.".format(leaderId))
    // Reaching this point means an election has already completed.
    return amILeader
  }
  try {
    // Create an ephemeral znode at /controller and write the payload above
    // into it. (An ephemeral znode is deleted automatically when the creating
    // session ends — this is what makes re-election on controller failure work.)
    val zkCheckedEphemeral = new ZKCheckedEphemeral(electionPath,
                                                    electString,
                                                    controllerContext.zkUtils.zkConnection.getZookeeper,
                                                    JaasUtils.isZkSecurityEnabled())
    zkCheckedEphemeral.create()
    info(brokerId + " successfully elected as leader")
    // The creation succeeded, so this broker is now the controller.
    leaderId = brokerId
    // onBecomingLeader is a callback function: it runs once a controller
    // has been elected (we trace below what it points to).
    onBecomingLeader()
  } catch {
    // Another broker created /controller first: re-read who won.
    case e: ZkNodeExistsException =>
      leaderId = getControllerID
      if (leaderId != -1)
        debug("Broker %d was elected as leader instead of broker %d".format(leaderId, brokerId))
      else
        warn("A leader has been elected but just resigned, this will result in another round of election")
    case e2: Throwable =>
      error("Error while electing or becoming leader on broker %d".format(brokerId), e2)
      resign()
  }
  amILeader
}
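To see the mechanism without Kafka's wrappers, here is a minimal, self-contained sketch of the same ephemeral-znode election against the raw ZooKeeper client. Object and variable names are illustrative, and error handling is trimmed down to the one race that matters:

import org.apache.zookeeper.{CreateMode, KeeperException, ZooDefs, ZooKeeper}

object EphemeralElectionSketch {
  val ElectionPath = "/controller"

  // Returns true if this process won the election.
  def tryElect(zk: ZooKeeper, brokerId: Int): Boolean = {
    val payload = s"""{"version":1,"brokerid":$brokerId,"timestamp":"${System.currentTimeMillis}"}"""
    try {
      // EPHEMERAL: the znode vanishes when our session dies, so a dead
      // controller automatically frees the path for the next election.
      zk.create(ElectionPath, payload.getBytes("UTF-8"),
                ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL)
      true
    } catch {
      case _: KeeperException.NodeExistsException => false // somebody beat us to it
    }
  }
}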
-> getControllerID
// Reads the data written under /controller (which contains a broker id).
private def getControllerID(): Int = {
  // Read the data stored under the /controller path.
  controllerContext.zkUtils.readDataMaybeNull(electionPath)._1 match {
    // If data is present, it contains the id of some broker:
    // that broker is the current controller.
    case Some(controller) => KafkaController.parseControllerId(controller)
    // If nothing is there, no controller has been elected yet: return -1.
    case None => -1
  }
}
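KafkaController.parseControllerId simply pulls the "brokerid" field back out of the JSON that elect wrote. A rough stand-in, regex-based purely for illustration (the real method uses Kafka's Json helper and throws on malformed input):

// Illustrative only, not Kafka's parseControllerId.
def parseControllerIdSketch(json: String): Int = {
  // Payload shape: {"version":1,"brokerid":3,"timestamp":"..."}
  val BrokerId = """"brokerid"\s*:\s*(\d+)""".r
  BrokerId.findFirstMatchIn(json).map(_.group(1).toInt).getOrElse(-1)
}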
onBecomingLeader -> ZookeeperLeaderElector
-> private val controllerElector = new ZookeeperLeaderElector(controllerContext, ZkUtils.ControllerPath, onControllerFailover,
       onControllerResignation, config.brokerId)
-> onControllerFailover
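So the onBecomingLeader called inside elect is whatever callback the elector's constructor received — here, onControllerFailover. A tiny self-contained sketch of that wiring (class and method names below are made up for illustration):

// Illustrative stand-in for how ZookeeperLeaderElector holds its callback.
class LeaderElectorSketch(electionPath: String, onBecomingLeader: () => Unit) {
  def wonElection(): Unit =
    onBecomingLeader() // in Kafka this fires KafkaController.onControllerFailover
}

// Usage:
// new LeaderElectorSketch("/controller", () => println("acting as controller")).wonElection()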
def onControllerFailover() {
  if(isRunning) {
    info("Broker %d starting become controller state transition".format(config.brokerId))
    // read controller epoch from zk
    readControllerEpochFromZookeeper()
    // increment the controller epoch
    incrementControllerEpoch(zkUtils.zkClient)
    // before reading source of truth from zookeeper, register the listeners to get broker/topic callbacks
    // The most important work in this method is registering these listeners,
    // which watch for changes under various ZooKeeper paths. It is the freshly
    // elected controller that registers them: the first thing a new controller
    // does is set watches on the relevant ZK paths, and it is by reacting to
    // changes under those paths that the controller manages the Kafka cluster.
    registerReassignedPartitionsListener()
    registerIsrChangeNotificationListener()
    registerPreferredReplicaElectionListener()
    // Watches for partition changes.
    partitionStateMachine.registerListeners()
    // Registers the listener through which the controller notices
    // that a new broker has joined the cluster.
    replicaStateMachine.registerListeners()
    initializeControllerContext()
    replicaStateMachine.startup()
    partitionStateMachine.startup()
    // register the partition change listeners for all existing topics on failover
    controllerContext.allTopics.foreach(topic => partitionStateMachine.registerPartitionChangeListener(topic))
    info("Broker %d is ready to serve as the new controller with epoch %d".format(config.brokerId, epoch))
    maybeTriggerPartitionReassignment()
    maybeTriggerPreferredReplicaElection()
    /* send partition leadership info to all live brokers */
    sendUpdateMetadataRequest(controllerContext.liveOrShuttingDownBrokerIds.toSeq)
    if (config.autoLeaderRebalanceEnable) {
      info("starting the partition rebalance scheduler")
      autoRebalanceScheduler.startup()
      autoRebalanceScheduler.schedule("partition-rebalance-thread", checkAndTriggerPartitionRebalance,
        5, config.leaderImbalanceCheckIntervalSeconds.toLong, TimeUnit.SECONDS)
    }
    deleteTopicManager.start()
  }
  else
    info("Controller has been shut down, aborting startup/failover")
}
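Two of the steps above deserve a closer look. First, incrementControllerEpoch relies on a ZooKeeper conditional update: the new epoch is written only if /controller_epoch still has the znode version we last read, so two brokers racing through a failover cannot both bump it. A minimal sketch of that pattern with the raw client (the helper's name and arguments are illustrative, not Kafka's method):

import org.apache.zookeeper.{KeeperException, ZooKeeper}

// Hypothetical helper, not Kafka's incrementControllerEpoch itself.
def bumpEpoch(zk: ZooKeeper, currentEpoch: Int, expectedZkVersion: Int): Boolean =
  try {
    // setData succeeds only if the znode's version still equals expectedZkVersion.
    zk.setData("/controller_epoch", (currentEpoch + 1).toString.getBytes("UTF-8"), expectedZkVersion)
    true
  } catch {
    case _: KeeperException.BadVersionException => false // lost the race; re-read and retry
  }

Second, the listeners: Kafka 0.10.x uses the I0Itec zkclient library, and each register*Listener() call above boils down to subscribing a callback on a ZK path. A self-contained sketch of that pattern (the path and handler body are illustrative):

import java.util
import org.I0Itec.zkclient.{IZkChildListener, ZkClient}

class BrokerChangeSketch extends IZkChildListener {
  // Invoked whenever a child of parentPath is created or deleted,
  // e.g. a broker (de)registering its ephemeral node under /brokers/ids.
  override def handleChildChange(parentPath: String, currentChilds: util.List[String]): Unit =
    println(s"$parentPath children are now: $currentChilds")
}

// val zkClient = new ZkClient("localhost:2181")
// zkClient.subscribeChildChanges("/brokers/ids", new BrokerChangeSketch)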