HBase Source Code Analysis (12): HMaster and HRegionServer Startup Process

Part 9 (the hbase shell startup process) ended at the entry points of HMaster and HRegionServer. In local (standalone) mode only HMaster needs to be started; it starts ZooKeeper and an HRegionServer internally.

The main flow:

1) HMaster is a subclass of HRegionServer, so constructing an HMaster first runs the HRegionServer startup logic (see the sketch after this list).
2) The HRegionServer constructor starts many components: the ZooKeeper-based coordinated state manager, the RPC services, the table lock manager, various status trackers, the WAL-related classes, and so on.
3) HMaster registers itself in ZooKeeper, writing its current ServerName so that clients can find it.
4) HMaster loads meta information into memory and then waits for RegionServers to report in; servers that are up in ZooKeeper but have not yet reported are picked up from ZooKeeper.
5) HMaster starts the load balancer.
6) Finally, a number of health-check and housekeeping chores are started.
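
Step 1 above is the key structural point: because HMaster extends HRegionServer, constructing an HMaster runs the whole region-server initialization before any master-specific code. A self-contained toy sketch of that ordering (class names reused purely for illustration; these are not the real HBase declarations):

// Schematic sketch of the relationship described above (not the real HBase code):
// HMaster extends HRegionServer, so new HMaster(...) first runs the whole
// HRegionServer constructor, then the master-only initialization.
class HRegionServer {
  HRegionServer() {
    System.out.println("HRegionServer init: ZK watcher, RPC, trackers, WAL roller ...");
  }
}

class HMaster extends HRegionServer {
  HMaster() {
    super(); // region-server init runs first
    System.out.println("HMaster init: ClusterStatusPublisher, ActiveMasterManager ...");
  }
}

public class StartupOrderDemo {
  public static void main(String[] args) {
    new HMaster(); // prints the region-server line before the master line
  }
}

Running StartupOrderDemo prints the region-server line before the master line, mirroring the constructor chain walked through below.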



HMaster.java
/**
 * @see org.apache.hadoop.hbase.master.HMasterCommandLine
 */
public static void main(String [] args) {
  VersionInfo.logVersion();
  new HMasterCommandLine(HMaster.class).doMain(args);
}
main() delegates to HMasterCommandLine, whose run() method parses the command:
   
public int run(String args[]) throws Exception {
  // ... (omitted: argument and configuration parsing)
  String command = remainingArgs.get(0);
  if ("start".equals(command)) {
    return startMaster();
  } else if ("stop".equals(command)) {
    return stopMaster();
  } else if ("clear".equals(command)) {
    return (ZNodeClearer.clear(getConf()) ? 0 : 1);
  } else {
    usage("Invalid command: " + command);
    return 1;
  }
}

startMaster
   
private int startMaster() {
  Configuration conf = getConf();
  try {
    // Local mode: master and region server share one JVM
    if (LocalHBaseCluster.isLocal(conf)) {
      // ... (omitted: configuration of the embedded ZooKeeper cluster)
      LocalHBaseCluster cluster = new LocalHBaseCluster(conf, mastersCount, regionServersCount,
          LocalHMaster.class, HRegionServer.class);
      ((LocalHMaster) cluster.getMaster(0)).setZKCluster(zooKeeperCluster);
      cluster.startup();
      waitOnMasterThreads(cluster);
    } else {
      // Distributed mode
      logProcessInfo(getConf());
      CoordinatedStateManager csm =
          CoordinatedStateManagerFactory.getCoordinatedStateManager(conf);
      HMaster master = HMaster.constructMaster(masterClass, conf, csm);
      master.start();
      master.join();
      if (master.isAborted())
        throw new RuntimeException("HMaster Aborted");
    }
  } catch (Throwable t) {
    // ... (omitted in the original excerpt: error logging and non-zero exit code)
  }
  return 0;
}
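
A side note on the distributed branch: HMaster.constructMaster instantiates the configured master class reflectively, so subclasses such as LocalHMaster can be plugged in. A simplified sketch of that reflective construction (error handling reduced to a RuntimeException; the real method differs in details):

import java.lang.reflect.Constructor;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.master.HMaster;

// Simplified sketch of the reflective construction done by HMaster.constructMaster():
// look up the (Configuration, CoordinatedStateManager) constructor of the configured
// master class and invoke it.
public final class ConstructMasterSketch {
  public static HMaster construct(Class<? extends HMaster> masterClass,
      Configuration conf, CoordinatedStateManager csm) {
    try {
      Constructor<? extends HMaster> ctor =
          masterClass.getConstructor(Configuration.class, CoordinatedStateManager.class);
      return ctor.newInstance(conf, csm);
    } catch (Exception e) {
      throw new RuntimeException("Failed construction of Master: " + masterClass, e);
    }
  }
}

In local mode the same constructor chain is exercised through LocalHBaseCluster, which wraps the master and region servers in threads inside one JVM.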

1) Local mode
In local mode, LocalHBaseCluster creates the local master and region server (wrapped in MasterThread and RegionServerThread instances) inside a single JVM. During construction, the ZooKeeper-based coordinated state manager (ZkCoordinatedStateManager) is initialized; its initialize() method creates a whole family of ZK coordination objects:
@Override
public void initialize(Server server) {
  this.server = server;
  this.watcher = server.getZooKeeper();
  splitLogWorkerCoordination = new ZkSplitLogWorkerCoordination(this, watcher);
  splitLogManagerCoordination = new ZKSplitLogManagerCoordination(this, watcher);
  splitTransactionCoordination = new ZKSplitTransactionCoordination(this, watcher);
  closeRegionCoordination = new ZkCloseRegionCoordination(this, watcher);
  openRegionCoordination = new ZkOpenRegionCoordination(this, watcher);
  regionMergeCoordination = new ZkRegionMergeCoordination(this, watcher);
}
Because HMaster extends HRegionServer, both the master and the region server go through the HRegionServer constructor below.
   
public HRegionServer(Configuration conf, CoordinatedStateManager csm)
    throws IOException, InterruptedException {
  super("RegionServer"); // thread name
  // ... (omitted: configuration setup)
  rpcControllerFactory = RpcControllerFactory.instantiate(this.conf);
  rpcRetryingCallerFactory = RpcRetryingCallerFactory.instantiate(this.conf);

  // Configure JAAS / keytab files for the ZooKeeper client
  ZKUtil.loginClient(this.conf, HConstants.ZK_CLIENT_KEYTAB_FILE,
      HConstants.ZK_CLIENT_KERBEROS_PRINCIPAL, hostName);
  // Kerberos login for the server itself
  login(userProvider, hostName);

  // Initialize the file system
  this.fs = new HFileSystem(this.conf, useHBaseChecksum);
  // Table descriptor accessor (reads table metadata from the filesystem)
  this.tableDescriptors = new FSTableDescriptors(
      this.conf, this.fs, this.rootDir, !canUpdateTableDescriptor(), false);
  // Executor service for background tasks
  service = new ExecutorService(getServerName().toShortString());
  // Tracing span receivers configured in hbase-site.xml
  spanReceiverHost = SpanReceiverHost.getInstance(getConfiguration());

  // Some unit tests don't need a cluster, so no zookeeper at all
  if (!conf.getBoolean("hbase.testing.nocluster", false)) {
    // Open connection to zookeeper and set primary watcher
    zooKeeper = new ZooKeeperWatcher(conf, getProcessName() + ":" +
        rpcServices.isa.getPort(), this, canCreateBaseZNode());
    // Coordinated state manager (ZK-based coordination, see initialize() above)
    this.csm = (BaseCoordinatedStateManager) csm;
    this.csm.initialize(this);
    this.csm.start();
    // Table lock manager
    tableLockManager = TableLockManager.createTableLockManager(
        conf, zooKeeper, serverName);
    // Track the active master's address
    masterAddressTracker = new MasterAddressTracker(getZooKeeper(), this);
    masterAddressTracker.start();
    // Track the cluster up/down status
    clusterStatusTracker = new ClusterStatusTracker(zooKeeper, this);
    clusterStatusTracker.start();
  }
  // Start the RPC server side so client requests can be accepted
  rpcServices.start();
  // Start the web UI
  putUpWebUI();
  // WAL log roller
  this.walRoller = new LogRoller(this, this);
  // ChoreService for miscellaneous periodic housekeeping
  this.choreService = new ChoreService(getServerName().toString(), true);
  this.flushThroughputController = FlushThroughputControllerFactory.create(this, conf);

  // Reload the configuration when the process receives a SIGHUP signal (non-Windows only)
  if (!SystemUtils.IS_OS_WINDOWS) {
    Signal.handle(new Signal("HUP"), new SignalHandler() {
      @Override
      public void handle(Signal signal) {
        getConfiguration().reloadConfiguration();
        configurationManager.notifyAllObservers(getConfiguration());
      }
    });
  }
  // Periodically clean up compacted store files that are no longer referenced
  this.compactedFileDischarger =
      new CompactedHFilesDischarger(cleanerInterval, (Stoppable) this, (RegionServerServices) this);
  choreService.scheduleChore(compactedFileDischarger);
}
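
Several of the components created above (CompactedHFilesDischarger here, and the balancer and cluster-status chores on the master later) follow the same pattern: a ScheduledChore subclass whose chore() method is invoked periodically by a ChoreService. A minimal, self-contained sketch of that pattern, assuming the hbase-common classes ChoreService, ScheduledChore and Stoppable are on the classpath (the chore name and period are made up):

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

// Minimal example of the chore pattern used above:
// a ChoreService periodically invokes chore() on each scheduled ScheduledChore.
public class ChoreDemo {
  public static void main(String[] args) throws InterruptedException {
    // Simple Stoppable; HBase passes the server itself here.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped = false;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    ChoreService choreService = new ChoreService("demo");   // thread-name prefix

    ScheduledChore heartbeat = new ScheduledChore("demo-heartbeat", stopper, 1000) {
      @Override
      protected void chore() {
        // Real chores (CompactedHFilesDischarger, BalancerChore, ...) do their
        // housekeeping here; the demo just logs a tick.
        System.out.println("chore tick at " + System.currentTimeMillis());
      }
    };

    choreService.scheduleChore(heartbeat);
    Thread.sleep(5000);      // let it tick a few times
    stopper.stop("demo done");
    choreService.shutdown();
  }
}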

HMaster-specific startup:
   
public HMaster(final Configuration conf, CoordinatedStateManager csm)
    throws IOException, KeeperException, InterruptedException {
  super(conf, csm);

  // Publish the cluster status, e.g. when a region server dies, clients are told
  // right away instead of waiting for their sockets to time out.
  clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);
  getChoreService().scheduleChore(clusterStatusPublisherChore);

  // Some unit tests don't need a cluster, so no zookeeper at all
  if (!conf.getBoolean("hbase.testing.nocluster", false)) {
    // Manage this master's registration in ZooKeeper
    activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this);
    int infoPort = putUpJettyServer();
    // Try to become the active master: create a znode in ZooKeeper and write
    // this master's address into it
    startActiveMasterManager(infoPort);
  } else {
    activeMasterManager = null;
  }
}

startActiveMasterManager(infoPort) is worth a closer look. It eventually calls the following method in ActiveMasterManager.

When this master comes up, it tries to write its node information into ZooKeeper. If the write succeeds, this master is the active master and it removes itself from the backup-master directory. If the write fails, an active master already exists, so it checks whether the registered address matches its own; if it does, this master was probably restarted, so the stale znode is deleted along with the znode file kept on disk.
   
boolean blockUntilBecomingActiveMaster(
    int checkInterval, MonitoredTask startupStatus) {
  String backupZNode = ZKUtil.joinZNode(
      this.watcher.backupMasterAddressesZNode, this.sn.toString());
  while (!(master.isAborted() || master.isStopped())) {
    startupStatus.setStatus("Trying to register in ZK as active master");
    try {
      if (MasterAddressTracker.setMasterAddress(this.watcher,
          this.watcher.getMasterAddressZNode(), this.sn, infoPort)) {
        // If we were a backup master before, delete our ZNode from the backup
        // master directory since we are the active now)
        if (ZKUtil.checkExists(this.watcher, backupZNode) != -1) {
          LOG.info("Deleting ZNode for " + backupZNode + " from backup master directory");
          ZKUtil.deleteNodeFailSilent(this.watcher, backupZNode);
        }
        // Save the znode in a file, this will allow to check if we crash in the launch scripts
        ZNodeClearer.writeMyEphemeralNodeOnDisk(this.sn.toString());
        // We are the master, return
        startupStatus.setStatus("Successfully registered as active master.");
        this.clusterHasActiveMaster.set(true);
        LOG.info("Registered Active Master=" + this.sn);
        return true;
      }
      // There is another active master running elsewhere or this is a restart
      // and the master ephemeral node has not expired yet.
      this.clusterHasActiveMaster.set(true);
      String msg;
      byte[] bytes =
          ZKUtil.getDataAndWatch(this.watcher, this.watcher.getMasterAddressZNode());
      if (bytes == null) {
        msg = ("A master was detected, but went down before its address " +
            "could be read. Attempting to become the next active master");
      } else {
        ServerName currentMaster;
        try {
          currentMaster = ServerName.parseFrom(bytes);
        } catch (DeserializationException e) {
          LOG.warn("Failed parse", e);
          // Hopefully next time around we won't fail the parse. Dangerous.
          continue;
        }
        if (ServerName.isSameHostnameAndPort(currentMaster, this.sn)) {
          msg = ("Current master has this master's address, " +
              currentMaster + "; master was restarted? Deleting node.");
          // Hurry along the expiration of the znode.
          ZKUtil.deleteNode(this.watcher, this.watcher.getMasterAddressZNode());
          // We may have failed to delete the znode at the previous step, but
          // we delete the file anyway: a second attempt to delete the znode is likely to fail again.
          ZNodeClearer.deleteMyEphemeralNodeOnDisk();
        } else {
          msg = "Another master is the active master, " + currentMaster +
              "; waiting to become the next active master";
        }
      }
      LOG.info(msg);
      startupStatus.setStatus(msg);
    } catch (KeeperException ke) {
      master.abort("Received an unexpected KeeperException, aborting", ke);
      return false;
    }
    synchronized (this.clusterHasActiveMaster) {
      while (clusterHasActiveMaster.get() && !master.isStopped()) {
        try {
          clusterHasActiveMaster.wait(checkInterval);
        } catch (InterruptedException e) {
          // We expect to be interrupted when a master dies,
          // will fall out if so
          LOG.debug("Interrupted waiting for master to die", e);
        }
      }
      if (clusterShutDown.get()) {
        this.master.stop(
            "Cluster went down before this master became active");
      }
    }
  }
  return false;
}
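
The method above is a standard ZooKeeper leader election: whoever creates the ephemeral master znode becomes the active master; everyone else watches the znode and waits. A minimal sketch of the same idea using the plain ZooKeeper client, with an invented znode path (this is not HBase's actual znode layout):

import org.apache.zookeeper.*;

// Toy leader election in the style of blockUntilBecomingActiveMaster.
// The znode path and data format are illustrative only.
public class ToyMasterElection {
  public static boolean tryBecomeActive(ZooKeeper zk, String myAddress) throws Exception {
    try {
      // Ephemeral node: it disappears automatically if this process dies,
      // which is what lets a backup master take over.
      zk.create("/toy/master", myAddress.getBytes("UTF-8"),
          ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      return true;                       // we are the active master
    } catch (KeeperException.NodeExistsException e) {
      byte[] current = zk.getData("/toy/master", true, null); // also sets a watch
      System.out.println("Active master already registered: " + new String(current, "UTF-8"));
      return false;                      // wait for the watch to fire, then retry
    }
  }
}

Because the znode is ephemeral, it disappears when the active master's ZooKeeper session expires, which is exactly what wakes the backup masters waiting inside blockUntilBecomingActiveMaster.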

If the master that started becomes the active one (not a backup), it then calls finishActiveMasterInitialization.

The most important job of this method is to wait for RegionServers to report in to the HMaster; any servers that are known to ZooKeeper but have not yet reported are registered from ZooKeeper.

   
private void finishActiveMasterInitialization(MonitoredTask status)
    throws IOException, InterruptedException, KeeperException, CoordinatedStateException {
  /*
   * We are active master now... go initialize components we need to run.
   * Note, there may be dross in zk from previous runs; it'll get addressed
   * below after we determine if cluster startup or failover.
   */
  status.setStatus("Initializing Master file system");
  this.masterActiveTime = System.currentTimeMillis();
  // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
  this.fileSystemManager = new MasterFileSystem(this, this); // (call truncated in the original excerpt; second argument assumed to be `this`)

  // Load table descriptors into memory
  // enable table descriptors cache
  this.tableDescriptors.setCacheOn();
  // set the META's descriptor to the correct replication
  this.tableDescriptors.get(TableName.META_TABLE_NAME).setRegionReplication(
      conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));
  // warm-up HTDs cache on master initialization
  if (preLoadTableDescriptors) {
    status.setStatus("Pre-loading table descriptors");
    this.tableDescriptors.getAll();
  }

  // Write the cluster ID into ZooKeeper
  // publish cluster ID
  status.setStatus("Publishing Cluster ID in ZooKeeper");
  ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
  this.serverManager = createServerManager(this, this);
  setupClusterConnection();
  // Invalidate all write locks held previously
  this.tableLockManager.reapWriteLocks();
  status.setStatus("Initializing ZK system trackers");
  initializeZKBasedSystemTrackers();
  // initialize master side coprocessors before we start handling requests
  status.setStatus("Initializing master coprocessors");
  this.cpHost = new MasterCoprocessorHost(this, this.conf);
  // start up all service threads.
  status.setStatus("Initializing master service threads");
  startServiceThreads();
  // Wake up this server to check in
  sleeper.skipSleepCycle();

  // Wait for region servers to report in
  this.serverManager.waitForRegionServers(status);
  // Check zk for region servers that are up but didn't register
  for (ServerName sn: this.regionServerTracker.getOnlineServers()) {
    // The isServerOnline check is opportunistic, correctness is handled inside
    if (!this.serverManager.isServerOnline(sn)
        && serverManager.checkAndRecordNewServer(sn, ServerLoad.EMPTY_SERVERLOAD)) {
      LOG.info("Registered server found up in zk but who has not yet reported in: " + sn);
    }
  }

  // get a list for previously failed RS which need log splitting work
  // we recover hbase:meta region servers inside master initialization and
  // handle other failed servers in SSH in order to start up master node ASAP
  Set<ServerName> previouslyFailedServers =
      this.fileSystemManager.getFailedServersFromLogFolders();
  // log splitting for hbase:meta server
  ServerName oldMetaServerLocation = metaTableLocator.getMetaRegionLocation(this.getZooKeeper());
  if (oldMetaServerLocation != null && previouslyFailedServers.contains(oldMetaServerLocation)) {
    splitMetaLogBeforeAssignment(oldMetaServerLocation);
    // Note: we can't remove oldMetaServerLocation from previousFailedServers list because it
    // may also host user regions
  }
  Set<ServerName> previouslyFailedMetaRSs = getPreviouselyFailedMetaServersFromZK();
  // need to use union of previouslyFailedMetaRSs recorded in ZK and previouslyFailedServers
  // instead of previouslyFailedMetaRSs alone to address the following two situations:
  // 1) the chained failure situation(recovery failed multiple times in a row).
  // 2) master get killed right before it could delete the recovering hbase:meta from ZK while the
  // same server still has non-meta wals to be replayed so that
  // removeStaleRecoveringRegionsFromZK can't delete the stale hbase:meta region
  // Passing more servers into splitMetaLog is all right. If a server doesn't have hbase:meta wal,
  // there is no op for the server.
  previouslyFailedMetaRSs.addAll(previouslyFailedServers);
  this.initializationBeforeMetaAssignment = true;
  // Wait for regionserver to finish initialization.
  if (BaseLoadBalancer.tablesOnMaster(conf)) {
    waitForServerOnline();
  }

  // Initialize the load balancer
  this.balancer.setClusterStatus(getClusterStatus());
  this.balancer.setMasterServices(this);
  this.balancer.initialize();
  // Check if master is shutting down because of some issue
  // in initializing the regionserver or the balancer.
  if (isStopped()) return;

  // Make sure meta assigned before proceeding.
  status.setStatus("Assigning Meta Region");
  assignMeta(status, previouslyFailedMetaRSs, HRegionInfo.DEFAULT_REPLICA_ID);
  // check if master is shutting down because above assignMeta could return even hbase:meta isn't
  // assigned when master is shutting down
  if (isStopped()) return;

  status.setStatus("Submitting log splitting work for previously failed region servers");
  // Master has recovered hbase:meta region server and we put
  // other failed region servers in a queue to be handled later by SSH
  for (ServerName tmpServer : previouslyFailedServers) {
    this.serverManager.processDeadServer(tmpServer, true);
  }

  // Update meta with new PB serialization if required. i.e migrate all HRI to PB serialization
  // in meta. This must happen before we assign all user regions or else the assignment will fail.
  if (this.conf.getBoolean("hbase.MetaMigrationConvertingToPB", true)) {
    MetaMigrationConvertingToPB.updateMetaIfNecessary(this);
  }

  // Fix up assignment manager status
  status.setStatus("Starting assignment manager");
  this.assignmentManager.joinCluster();
  // set cluster status again after user regions are assigned
  this.balancer.setClusterStatus(getClusterStatus());

  // Start balancer and meta catalog janitor after meta and regions have been assigned.
  status.setStatus("Starting balancer and catalog janitor");
  this.clusterStatusChore = new ClusterStatusChore(this, balancer);
  getChoreService().scheduleChore(clusterStatusChore);
  this.balancerChore = new BalancerChore(this);
  getChoreService().scheduleChore(balancerChore);
  this.normalizerChore = new RegionNormalizerChore(this);
  getChoreService().scheduleChore(normalizerChore);
  this.catalogJanitorChore = new CatalogJanitor(this, this);
  getChoreService().scheduleChore(catalogJanitorChore);
  // Do Metrics periodically
  periodicDoMetricsChore = new PeriodicDoMetrics(msgInterval, this);
  getChoreService().scheduleChore(periodicDoMetricsChore);

  status.setStatus("Starting namespace manager");
  initNamespace();

  if (this.cpHost != null) {
    try {
      this.cpHost.preMasterInitialization();
    } catch (IOException e) {
      LOG.error("Coprocessor preMasterInitialization() hook failed", e);
    }
  }

  status.markComplete("Initialization successful");
  LOG.info("Master has completed initialization");
  configurationManager.registerObserver(this.balancer);
  // Set master as 'initialized'.
  setInitialized(true);

  status.setStatus("Starting quota manager");
  initQuotaManager();

  // assign the meta replicas
  Set<ServerName> EMPTY_SET = new HashSet<ServerName>();
  int numReplicas = conf.getInt(HConstants.META_REPLICAS_NUM,
      HConstants.DEFAULT_META_REPLICA_NUM);
  for (int i = 1; i < numReplicas; i++) {
    assignMeta(status, EMPTY_SET, i);
  }
  unassignExcessMetaReplica(zooKeeper, numReplicas);

  // clear the dead servers with same host name and port of online server because we are not
  // removing dead server with same hostname and port of rs which is trying to check in before
  // master initialization. See HBASE-5916.
  this.serverManager.clearDeadServersWithSameHostNameAndPortOfOnlineServer();

  // Check and set the znode ACLs if needed in case we are overtaking a non-secure configuration
  status.setStatus("Checking ZNode ACLs");
  zooKeeper.checkAndSetZNodeAcls();

  status.setStatus("Calling postStartMaster coprocessors");
  if (this.cpHost != null) {
    // don't let cp initialization errors kill the master
    try {
      this.cpHost.postStartMaster();
    } catch (IOException ioe) {
      LOG.error("Coprocessor postStartMaster() hook failed", ioe);
    }
  }

  zombieDetector.interrupt();
}
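
serverManager.waitForRegionServers(status) is the "wait for RegionServers to report in" step from the summary at the top. In the real ServerManager it is driven by configuration keys such as hbase.master.wait.on.regionservers.mintostart, hbase.master.wait.on.regionservers.timeout and hbase.master.wait.on.regionservers.interval. A heavily simplified, self-contained sketch of that waiting logic (the defaults and the two-phase loop here are illustrative, not the exact HBase behaviour):

import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.conf.Configuration;

// Toy version of the "wait for region servers to report in" step.
// The config keys mirror the ones ServerManager reads; defaults are illustrative.
public class WaitForRegionServersSketch {
  public static void waitForRegionServers(Configuration conf, AtomicInteger reportedServers)
      throws InterruptedException {
    int minToStart = conf.getInt("hbase.master.wait.on.regionservers.mintostart", 1);
    long timeout   = conf.getLong("hbase.master.wait.on.regionservers.timeout", 4500);
    long interval  = conf.getLong("hbase.master.wait.on.regionservers.interval", 1500);
    long start = System.currentTimeMillis();

    // Phase 1: block until the minimum number of servers has checked in.
    while (reportedServers.get() < minToStart) {
      Thread.sleep(interval);
    }
    // Phase 2: give stragglers until the timeout (measured from startup) to check in too.
    while (System.currentTimeMillis() - start < timeout) {
      Thread.sleep(interval);
    }
  }
}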

Finally, the run() method is called. It mainly starts the health-check and similar chore classes, then loops until the server is told to stop.



The region server's run() also starts many more threads; the most important are MemStoreFlusher, CompactSplitThread, and the observers registered through registerConfigurationObservers.
The memstore flusher will come up again in the next chapter, which covers the flush and split process.
    
private void initializeThreads() throws IOException {
  // Cache flushing thread.
  this.cacheFlusher = new MemStoreFlusher(conf, this);
  // Compaction thread
  this.compactSplitThread = new CompactSplitThread(this);
  // Background thread to check for compactions; needed if region has not gotten updates
  // in a while. It will take care of not checking too frequently on store-by-store basis.
  this.compactionChecker = new CompactionChecker(this, this.threadWakeFrequency, this);
  this.periodicFlusher = new PeriodicMemstoreFlusher(this.threadWakeFrequency, this);
  this.leases = new Leases(this.threadWakeFrequency);
  // Create the thread to clean the moved regions list
  movedRegionsCleaner = MovedRegionsCleaner.create(this);
  if (this.nonceManager != null) {
    // Create the scheduled chore that cleans up nonces.
    nonceManagerChore = this.nonceManager.createCleanupScheduledChore(this);
  }
  // Setup the Quota Manager
  rsQuotaManager = new RegionServerQuotaManager(this);
  // Setup RPC client for master communication
  rpcClient = RpcClientFactory.createClient(conf, clusterId, new InetSocketAddress(
      rpcServices.isa.getAddress(), 0), clusterConnection.getConnectionMetrics());
  boolean onlyMetaRefresh = false;
  int storefileRefreshPeriod = conf.getInt(
      StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD,
      StorefileRefresherChore.DEFAULT_REGIONSERVER_STOREFILE_REFRESH_PERIOD);
  if (storefileRefreshPeriod == 0) {
    storefileRefreshPeriod = conf.getInt(
        StorefileRefresherChore.REGIONSERVER_META_STOREFILE_REFRESH_PERIOD,
        StorefileRefresherChore.DEFAULT_REGIONSERVER_STOREFILE_REFRESH_PERIOD);
    onlyMetaRefresh = true;
  }
  if (storefileRefreshPeriod > 0) {
    this.storefileRefresher = new StorefileRefresherChore(storefileRefreshPeriod,
        onlyMetaRefresh, this, this);
  }
  registerConfigurationObservers();
}


That concludes this part.

Previous chapter: HBase Source Code Analysis (11): the WAL write process

http://blog.youkuaiyun.com/chenfenggang/article/details/75142075



