Solr 4.7 Source Code Analysis - Startup (Part 3)

This post walks through how a SolrCore instance is created, covering how the key configuration files are loaded, the initialization steps of the core components, and how the index directory is handled. It also describes some of the important fields inside SolrCore.


Continuing from the previous post: http://blog.youkuaiyun.com/wenchanter/article/details/37366829

During multi-threaded core creation, once solrconfig.xml and schema.xml have been loaded, a SolrCore instance is constructed. A lot of work happens while building this instance, and the constructor is very long:

  /**
   * Creates a new core and register it in the list of cores.
   * If a core with the same name already exists, it will be stopped and replaced by this one.
   *@param dataDir the index directory
   *@param config a solr config instance
   *@param schema a solr schema instance
   *
   *@since solr 1.3
   */
  public SolrCore(String name, String dataDir, SolrConfig config, IndexSchema schema, CoreDescriptor cd, UpdateHandler updateHandler, IndexDeletionPolicyWrapper delPolicy, SolrCore prev) {
    coreDescriptor = cd;
    this.setName( name );
    resourceLoader = config.getResourceLoader();
    this.solrConfig = config;
   
    // Initialize the DirectoryFactory from the directoryFactory section of solrconfig.xml. The default is NRTCachingDirectoryFactory; one is created even if nothing is configured, with maxMergeSizeMB=4 and maxCachedMB=48 by default
    if (updateHandler == null) {
      initDirectoryFactory();
    }


    // Path where the index files are stored
    if (dataDir == null) {
      if (cd.usingDefaultDataDir()) dataDir = config.getDataDir();
      if (dataDir == null) {
        try {
          dataDir = cd.getDataDir();
          if (!directoryFactory.isAbsolute(dataDir)) {
            dataDir = directoryFactory.getDataHome(cd);
          }
        } catch (IOException e) {
          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, null, e);
        }
      }
    }

    dataDir = SolrResourceLoader.normalizeDir(dataDir);
    log.info(logid+"Opening new SolrCore at " + resourceLoader.getInstanceDir() + ", dataDir="+dataDir);

    if (null != cd && null != cd.getCloudDescriptor()) {
      // we are evidently running in cloud mode.  
      //
      // In cloud mode, version field is required for correct consistency
      // ideally this check would be more fine grained, and individual features
      // would assert it when they initialize, but DistributedUpdateProcessor
      // is currently a big ball of wax that does more then just distributing
      // updates (ie: partial document updates), so it needs to work in no cloud
      // mode as well, and can't assert version field support on init.

      try {
        VersionInfo.getAndCheckVersionField(schema);
      } catch (SolrException e) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                                "Schema will not work with SolrCloud mode: " +
                                e.getMessage(), e);
      }
    }

    //Initialize JMX
    if (config.jmxConfig.enabled) {
      infoRegistry = new JmxMonitoredMap<String, SolrInfoMBean>(name, String.valueOf(this.hashCode()), config.jmxConfig);
    } else  {
      log.info("JMX monitoring not detected for core: " + name);
      infoRegistry = new ConcurrentHashMap<String, SolrInfoMBean>();
    }

    infoRegistry.put("fieldCache", new SolrFieldCacheMBean());

    if (schema==null) {
      schema = IndexSchemaFactory.buildIndexSchema(IndexSchema.DEFAULT_SCHEMA_FILE, config);
    }
    this.schema = schema;
    final SimilarityFactory similarityFactory = schema.getSimilarityFactory();
    if (similarityFactory instanceof SolrCoreAware) {
      // Similarity needs SolrCore before inform() is called on all registered SolrCoreAware listeners below
      ((SolrCoreAware)similarityFactory).inform(this);
    }

    this.dataDir = dataDir;
    this.startTime = System.currentTimeMillis();
    this.maxWarmingSearchers = config.maxWarmingSearchers;

    // Maximum BooleanQuery clause count, configured under <query> in solrconfig.xml (maxBooleanClauses), default 1024
    booleanQueryMaxClauseCount();
 
    final CountDownLatch latch = new CountDownLatch(1);

    try {
     
      // Listeners configured in solrconfig.xml: the firstSearcher and newSearcher events used for autowarming
      initListeners();


      // deletionPolicy configured in solrconfig.xml
      if (delPolicy == null) {
        initDeletionPolicy();
      } else {
        this.solrDelPolicy = delPolicy;
      }


      // codecFactory configured in solrconfig.xml; used for custom codecs, default is SchemaCodecFactory
      this.codec = initCodec(solrConfig, schema);
     
      if (updateHandler == null) {
        solrCoreState = new DefaultSolrCoreState(getDirectoryFactory());
      } else {
        solrCoreState = updateHandler.getSolrCoreState();
        directoryFactory = solrCoreState.getDirectoryFactory();
        this.isReloaded = true;
      }

      // Initializes the indexReaderFactory, checks whether the lock file in the index directory is held, and does a bunch of other things.
      initIndex(prev != null);


      // Initialize the queryResponseWriters registered in solrconfig.xml, one handler class per response format
      initWriters();
      // queryParser entries in solrconfig.xml
      initQParsers();
      // valueSourceParser entries in solrconfig.xml
      initValueSourceParsers();
      // transformer entries in solrconfig.xml
      initTransformerFactories();
     
      // searchComponent entries in solrconfig.xml
      this.searchComponents = Collections
          .unmodifiableMap(loadSearchComponents());
     
      // Processors initialized before the handlers
      // updateRequestProcessorChain entries in solrconfig.xml
      updateProcessorChains = loadUpdateProcessorChains();
      // Initialize the requestHandlers from solrconfig.xml; they are stored in the handlers field (a ConcurrentHashMap) of the RequestHandlers instance
      reqHandlers = new RequestHandlers(this);
      reqHandlers.initHandlersFromConfig(solrConfig);

      // Handle things that should eventually go away
      initDeprecatedSupport();
     
      // cause the executor to stall so firstSearcher events won't fire
      // until after inform() has been called for all components.
      // searchExecutor must be single-threaded for this to work
      searcherExecutor.submit(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
          latch.await();
          return null;
        }
      });
     
      // use the (old) writer to open the first searcher
      RefCounted<IndexWriter> iwRef = null;
      if (prev != null) {
        iwRef = prev.getUpdateHandler().getSolrCoreState().getIndexWriter(null);
        if (iwRef != null) {
          final IndexWriter iw = iwRef.get();
          final SolrCore core = this;
          newReaderCreator = new Callable<DirectoryReader>() {
            // this is used during a core reload

            @Override
            public DirectoryReader call() throws Exception {
              if(getSolrConfig().nrtMode) {
                // if in NRT mode, need to open from the previous writer
                return indexReaderFactory.newReader(iw, core);
              } else {
                // if not NRT, need to create a new reader from the directory
                return indexReaderFactory.newReader(iw.getDirectory(), core);
              }
            }
          };
        }
      }
 
      // The updateHandler section of solrconfig.xml
      String updateHandlerClass = solrConfig.getUpdateHandlerInfo().className;
     
      if (updateHandler == null) {
        this.updateHandler = createUpdateHandler(updateHandlerClass == null ? DirectUpdateHandler2.class
            .getName() : updateHandlerClass);
      } else {
        this.updateHandler = createUpdateHandler(
            updateHandlerClass == null ? DirectUpdateHandler2.class.getName()
                : updateHandlerClass, updateHandler);
      }
      infoRegistry.put("updateHandler", this.updateHandler);

      try {
        // This is the main event: obtaining a SolrIndexSearcher.
        getSearcher(false, false, null, true);
      } finally {
        newReaderCreator = null;
        if (iwRef != null) iwRef.decref();
      }
     
      // Finally tell anyone who wants to know
      resourceLoader.inform(resourceLoader);
      resourceLoader.inform(this); // last call before the latch is released.
    } catch (Throwable e) {
      latch.countDown();//release the latch, otherwise we block trying to do the close.  This should be fine, since counting down on a latch of 0 is still fine
      //close down the searcher and any other resources, if it exists, as this is not recoverable
      if (e instanceof OutOfMemoryError) {
        throw (OutOfMemoryError)e;
      }
      close();
     
      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                              e.getMessage(), e);
    } finally {
      // allow firstSearcher events to fire and make sure it is released
      latch.countDown();
    }

    infoRegistry.put("core", this);
   
    // register any SolrInfoMBeans SolrResourceLoader initialized
    //
    // this must happen after the latch is released, because a JMX server impl may
    // choose to block on registering until properties can be fetched from an MBean,
    // and a SolrCoreAware MBean may have properties that depend on getting a Searcher
    // from the core.
    resourceLoader.inform(infoRegistry);
   
    CoreContainer cc = cd.getCoreContainer();

    if (cc != null && cc.isZooKeeperAware()) {
      SolrRequestHandler realtimeGetHandler = reqHandlers.get("/get");
      if (realtimeGetHandler == null) {
        log.warn("WARNING: RealTimeGetHandler is not registered at /get. " +
            "SolrCloud will always use full index replication instead of the more efficient PeerSync method.");
      }

      // ZK pre-Register would have already happened so we read slice properties now
      ClusterState clusterState = cc.getZkController().getClusterState();
      Slice slice = clusterState.getSlice(cd.getCloudDescriptor().getCollectionName(),
          cd.getCloudDescriptor().getShardId());
      if (Slice.CONSTRUCTION.equals(slice.getState())) {
        // set update log to buffer before publishing the core
        getUpdateHandler().getUpdateLog().bufferUpdates();
      }
    }
    // For debugging   
//    numOpens.incrementAndGet();
//    openHandles.put(this, new RuntimeException("unclosed core - name:" + getName() + " refs: " + refCount.get()));

    ruleExpiryLock = new ReentrantLock();
  }
The code above has mostly been explained through the inline comments. Next, let's look at a few of the methods in more detail. First, initDirectoryFactory, which is nothing special:

   private void initDirectoryFactory() {
    DirectoryFactory dirFactory;
    // As seen when SolrConfig was instantiated, the DirectoryFactory class name maps to the directoryFactory config section
    PluginInfo info = solrConfig.getPluginInfo(DirectoryFactory.class.getName());
    if (info != null) {
      log.info(info.className);
      dirFactory = getResourceLoader().newInstance(info.className, DirectoryFactory.class);
      dirFactory.init(info.initArgs);
    } else {
      log.info("solr.NRTCachingDirectoryFactory");
      dirFactory = new NRTCachingDirectoryFactory();
    }
    // And set it
    directoryFactory = dirFactory;
  }
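
To make the default concrete: NRTCachingDirectoryFactory essentially wraps the underlying FSDirectory in Lucene's NRTCachingDirectory, which caches small, freshly flushed segments in RAM for near-real-time readers. A rough standalone sketch of what it hands back (the path below is invented for illustration; 4 and 48 are the defaults mentioned in the comment above):

  import java.io.File;
  import org.apache.lucene.store.Directory;
  import org.apache.lucene.store.FSDirectory;
  import org.apache.lucene.store.NRTCachingDirectory;

  public class NrtDirectorySketch {
    public static void main(String[] args) throws Exception {
      // roughly what NRTCachingDirectoryFactory produces for a core's index directory
      Directory raw = FSDirectory.open(new File("/path/to/solr/core1/data/index"));
      // maxMergeSizeMB=4, maxCachedMB=48 -- the factory's defaults
      Directory dir = new NRTCachingDirectory(raw, 4.0, 48.0);
      System.out.println(dir);
      dir.close();
    }
  }
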
Next comes the initIndex method. It initializes the IndexReaderFactory and does a number of other things, such as checking whether the lock file in the index directory is held. This method is behind the common "Index locked for write for core" error; a detailed walkthrough of the code is here:

http://blog.youkuaiyun.com/wenchanter/article/details/37831825

For now, here is a quick look at the method:

  void initIndex(boolean reload) throws IOException {
      // Inside this call a Directory is created from dataDir and its lock factory is set
      String indexDir = getNewIndexDir();
      boolean indexExists = getDirectoryFactory().exists(indexDir);
      boolean firstTime;
      synchronized (SolrCore.class) {
        firstTime = dirs.add(getDirectoryFactory().normalize(indexDir));
      }
      boolean removeLocks = solrConfig.unlockOnStartup;

      // indexReaderFactory from solrconfig.xml; if none is configured, StandardIndexReaderFactory is used by default
      initIndexReaderFactory();

      if (indexExists && firstTime && !reload) {
        // Besides the Directory created from dataDir inside getNewIndexDir above (which already had a lockFactory set), another Directory is created here from the index path it returned, again with a lockFactory; this is the Directory actually used for the lock check
        Directory dir = directoryFactory.get(indexDir, DirContext.DEFAULT,
            getSolrConfig().indexConfig.lockType);
        try {
          if (IndexWriter.isLocked(dir)) {
            if (removeLocks) {
              log.warn(
                  logid
                      + "WARNING: Solr index directory '{}' is locked.  Unlocking...",
                  indexDir);
              IndexWriter.unlock(dir);
            } else {
              log.error(logid
                  + "Solr index directory '{}' is locked.  Throwing exception",
                  indexDir);
              throw new LockObtainFailedException(
                  "Index locked for write for core " + name);
            }
            
          }
        } finally {
          directoryFactory.release(dir);
        }
      }

      // Create the index if it doesn't exist.
      if(!indexExists) {
        log.warn(logid+"Solr index directory '" + new File(indexDir) + "' doesn't exist."
                + " Creating new index...");

        SolrIndexWriter writer = SolrIndexWriter.create("SolrCore.initIndex", indexDir, getDirectoryFactory(), true, 
                                                        getLatestSchema(), solrConfig.indexConfig, solrDelPolicy, codec);
        writer.close();
      }
  }
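
The lock check itself is plain Lucene. A minimal standalone sketch of what the two branches above boil down to (the index path is invented; in Solr the behaviour is controlled by unlockOnStartup and the lockType setting under indexConfig in solrconfig.xml):

  import java.io.File;
  import org.apache.lucene.index.IndexWriter;
  import org.apache.lucene.store.Directory;
  import org.apache.lucene.store.FSDirectory;

  public class LockCheckSketch {
    public static void main(String[] args) throws Exception {
      Directory dir = FSDirectory.open(new File("/path/to/solr/core1/data/index"));
      if (IndexWriter.isLocked(dir)) {
        // what unlockOnStartup=true amounts to: forcibly release the stale write.lock
        IndexWriter.unlock(dir);
        // with unlockOnStartup=false Solr instead throws
        // "Index locked for write for core <name>"
      }
      dir.close();
    }
  }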


Next is the initWriters method. It registers a different QueryResponseWriter for each response type (wt). Besides the writers that can be customized via queryResponseWriter sections in solrconfig.xml, each type has a corresponding default class. Here is the method, followed by the default classes:

  /** Configure the query response writers. There will always be a default writer; additional
   * writers may also be configured. */
  private void initWriters() {
    // use link map so we iterate in the same order
    Map<PluginInfo,QueryResponseWriter> writers = new LinkedHashMap<PluginInfo,QueryResponseWriter>();
    for (PluginInfo info : solrConfig.getPluginInfos(QueryResponseWriter.class.getName())) {
      try {
        QueryResponseWriter writer;
        String startup = info.attributes.get("startup") ;
        if( startup != null ) {
          if( "lazy".equals(startup) ) {
            log.info("adding lazy queryResponseWriter: " + info.className);
            writer = new LazyQueryResponseWriterWrapper(this, info.className, info.initArgs );
          } else {
            throw new Exception( "Unknown startup value: '"+startup+"' for: "+info.className );
          }
        } else {
          // The instance is created here but init is not called yet; the loop below inits them all
          writer = createQueryResponseWriter(info.className);
        }
        writers.put(info,writer);
        // Registering just stores them in a HashMap: responseWriters
        QueryResponseWriter old = registerResponseWriter(info.name, writer);
        if(old != null) {
          log.warn("Multiple queryResponseWriter registered to the same name: " + info.name + " ignoring: " + old.getClass().getName());
        }
        if(info.isDefault()){
          if(defaultResponseWriter != null)
            log.warn("Multiple default queryResponseWriter registered, using: " + info.name);
          defaultResponseWriter = writer;
        }
        log.info("created "+info.name+": " + info.className);
      } catch (Exception ex) {
          SolrException e = new SolrException
            (SolrException.ErrorCode.SERVER_ERROR, "QueryResponseWriter init failure", ex);
          SolrException.log(log,null,e);
          throw e;
      }
    }

    // we've now registered all handlers, time to init them in the same order
    for (Map.Entry<PluginInfo,QueryResponseWriter> entry : writers.entrySet()) {
      PluginInfo info = entry.getKey();
      QueryResponseWriter writer = entry.getValue();
      responseWriters.put(info.name, writer);
      if (writer instanceof PluginInfoInitialized) {
        ((PluginInfoInitialized) writer).init(info);
      } else{
        writer.init(info.initArgs);
      }
    }

    NamedList emptyList = new NamedList();
    // For wt types that were not configured, use the default class and init it
    for (Map.Entry<String, QueryResponseWriter> entry : DEFAULT_RESPONSE_WRITERS.entrySet()) {
      if(responseWriters.get(entry.getKey()) == null) {
        responseWriters.put(entry.getKey(), entry.getValue());
        // call init so any logic in the default writers gets invoked
        entry.getValue().init(emptyList);
      }
    }
    
    // configure the default response writer; this one should never be null
    if (defaultResponseWriter == null) {
      defaultResponseWriter = responseWriters.get("standard");
    }

  }

Each queryResponseWriter type has its own default class:
  static{
    HashMap<String, QueryResponseWriter> m= new HashMap<String, QueryResponseWriter>();
    m.put("xml", new XMLResponseWriter());
    m.put("standard", m.get("xml"));
    m.put("json", new JSONResponseWriter());
    m.put("python", new PythonResponseWriter());
    m.put("php", new PHPResponseWriter());
    m.put("phps", new PHPSerializedResponseWriter());
    m.put("ruby", new RubyResponseWriter());
    m.put("raw", new RawResponseWriter());
    m.put("javabin", new BinaryResponseWriter());
    m.put("csv", new CSVResponseWriter());
    m.put("schema.xml", new SchemaXmlResponseWriter());
    DEFAULT_RESPONSE_WRITERS = Collections.unmodifiableMap(m);
  }
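
A custom writer registered under a queryResponseWriter section only needs to implement the QueryResponseWriter interface used above. A minimal, hypothetical example (class name and output format are made up) that dumps the response as plain text:

  import java.io.IOException;
  import java.io.Writer;
  import org.apache.solr.common.util.NamedList;
  import org.apache.solr.request.SolrQueryRequest;
  import org.apache.solr.response.QueryResponseWriter;
  import org.apache.solr.response.SolrQueryResponse;

  public class PlainTextResponseWriter implements QueryResponseWriter {
    @Override
    public void init(NamedList args) {
      // the initArgs from the config section arrive here
    }

    @Override
    public void write(Writer writer, SolrQueryRequest request, SolrQueryResponse response) throws IOException {
      // trivially dump the response values; a real writer would format them properly
      writer.write(response.getValues().toString());
    }

    @Override
    public String getContentType(SolrQueryRequest request, SolrQueryResponse response) {
      return CONTENT_TYPE_TEXT_UTF8;
    }
  }

Once registered under a name, it would be selected with wt=<that name>, while the built-in types fall back to the defaults above.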

Next comes initQParsers. This corresponds to the queryParser sections of solrconfig.xml (more than one can be configured). These are the query parsers, selected at query time via defType or via local params (the {!...} syntax). They too have a set of defaults:
  /** Configure the query parsers. */
  private void initQParsers() {
    // Initialize the custom queryParsers
    initPlugins(qParserPlugins,QParserPlugin.class);
    // default parsers
    for (int i=0; i<QParserPlugin.standardPlugins.length; i+=2) {
     try {
       String name = (String)QParserPlugin.standardPlugins[i];
       // If a custom parser is configured under the same name as a default one, the custom one wins; both standard and custom parsers end up in the qParserPlugins HashMap
       if (null == qParserPlugins.get(name)) {
         Class<QParserPlugin> clazz = (Class<QParserPlugin>)QParserPlugin.standardPlugins[i+1];
         QParserPlugin plugin = clazz.newInstance();
         qParserPlugins.put(name, plugin);
         plugin.init(null);
         infoRegistry.put(name, plugin);
       }
     } catch (Exception e) {
       throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
     }
    }
  }
These are the default queryParser plugins:
  /**
   * Internal use - name to class mappings of builtin parsers.
   * Each query parser plugin extending {@link QParserPlugin} has own instance of standardPlugins.
   * This leads to cyclic dependencies of static fields and to case when NAME field is not yet initialized.
   * This result to NPE during initialization.
   * For every plugin, listed here, NAME field has to be final and static.
   */
  public static final Object[] standardPlugins = {
    LuceneQParserPlugin.NAME, LuceneQParserPlugin.class,
    OldLuceneQParserPlugin.NAME, OldLuceneQParserPlugin.class,
    FunctionQParserPlugin.NAME, FunctionQParserPlugin.class,
    PrefixQParserPlugin.NAME, PrefixQParserPlugin.class,
    BoostQParserPlugin.NAME, BoostQParserPlugin.class,
    DisMaxQParserPlugin.NAME, DisMaxQParserPlugin.class,
    ExtendedDismaxQParserPlugin.NAME, ExtendedDismaxQParserPlugin.class,
    FieldQParserPlugin.NAME, FieldQParserPlugin.class,
    RawQParserPlugin.NAME, RawQParserPlugin.class,
    TermQParserPlugin.NAME, TermQParserPlugin.class,
    NestedQParserPlugin.NAME, NestedQParserPlugin.class,
    FunctionRangeQParserPlugin.NAME, FunctionRangeQParserPlugin.class,
    SpatialFilterQParserPlugin.NAME, SpatialFilterQParserPlugin.class,
    SpatialBoxQParserPlugin.NAME, SpatialBoxQParserPlugin.class,
    JoinQParserPlugin.NAME, JoinQParserPlugin.class,
    SurroundQParserPlugin.NAME, SurroundQParserPlugin.class,
    SwitchQParserPlugin.NAME, SwitchQParserPlugin.class,
    MaxScoreQParserPlugin.NAME, MaxScoreQParserPlugin.class,
    BlockJoinParentQParserPlugin.NAME, BlockJoinParentQParserPlugin.class,
    BlockJoinChildQParserPlugin.NAME, BlockJoinChildQParserPlugin.class,
    CollapsingQParserPlugin.NAME, CollapsingQParserPlugin.class,
    SimpleQParserPlugin.NAME, SimpleQParserPlugin.class
  };
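
Any parser registered through a queryParser section lands in the same qParserPlugins map as these standard ones and is selected with defType=<name> or {!<name> ...}. As a hypothetical sketch (class name, field name and behaviour are invented), a plugin that turns the whole query string into a single TermQuery might look like this:

  import org.apache.lucene.index.Term;
  import org.apache.lucene.search.Query;
  import org.apache.lucene.search.TermQuery;
  import org.apache.solr.common.params.SolrParams;
  import org.apache.solr.common.util.NamedList;
  import org.apache.solr.request.SolrQueryRequest;
  import org.apache.solr.search.QParser;
  import org.apache.solr.search.QParserPlugin;
  import org.apache.solr.search.SyntaxError;

  public class SingleTermQParserPlugin extends QParserPlugin {
    @Override
    public void init(NamedList args) {
    }

    @Override
    public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
      return new QParser(qstr, localParams, params, req) {
        @Override
        public Query parse() throws SyntaxError {
          // treat the raw query string as a single term in a fixed (illustrative) field
          return new TermQuery(new Term("title", getString()));
        }
      };
    }
  }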

After that, initValueSourceParsers initializes the valueSourceParser sections of solrconfig.xml (again, several can be configured). These are the functions (func) used at query time, invoked for example via the func query parser or in function queries:

  /** Configure the ValueSource (function) plugins */
  private void initValueSourceParsers() {
    // Initialize the custom valueSourceParsers
    initPlugins(valueSourceParsers,ValueSourceParser.class);
    // default value source parsers
    // Default functions: the ValueSourceParser class registers 80+ of them when it is initialized; here they are copied into the valueSourceParsers HashMap
    for (Map.Entry<String, ValueSourceParser> entry : ValueSourceParser.standardValueSourceParsers.entrySet()) {
      try {
        String name = entry.getKey();
        if (null == valueSourceParsers.get(name)) {
          ValueSourceParser valueSourceParser = entry.getValue();
          valueSourceParsers.put(name, valueSourceParser);
          valueSourceParser.init(null);
        }
      } catch (Exception e) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
      }
    }
  }
The defaults are not listed here; the static block of ValueSourceParser adds a large number of functions.
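
For a sense of what such a parser looks like, here is a minimal, hypothetical ValueSourceParser (name and behaviour invented) whose parse method reads one numeric argument and returns a constant value source of twice that value:

  import org.apache.lucene.queries.function.ValueSource;
  import org.apache.lucene.queries.function.valuesource.ConstValueSource;
  import org.apache.solr.search.FunctionQParser;
  import org.apache.solr.search.SyntaxError;
  import org.apache.solr.search.ValueSourceParser;

  public class TwiceValueSourceParser extends ValueSourceParser {
    @Override
    public ValueSource parse(FunctionQParser fp) throws SyntaxError {
      // e.g. twice(21) -> constant value source of 42.0
      float value = fp.parseFloat();
      return new ConstValueSource(value * 2);
    }
  }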

Next is initTransformerFactories(), which corresponds to the transformer sections of solrconfig.xml. A transformer can modify the format of each document in the query response and is used from the fl parameter; for details see: https://cwiki.apache.org/confluence/display/solr/Transforming+Result+Documents

  /** Configure the TransformerFactory plugins */
  private void initTransformerFactories() {
    // Load any transformer factories
    // Initialize the custom transformers
    initPlugins(transformerFactories,TransformerFactory.class);
    
    // Tell each transformer what its name is
    // Default transformers, stored in the transformerFactories HashMap
    for( Map.Entry<String, TransformerFactory> entry : TransformerFactory.defaultFactories.entrySet() ) {
      try {
        String name = entry.getKey();
        if (null == valueSourceParsers.get(name)) {
          TransformerFactory f = entry.getValue();
          transformerFactories.put(name, f);
          // f.init(null); default ones don't need init
        }
      } catch (Exception e) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
      }
    }
  }

The default transformers:
  static {
    defaultFactories.put( "explain", new ExplainAugmenterFactory() );
    defaultFactories.put( "value", new ValueAugmenterFactory() );
    defaultFactories.put( "docid", new DocIdAugmenterFactory() );
    defaultFactories.put( "shard", new ShardAugmenterFactory() );
  }
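
A transformer section points at a TransformerFactory like the ones above; its create method returns the DocTransformer that is applied to every document in the response. A minimal, hypothetical sketch (class name, transformer name and field are made up) that stamps a constant value onto each returned document:

  import org.apache.solr.common.SolrDocument;
  import org.apache.solr.common.params.SolrParams;
  import org.apache.solr.request.SolrQueryRequest;
  import org.apache.solr.response.transform.DocTransformer;
  import org.apache.solr.response.transform.TransformerFactory;

  public class ConstantFieldTransformerFactory extends TransformerFactory {
    @Override
    public DocTransformer create(final String field, final SolrParams params, SolrQueryRequest req) {
      return new DocTransformer() {
        @Override
        public String getName() {
          return field;
        }

        @Override
        public void transform(SolrDocument doc, int docid) {
          // add the same marker value to every document in the response
          doc.setField(field, params == null ? "marked" : params.get("v", "marked"));
        }
      };
    }
  }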

searchComponent: these are stored in an immutable map and correspond to the searchComponent sections of solrconfig.xml. Not much to say here; you deal with them constantly at query time. Here is the loading code and the defaults:

  /**
   * Register the default search components
   */
  private Map<String, SearchComponent> loadSearchComponents()
  {
    Map<String, SearchComponent> components = new HashMap<String, SearchComponent>();
    // Custom components, registered by name; one registered under a default name overrides the default component
    initPlugins(components,SearchComponent.class);
    for (Map.Entry<String, SearchComponent> e : components.entrySet()) {
      SearchComponent c = e.getValue();
      if (c instanceof HighlightComponent) {
        HighlightComponent hl = (HighlightComponent) c;
        if(!HighlightComponent.COMPONENT_NAME.equals(e.getKey())){
          components.put(HighlightComponent.COMPONENT_NAME,hl);
        }
        break;
      }
    }
    // The default components are registered here; the result is wrapped in an unmodifiable map and stored in searchComponents
    addIfNotPresent(components,HighlightComponent.COMPONENT_NAME,HighlightComponent.class);
    addIfNotPresent(components,QueryComponent.COMPONENT_NAME,QueryComponent.class);
    addIfNotPresent(components,FacetComponent.COMPONENT_NAME,FacetComponent.class);
    addIfNotPresent(components,MoreLikeThisComponent.COMPONENT_NAME,MoreLikeThisComponent.class);
    addIfNotPresent(components,StatsComponent.COMPONENT_NAME,StatsComponent.class);
    addIfNotPresent(components,DebugComponent.COMPONENT_NAME,DebugComponent.class);
    addIfNotPresent(components,RealTimeGetComponent.COMPONENT_NAME,RealTimeGetComponent.class);
    return components;
  }
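
A custom searchComponent simply extends SearchComponent and is then referenced from a requestHandler's component list. A minimal, hypothetical example (names invented) that appends a timestamp entry to every response:

  import java.io.IOException;
  import org.apache.solr.handler.component.ResponseBuilder;
  import org.apache.solr.handler.component.SearchComponent;

  public class TimestampComponent extends SearchComponent {
    @Override
    public void prepare(ResponseBuilder rb) throws IOException {
      // nothing to prepare for this trivial component
    }

    @Override
    public void process(ResponseBuilder rb) throws IOException {
      // append an extra section to the response
      rb.rsp.add("servedAt", System.currentTimeMillis());
    }

    @Override
    public String getDescription() {
      return "adds a server timestamp to the response";
    }

    @Override
    public String getSource() {
      return null;
    }
  }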

The loadUpdateProcessorChains method that follows loads the updateRequestProcessorChain sections of solrconfig.xml. Nothing special: if no chain is configured, a default chain is used. initHandlersFromConfig then initializes all of the request handlers in solrconfig.xml: a RequestHandlers instance is created first, and its initHandlersFromConfig method is called. Again nothing special, so the code is not pasted and annotated here.
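
For completeness, each link in such a chain is an UpdateRequestProcessorFactory producing an UpdateRequestProcessor that delegates to the next element. A minimal, hypothetical example (class and field name invented) that stamps every incoming document before handing it on down the chain:

  import java.io.IOException;
  import org.apache.solr.request.SolrQueryRequest;
  import org.apache.solr.response.SolrQueryResponse;
  import org.apache.solr.update.AddUpdateCommand;
  import org.apache.solr.update.processor.UpdateRequestProcessor;
  import org.apache.solr.update.processor.UpdateRequestProcessorFactory;

  public class StampingUpdateProcessorFactory extends UpdateRequestProcessorFactory {
    @Override
    public UpdateRequestProcessor getInstance(SolrQueryRequest req, SolrQueryResponse rsp, UpdateRequestProcessor next) {
      return new StampingUpdateProcessor(next);
    }

    static class StampingUpdateProcessor extends UpdateRequestProcessor {
      StampingUpdateProcessor(UpdateRequestProcessor next) {
        super(next);
      }

      @Override
      public void processAdd(AddUpdateCommand cmd) throws IOException {
        // add a field to the incoming document, then pass it to the rest of the chain
        cmd.getSolrInputDocument().setField("indexed_at_l", System.currentTimeMillis());
        super.processAdd(cmd);
      }
    }
  }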

Next, look at the getSearcher(false, false, null, true) call. It obtains a SolrIndexSearcher; at startup this means creating a brand new SolrIndexSearcher, and a lot of work happens inside it. That will be analyzed in the next post.


Finally, here are some of the fields of SolrCore. Their names give a rough idea of what they do; a few carry comments:

  private boolean isReloaded = false;
  final List<SolrEventListener> firstSearcherListeners = new ArrayList<SolrEventListener>();
  final List<SolrEventListener> newSearcherListeners = new ArrayList<SolrEventListener>();
  // this core current usage count
  private final AtomicInteger refCount = new AtomicInteger(1);

  private Collection<CloseHook> closeHooks = null;
  // All of the normal open searchers.  Don't access this directly.
  // protected by synchronizing on searcherLock.
  private final LinkedList<RefCounted<SolrIndexSearcher>> _searchers = new LinkedList<RefCounted<SolrIndexSearcher>>();
  private final LinkedList<RefCounted<SolrIndexSearcher>> _realtimeSearchers = new LinkedList<RefCounted<SolrIndexSearcher>>();
  // Lock ordering: one can acquire the openSearcherLock and then the searcherLock, but not vice-versa.
  private Object searcherLock = new Object();  // the sync object for the searcher
  private ReentrantLock openSearcherLock = new ReentrantLock(true);     // used to serialize opens/reopens for absolute ordering

  // protect via synchronized(SolrCore.class)
  private static Set<String> dirs = new HashSet<String>();// stores indexDir values, e.g. ……/core1/data/index

  private final Map<String, QueryResponseWriter> responseWriters = new HashMap<String, QueryResponseWriter>();// stores the queryResponseWriters

  private final Map<String, QParserPlugin> qParserPlugins = new HashMap<String, QParserPlugin>();// stores QParserPlugins: the standard ones plus any custom queryParsers

  private final HashMap<String, ValueSourceParser> valueSourceParsers = new HashMap<String, ValueSourceParser>();

  private final HashMap<String, TransformerFactory> transformerFactories = new HashMap<String, TransformerFactory>();
  private final Map<String,SearchComponent> searchComponents;

(To be continued)

