Lucene Data Writing - 03: Flushing Data to Disk

In the previous two articles we analyzed how the inverted index data is organized in memory; now let's look at how that data is saved to files.
At the end, indexWriter.close() is called: the IndexWriter performs its shutdown, writing the in-memory data to files and syncing them to disk.
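To make the entry point concrete, here is a minimal sketch that drives this flush path end to end. The class name, directory path, field name and analyzer are arbitrary choices for illustration:

import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class FlushDemo {
  public static void main(String[] args) throws Exception {
    try (Directory dir = FSDirectory.open(Paths.get("/tmp/lucene-demo"))) {
      IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));
      Document doc = new Document();
      doc.add(new TextField("content", "lucene flush example", Field.Store.YES));
      writer.addDocument(doc);  // buffered in memory by DocumentsWriter
      writer.close();           // shutdown: flush the buffers and write the segment files
    }
  }
}

Inside close(), the shutdown path eventually calls flush(true, true), which is where we pick up the trail: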

flush(true, true);
final void flush(boolean triggerMerge, boolean applyAllDeletes) throws IOException {
   
    ensureOpen(false);
    if (doFlush(applyAllDeletes) && triggerMerge) {
   
      // trigger a merge if needed
      maybeMerge(config.getMergePolicy(), MergeTrigger.FULL_FLUSH, UNBOUNDED_MAX_MERGE_SEGMENTS);
    }
  }

private boolean doFlush(boolean applyAllDeletes) throws IOException {
   
    if (tragedy.get() != null) {
   
      throw new IllegalStateException("this writer hit an unrecoverable error; cannot flush", tragedy.get());
    }
    // callback before the flush
    doBeforeFlush();
    testPoint("startDoFlush");
    boolean success = false;
    try {
   

      if (infoStream.isEnabled("IW")) {
   
        infoStream.message("IW", "  start flush: applyAllDeletes=" + applyAllDeletes);
        infoStream.message("IW", "  index before flush " + segString());
      }
      boolean anyChanges = false;
      
      synchronized (fullFlushLock) {
   
        boolean flushSuccess = false;
        try {
   
          // flush the in-memory buffers of all indexing threads
          long seqNo = docWriter.flushAllThreads();
          if (seqNo < 0) {
   
            seqNo = -seqNo;
            anyChanges = true;
          } else {
   
            anyChanges = false;
          }
          if (!anyChanges) {
   
            // flushCount is incremented in flushAllThreads
            flushCount.incrementAndGet();
          }
          // publish the flushed segments
          publishFlushedSegments(true);
          flushSuccess = true;
        } finally {
   
          assert holdsFullFlushLock();
          docWriter.finishFullFlush(flushSuccess);
          processEvents(false);
        }
      }

      if (applyAllDeletes) {
   
        applyAllDeletesAndUpdates();
      }

      anyChanges |= maybeMerge.getAndSet(false);
      
      synchronized(this) {
   
        writeReaderPool(applyAllDeletes);
        doAfterFlush();
        success = true;
        return anyChanges;
      }
    } catch (VirtualMachineError tragedy) {
   
      tragicEvent(tragedy, "doFlush");
      throw tragedy;
    } finally {
   
      if (!success) {
   
        if (infoStream.isEnabled("IW")) {
   
          infoStream.message("IW", "hit exception during flush");
        }
        maybeCloseOnTragicEvent();
      }
    }
  }

Continuing to follow the execution into org.apache.lucene.index.DocumentsWriter#doFlush, which flushes each DocumentsWriterPerThread via its flush() method, we eventually reach org.apache.lucene.index.DefaultIndexingChain#flush.
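The infoStream.message("IW", ...) calls sprinkled through these methods make the flush easy to watch at runtime. As a small aside, a config like the following routes those messages to stdout (a sketch; the wrapper class and method name here are made up for illustration):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.util.PrintStreamInfoStream;

public class InfoStreamConfig {
  // build a config whose "IW" messages (the infoStream.message("IW", ...) calls
  // in doFlush() and DefaultIndexingChain.flush()) are printed to stdout
  static IndexWriterConfig verboseConfig() {
    IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());
    config.setInfoStream(new PrintStreamInfoStream(System.out));
    return config;
  }
}

Plugged into the FlushDemo sketch above, close() will then print lines such as "... msec to write norms" for each of the writes we are about to walk through. With that, here is DefaultIndexingChain#flush: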

public Sorter.DocMap flush(SegmentWriteState state) throws IOException {
   

    // NOTE: caller (DocumentsWriterPerThread) handles
    // aborting on any exception from this method
    Sorter.DocMap sortMap = maybeSortSegment(state);
    int maxDoc = state.segmentInfo.maxDoc();
    long t0 = System.nanoTime();
    // write norms
    writeNorms(state, sortMap);
    if (docState.infoStream.isEnabled("IW")) {
   
      docState.infoStream.message("IW", ((System.nanoTime()-t0)/1000000) + " msec to write norms");
    }
    SegmentReadState readState = new SegmentReadState(state.directory, state.segmentInfo, state.fieldInfos, true, IOContext.READ, state.segmentSuffix, Collections.emptyMap());
    
    t0 = System.nanoTime();
    // write doc values
    writeDocValues(state, sortMap);
    if (docState.infoStream.isEnabled("IW")) {
   
      docState.infoStream.message("IW", ((System.nanoTime()-t0)/1000000) + " msec to write docValues");
    }

    t0 = System.nanoTime();
    // write points
    writePoints(state, sortMap);
    if (docState.infoStream.isEnabled("IW")) {
   
      docState.infoStream.message("IW", ((System.nanoTime()-t0)/1000000) + " msec to write points");
    }
    
    // it's possible all docs hit non-aborting exceptions...
    t0 = System.nanoTime();
    storedFieldsConsumer.finish(maxDoc);
    storedFieldsConsumer.flush(state, sortMap);
    if (docState.infoStream.isEnabled("IW")) {
   
      docState.infoStream.message("IW", ((System.nanoTime()-t0)/1000000) + " msec to finish stored fields");
    }

    t0 = System.nanoTime();
    Map<String,TermsHashPerField> fieldsToFlush = new HashMap<>();
    for (int i=0;i<fieldHash.length;i++) {
   
      PerField perField = fieldHash[i];
      while (perField != null) {
   
        if (perField.invertState != null) {
   
          fieldsToFlush.put(perField.fieldInfo.name, perField.termsHashPerField);
        }
        perField = perField.next;
      }
    }

    try (NormsProducer norms = readState.fieldInfos.hasNorms()
        ? state.segmentInfo.getCodec().normsFormat().normsProducer(readState)
        : null) {
   
      NormsProducer normsMergeInstance = null;
      if (norms != null) {
   
        // Use the merge instance in order to reuse the same IndexInput for all terms
        normsMergeInstance = norms.getMergeInstance();
      }
      // write the inverted index (postings) data
      termsHash.flush(fieldsToFlush, state, sortMap, normsMergeInstance);
    }
    if (docState.infoStream.isEnabled("IW")) {
   
      docState.infoStream.message("IW", ((System.nanoTime()-t0)/1000000) + " msec to write postings and finish vectors");
    }

    // Important to save after asking consumer to flush so
    // consumer can alter the FieldInfo* if necessary.  EG,
    // FreqProxTermsWriter does this with
    // FieldInfo.storePayload.
    t0 = System.nanoTime();
    // write field infos
    docWriter.codec.fieldInfosFormat().write(state.directory, state.segmentInfo, "", state.fieldInfos, IOContext.DEFAULT);
    if (docState.infoStream.isEnabled("IW")) {
   
      docState.infoStream.message("IW", ((System.nanoTime()-t0)/1000000) + " msec to write fieldInfos");
    }

    return sortMap;
  }

As you can see, several kinds of data are written here (norms, doc values, points, stored fields, field infos). We mainly care about how the inverted index data is written; readers interested in the other writes can explore them on their own.
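To see what these writes actually produce, you can list the files of the flushed segment. A minimal sketch, reusing the hypothetical /tmp/lucene-demo index from the earlier example; with the default config the per-component files may additionally be packed into a compound file:

import java.nio.file.Paths;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class ListSegmentFiles {
  public static void main(String[] args) throws Exception {
    try (Directory dir = FSDirectory.open(Paths.get("/tmp/lucene-demo"))) {
      for (String file : dir.listAll()) {
        // either individual files such as _0.fnm (field infos), _0.fdt/_0.fdx (stored fields),
        // _0.nvd/_0.nvm (norms), _0.dvd/_0.dvm (doc values) and the postings files,
        // or a compound _0.cfs/_0.cfe pair wrapping them, plus _0.si and segments_N
        System.out.println(file);
      }
    }
  }
}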

The entry point for writing the inverted index data:

// write the inverted index data
termsHash.flush(fieldsToFlush, state, sortMap, normsMergeInstance);

Continuing down the call chain, we reach org.apache.lucene.index.FreqProxTermsWriter#flush:

public void flush(Map<String,TermsHashPerField> fieldsToFlush, final SegmentWriteState state,
      Sorter.DocMap sortMap, NormsProducer norms) throws IOException {
   
    super.flush(fieldsToFlush, state, sortMap, norms);

    // Gather all fields that saw any postings:
    List<FreqProxTermsWriterPerField> allFields = new ArrayList<>();

    for (TermsHashPerField f : fieldsToFlush.values()) {
   
      final FreqProxTermsWriterPerField perField = (FreqProxTermsWriterPerField) f;
      if (perField.bytesHash.size() > 0) {
   
        perField.sortPostings();
        assert perField.fieldInfo.getIndexOptions() != IndexOptions.NONE;
        allFields.add(perField);
      }
    }

    // Sort by field name
    CollectionUtil.introSort(allFields);

    Fields fields = new FreqProxFields(allFields);
    applyDeletes(state, fields);
    if (sortMap != null) {
   
      fields = new SortingLeafReader.SortingFields(fields, state.fieldInfos, sortMap);
    }

    FieldsConsumer consumer = state.segmentInfo.getCodec().postingsFormat().fieldsConsumer(state);
    boolean success = false;
    try {
   
      // write the postings data
      consumer.write(fields, norms);
      success = true;
    } finally {
   
      if (success) {
   
        IOUtils.close(consumer);
      } else {
   
        IOUtils.closeWhileHandlingException(consumer);
      }
    }

  }

Before writing, the fields are sorted by name; then a FieldsConsumer is obtained from the codec's postings format to write the data, which ends up in org.apache.lucene.codecs.blocktree.BlockTreeTermsWriter#write.
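Where does that FieldsConsumer come from? In the default codec of recent Lucene 8.x releases, state.segmentInfo.getCodec().postingsFormat().fieldsConsumer(state) does roughly the following (a simplified sketch; concrete class names such as Lucene84PostingsWriter are version-specific):

public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
  // low-level writer for the postings lists themselves (.doc/.pos/.pay files)
  PostingsWriterBase postingsWriter = new Lucene84PostingsWriter(state);
  boolean success = false;
  try {
    // BlockTreeTermsWriter wraps it and builds the term dictionary and
    // term index (.tim/.tip files) on top of those postings
    FieldsConsumer ret = new BlockTreeTermsWriter(state, postingsWriter,
        BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE, BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE);
    success = true;
    return ret;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(postingsWriter);
    }
  }
}

With the consumer in hand, here is BlockTreeTermsWriter#write: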

public void write(Fields fields, NormsProducer norms) throws IOException {
   
    //if (DEBUG) System.out.println("\nBTTW.write seg=" + segment);

    String lastField = null;
    for(String field : fields) {
   
      assert lastField == null || lastField.compareTo(field) < 0;
      lastField = field;

      //if (DEBUG) System.out.println("\nBTTW.write seg=" + segment + " field=" + field);
      Terms terms = fields.terms(field);
      if (terms == null) {
   
        continue;
      }
      // iterate over every term of this field
      TermsEnum termsEnum = terms.iterator();
      TermsWriter termsWriter = new TermsWriter(fieldInfos.fieldInfo(field));
      while (true) {
   
        // get the next term
        BytesRef term = termsEnum.next();
        //if (DEBUG) System.out.println("BTTW: next term " + term);

        if (term == null) {
   
          break;
        }

        //if (DEBUG) System.out.println("write field=" + fieldInfo.name + " term=" + brToString(term));
        // record this term's information
        termsWriter.write(term, termsEnum, norms);
      }
      // build the term index structure
      termsWriter.finish();

      //if (DEBUG) System.out.println("\nBTTW.write done seg=" + segment + " field=" + field);
    }
  }

You can see that the method loops over every field, retrieves the field's terms, and writes them one by one. Let's walk through the code above piece by piece.

TermsEnum termsEnum = terms.iterator();

This calls org.apache.lucene.index.FreqProxFields.FreqProxTerms#iterator:

public TermsEnum iterator() {
    // wrap this field's in-memory postings in a TermsEnum that returns the terms
    // in the order produced by sortPostings()
    FreqProxTermsEnum termsEnum = new FreqProxTermsEnum(terms);
    termsEnum.reset();
    return termsEnum;
  }

The returned FreqProxTermsEnum is exactly what BlockTreeTermsWriter consumes in the write loop above: each call to next() pulls the next sorted term back out of the in-memory byte pool, and termsWriter.write then records that term and its postings into the new segment's files.