/* lucene-core-2.9.4-dev */

      docStoreOffset = si.getDocStoreOffset();
      docStoreSegment = si.getDocStoreSegment();
      docStoreIsCompoundFile = si.getDocStoreIsCompoundFile();
    }
    if ((mergeDocStores) && (doFlushDocStore))
    {
      if (infoStream != null) {
        message("now flush at merge");
      }
      doFlush(true, false);
    }
    merge.mergeDocStores = mergeDocStores;
    
    // Bind a new segment name here so the merged segment's name is
    // reserved before the merge actually runs:
    merge.info = new SegmentInfo(newSegmentName(), 0, directory, false, true, docStoreOffset, docStoreSegment, docStoreIsCompoundFile, false);
    
    Map details = new HashMap();
    details.put("optimize", merge.optimize + "");
    details.put("mergeFactor", end + "");
    details.put("mergeDocStores", mergeDocStores + "");
    setDiagnostics(merge.info, "merge", details);
    
    mergingSegments.add(merge.info);
  }
  
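  // Stamps provenance diagnostics onto a new segment: which operation
  // produced it ("merge" here; "flush" and others elsewhere) plus the
  // Lucene, OS and JVM versions, so a damaged index can be traced back
  // to the environment that wrote each segment.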
  private void setDiagnostics(SegmentInfo info, String source)
  {
    setDiagnostics(info, source, null);
  }
  
  private void setDiagnostics(SegmentInfo info, String source, Map details)
  {
    Map diagnostics = new HashMap();
    diagnostics.put("source", source);
    diagnostics.put("lucene.version", Constants.LUCENE_VERSION);
    diagnostics.put("os", Constants.OS_NAME + "");
    diagnostics.put("os.arch", Constants.OS_ARCH + "");
    diagnostics.put("os.version", Constants.OS_VERSION + "");
    diagnostics.put("java.version", Constants.JAVA_VERSION + "");
    diagnostics.put("java.vendor", Constants.JAVA_VENDOR + "");
    if (details != null) {
      diagnostics.putAll(details);
    }
    info.setDiagnostics(diagnostics);
  }
  
  /**
   * Returns true if committing before building the merged compound file
   * would free substantial disk space: segments whose compound-file
   * setting changed since the last commit become freeable, and if those
   * add up to more than a third of the total index size we commit first.
   * @deprecated
   */
  private synchronized boolean doCommitBeforeMergeCFS(MergePolicy.OneMerge merge)
    throws IOException
  {
    long freeableBytes = 0L;
    int size = merge.segments.size();
    for (int i = 0; i < size; i++)
    {
      SegmentInfo info = merge.segments.info(i);
      
      Integer loc = (Integer)rollbackSegments.get(info);
      if (loc != null)
      {
        SegmentInfo oldInfo = rollbackSegmentInfos.info(loc.intValue());
        if (oldInfo.getUseCompoundFile() != info.getUseCompoundFile()) {
          freeableBytes += info.sizeInBytes();
        }
      }
    }
    long totalBytes = 0L;
    int numSegments = segmentInfos.size();
    for (int i = 0; i < numSegments; i++) {
      totalBytes += segmentInfos.info(i).sizeInBytes();
    }
    // Commit now if doing so frees more than a third of the index's bytes.
    return 3L * freeableBytes > totalBytes;
  }
  
  final synchronized void mergeFinish(MergePolicy.OneMerge merge)
    throws IOException
  {
    // Optimize, addIndexes or close may be waiting on merges to finish;
    // wake them now that this merge is done:
    notifyAll();
    if (merge.registerDone)
    {
      SegmentInfos sourceSegments = merge.segments;
      int end = sourceSegments.size();
      for (int i = 0; i < end; i++) {
        mergingSegments.remove(sourceSegments.info(i));
      }
      mergingSegments.remove(merge.info);
      merge.registerDone = false;
    }
    runningMerges.remove(merge);
  }
  
  private synchronized void setMergeDocStoreIsCompoundFile(MergePolicy.OneMerge merge)
  {
    String mergeDocStoreSegment = merge.info.getDocStoreSegment();
    if ((mergeDocStoreSegment != null) && (!merge.info.getDocStoreIsCompoundFile()))
    {
      int size = segmentInfos.size();
      for (int i = 0; i < size; i++)
      {
        SegmentInfo info = segmentInfos.info(i);
        String docStoreSegment = info.getDocStoreSegment();
        if ((docStoreSegment != null) && (docStoreSegment.equals(mergeDocStoreSegment)) && (info.getDocStoreIsCompoundFile()))
        {
          merge.info.setDocStoreIsCompoundFile(true);
          break;
        }
      }
    }
  }
  
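  // Releases the pooled readers and private clones held by a merge.
  // With suppressExceptions=true (the failure path) close errors are
  // swallowed so that the original exception propagates unchanged.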
  private final synchronized void closeMergeReaders(MergePolicy.OneMerge merge, boolean suppressExceptions)
    throws IOException
  {
    int numSegments = merge.segments.size();
    if (suppressExceptions) {
      // Suppress any new exceptions so the original cause propagates:
      for (int i = 0; i < numSegments; i++)
      {
        if (merge.readers[i] != null)
        {
          try
          {
            readerPool.release(merge.readers[i], false);
          }
          catch (Throwable t) {}
          merge.readers[i] = null;
        }
        if (merge.readersClone[i] != null)
        {
          try
          {
            merge.readersClone[i].close();
          }
          catch (Throwable t) {}
          // This was a private clone and we had the only reference:
          assert (merge.readersClone[i].getRefCount() == 0) : ("refCount should be 0 but is " + merge.readersClone[i].getRefCount());
          merge.readersClone[i] = null;
        }
      }
    } else {
      for (int i = 0; i < numSegments; i++)
      {
        if (merge.readers[i] != null)
        {
          readerPool.release(merge.readers[i], true);
          merge.readers[i] = null;
        }
        if (merge.readersClone[i] != null)
        {
          merge.readersClone[i].close();
          
          // This was a private clone and we had the only reference:
          assert (merge.readersClone[i].getRefCount() == 0);
          merge.readersClone[i] = null;
        }
      }
    }
  }
  
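  // The heart of a merge: opens a pooled reader plus a private clone for
  // each source segment, runs SegmentMerger over the clones, and
  // optionally packs the result into a compound (.cfs) file.  Returns
  // the merged document count.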
  private final int mergeMiddle(MergePolicy.OneMerge merge)
    throws CorruptIndexException, IOException
  {
    merge.checkAborted(directory);
    
    String mergedName = merge.info.name;
    
    SegmentMerger merger = null;
    
    int mergedDocCount = 0;
    
    SegmentInfos sourceSegments = merge.segments;
    int numSegments = sourceSegments.size();
    if (infoStream != null) {
      message("merging " + merge.segString(directory));
    }
    merger = new SegmentMerger(this, mergedName, merge);
    
    merge.readers = new SegmentReader[numSegments];
    merge.readersClone = new SegmentReader[numSegments];
    
    boolean mergeDocStores = false;
    
    Set dss = new HashSet();
    String currentDocStoreSegment;
    synchronized (this)
    {
      currentDocStoreSegment = docWriter.getDocStoreSegment();
    }
    boolean currentDSSMerged = false;
    
    boolean success = false;
    try
    {
      int totDocCount = 0;
      for (int i = 0; i < numSegments; i++)
      {
        SegmentInfo info = sourceSegments.info(i);
        
        // Hold onto the "live" reader; we will use it to commit merged deletes:
        SegmentReader reader = merge.readers[i] = readerPool.get(info, merge.mergeDocStores, 4096, -1);
        
        // Clone the reader so deletes arriving mid-merge can't change what we merge:
        SegmentReader clone = merge.readersClone[i] = (SegmentReader)reader.clone(true);
        merger.add(clone);
        if (clone.hasDeletions()) {
          mergeDocStores = true;
        }
        if ((info.getDocStoreOffset() != -1) && (currentDocStoreSegment != null)) {
          currentDSSMerged |= currentDocStoreSegment.equals(info.getDocStoreSegment());
        }
        totDocCount += clone.numDocs();
      }
      if (infoStream != null) {
        message("merge: total " + totDocCount + " docs");
      }
      merge.checkAborted(directory);
      // Deletes that arrived while we were setting up now force us to
      // merge the doc stores after all:
      if ((mergeDocStores) && (!merge.mergeDocStores))
      {
        merge.mergeDocStores = true;
        synchronized (this)
        {
          if (currentDSSMerged)
          {
            if (infoStream != null) {
              message("now flush at mergeMiddle");
            }
            doFlush(true, false);
          }
        }
        for (int i = 0; i < numSegments; i++) {
          merge.readersClone[i].openDocStores();
        }
        merge.info.setDocStore(-1, null, false);
      }
      mergedDocCount = merge.info.docCount = merger.merge(merge.mergeDocStores);
      
      assert (mergedDocCount == totDocCount);
      String compoundFileName;
      if (merge.useCompoundFile)
      {
        success = false;
        compoundFileName = IndexFileNames.segmentFileName(mergedName, "cfs");
        try
        {
          if (infoStream != null) {
            message("create compound file " + compoundFileName);
          }
          merger.createCompoundFile(compoundFileName);
          success = true;
        }
        catch (IOException ioe)
        {
          synchronized (this)
          {
            if (!merge.isAborted()) {
              handleMergeException(ioe, merge);
            }
          }
        }
        catch (Throwable t)
        {
          handleMergeException(t, merge);
        }
        finally
        {
          if (!success)
          {
            if (infoStream != null) {
              message("hit exception creating compound file during merge");
            }
            synchronized (this)
            {
              deleter.deleteFile(compoundFileName);
              deleter.deleteNewFiles(merger.getMergedFiles());
            }
          }
        }
        success = false;
        
        synchronized (this)
        {
          // Delete the new non-CFS files directly: they were never
          // registered with the file deleter.
          deleter.deleteNewFiles(merger.getMergedFiles());
          if (merge.isAborted())
          {
            if (infoStream != null) {
              message("abort merge after building CFS");
            }
            deleter.deleteFile(compoundFileName);
            return 0;
          }
        }
        merge.info.setUseCompoundFile(true);
      }
      success = true;
    }
    finally
    {
      if (!success) {
        closeMergeReaders(merge, true);
      }
    }
    merge.mergeDone = true;
    synchronized (mergeScheduler)
    {
      mergeScheduler.notifyAll();
    }
    if (autoCommit)
    {
      long size;
      synchronized (this)
      {
        size = merge.info.sizeInBytes();
      }
      commit(size);
    }
    return mergedDocCount;
  }
  
  synchronized void addMergeException(MergePolicy.OneMerge merge)
  {
    assert (merge.getException() != null);
    if ((!mergeExceptions.contains(merge)) && (merge.mergeGen == mergeGen)) {
      mergeExceptions.add(merge);
    }
  }
  
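  // Applies buffered delete terms/queries to all current segments and,
  // if any document was in fact deleted, checkpoints the new
  // segmentInfos state with the file deleter.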
  private final synchronized boolean applyDeletes()
    throws CorruptIndexException, IOException
  {
    assert (testPoint("startApplyDeletes"));
    flushDeletesCount += 1;
    boolean success = false;
    boolean changed;
    try
    {
      changed = docWriter.applyDeletes(segmentInfos);
      success = true;
    }
    finally
    {
      if ((!success) && (infoStream != null)) {
        message("hit exception flushing deletes");
      }
    }
    if (changed) {
      checkpoint();
    }
    return changed;
  }
  
  final synchronized int getBufferedDeleteTermsSize()
  {
    return docWriter.getBufferedDeleteTerms().size();
  }
  
  final synchronized int getNumBufferedDeleteTerms()
  {
    return docWriter.getNumBufferedDeleteTerms();
  }
  
  SegmentInfo newestSegment()
  {
    return segmentInfos.size() > 0 ? segmentInfos.info(segmentInfos.size() - 1) : null;
  }
  
  public synchronized String segString()
  {
    return segString(segmentInfos);
  }
  
  private synchronized String segString(SegmentInfos infos)
  {
    StringBuffer buffer = new StringBuffer();
    int count = infos.size();
    for (int i = 0; i < count; i++)
    {
      if (i > 0) {
        buffer.append(' ');
      }
      SegmentInfo info = infos.info(i);
      buffer.append(info.segString(directory));
      if (info.dir != directory) {
        buffer.append("**");
      }
    }
    return buffer.toString();
  }
  
  private final HashSet synced = new HashSet();
  private HashSet syncing = new HashSet();
  private IndexReaderWarmer mergedSegmentWarmer;
  private boolean allowMinus1Position;
  
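  // Commit-time sync bookkeeping: "syncing" holds files some thread is
  // currently fsync'ing, "synced" holds files known to be durable.
  // startSync claims a file for the calling thread or, if another thread
  // already owns it, parks it on the pending list to be awaited later.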
  private boolean startSync(String fileName, Collection pending)
  {
    synchronized (synced)
    {
      if (!synced.contains(fileName))
      {
        if (!syncing.contains(fileName))
        {
          syncing.add(fileName);
          return true;
        }
        pending.add(fileName);
        return false;
      }
      return false;
    }
  }
  
  private void finishSync(String fileName, boolean success)
  {
    synchronized (synced)
    {
      assert (syncing.contains(fileName));
      syncing.remove(fileName);
      if (success) {
        synced.add(fileName);
      }
      synced.notifyAll();
    }
  }
  
  private boolean waitForAllSynced(Collection syncing)
    throws IOException
  {
    synchronized (synced)
    {
      Iterator it = syncing.iterator();
      while (it.hasNext())
      {
        String fileName = (String)it.next();
        while (!synced.contains(fileName))
        {
          if (!syncing.contains(fileName)) {
            return false;
          }
          try
          {
            synced.wait();
          }
          catch (InterruptedException ie)
          {
            Thread.currentThread().interrupt();
            throw new RuntimeException(ie);
          }
        }
      }
      return true;
    }
  }
  
  /**
   * @deprecated
   */
  private void syncPause(long sizeInBytes)
  {
    if (((mergeScheduler instanceof ConcurrentMergeScheduler)) && (maxSyncPauseSeconds > 0.0D))
    {
      // Pause time is proportional to the merge size, assuming ~10 MB/sec:
      long pauseTime = 1000L * sizeInBytes / 10L / 1024L / 1024L;
      long maxPauseTime = (long)(maxSyncPauseSeconds * 1000.0D);
      if (pauseTime > maxPauseTime) {
        pauseTime = maxPauseTime;
      }
      int sleepCount = (int)(pauseTime / 100L);
      for (int i = 0; i < sleepCount; i++)
      {
        synchronized (this)
        {
          if ((stopMerges) || (closing)) {
            break;
          }
        }
        try
        {
          Thread.sleep(100L);
        }
        catch (InterruptedException ie)
        {
          Thread.currentThread().interrupt();
          throw new RuntimeException(ie);
        }
      }
    }
  }
  
  private synchronized void doWait()
  {
    try
    {
      wait(1000L);
    }
    catch (InterruptedException ie)
    {
      Thread.currentThread().interrupt();
      throw new RuntimeException(ie);
    }
  }
  
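  // Phase one of the two-phased commit: fsync every file referenced by a
  // private clone of segmentInfos, then write the pending segments file
  // via prepareCommit.  The commit becomes visible only when the pending
  // infos are later finalized.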
  private void startCommit(long sizeInBytes, Map commitUserData)
    throws IOException
  {
    assert (testPoint("startStartCommit"));
    if (hitOOM) {
      throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot commit");
    }
    try
    {
      if (infoStream != null) {
        message("startCommit(): start sizeInBytes=" + sizeInBytes);
      }
      if (sizeInBytes > 0L) {
        syncPause(sizeInBytes);
      }
      SegmentInfos toSync = null;
      long myChangeCount;
      synchronized (this)
      {
        if ((sizeInBytes > 0L) && (stopMerges)) {
          return;
        }
        blockAddIndexes(false);
        
        assert (!hasExternalSegments());
        try
        {
          assert (lastCommitChangeCount <= changeCount);
          myChangeCount = changeCount;
          if (changeCount == lastCommitChangeCount)
          {
            if (infoStream != null) {
              message("  skip startCommit(): no changes pending");
            }
            return;
          }
          if (infoStream != null) {
            message("startCommit index=" + segString(segmentInfos) + " changeCount=" + changeCount);
          }
          readerPool.commit();
          
          toSync = (SegmentInfos)segmentInfos.clone();
          String dss = docWriter.getDocStoreSegment();
          if (dss != null) {
            for (;;)
            {
              String dss2 = toSync.info(toSync.size() - 1).getDocStoreSegment();
              if ((dss2 == null) || (!dss2.equals(dss))) {
                break;
              }
              toSync.remove(toSync.size() - 1);
              changeCount += 1L;
            }
          }
          if (commitUserData != null) {
            toSync.setUserData(commitUserData);
          }
          deleter.incRef(toSync, false);
          
          Iterator it = toSync.files(directory, false).iterator();
          while (it.hasNext())
          {
            String fileName = (String)it.next();
            assert (directory.fileExists(fileName)) : ("file " + fileName + " does not exist");
            
            assert (deleter.exists(fileName));
          }
        }
        finally
        {
          resumeAddIndexes();
        }
      }
      assert (testPoint("midStartCommit"));
      
      boolean setPending = false;
      try
      {
        for (;;)
        {
          Collection pending = new ArrayList();
          
          Iterator it = toSync.files(directory, false).iterator();
          while (it.hasNext())
          {
            String fileName = (String)it.next();
            if (startSync(fileName, pending))
            {
              boolean success = false;
              try
              {
                assert (directory.fileExists(fileName)) : ("file '" + fileName + "' does not exist dir=" + directory);
                if (infoStream != null) {
                  message("now sync " + fileName);
                }
                directory.sync(fileName);
                success = true;
              }
              finally
              {
                finishSync(fileName, success);
              }
            }
          }
          if (waitForAllSynced(pending)) {
            break;
          }
        }
        assert (testPoint("midStartCommit2"));
        synchronized (this)
        {
          for (;;)
          {
            if (myChangeCount <= lastCommitChangeCount)
            {
              if (infoStream != null) {
                message("sync superseded by newer infos");
              }
              break;
            }
            if (pendingCommit == null)
            {
              if (segmentInfos.getGeneration() > toSync.getGeneration()) {
                toSync.updateGeneration(segmentInfos);
              }
              boolean success = false;
              try
              {
                try
                {
                  toSync.prepareCommit(directory);
                }
                finally
                {
                  segmentInfos.updateGeneration(toSync);
                }
                assert (pendingCommit == null);
                setPending = true;
                pendingCommit = toSync;
                pendingCommitChangeCount = myChangeCount;
                success = true;
              }
              finally
              {
                if ((!success) && (infoStream != null)) {
                  message("hit exception committing segments file");
                }
              }
              break;
            }
            doWait();
          }
        }
        if (infoStream != null) {
          message("done all syncs");
        }
        assert (testPoint("midStartCommitSuccess"));
      }
      finally
      {
        synchronized (this)
        {
          if (!setPending) {
            deleter.decRef(toSync);
          }
        }
      }
    }
    catch (OutOfMemoryError oom)
    {
      handleOOM(oom, "startCommit");
    }
    assert (testPoint("finishStartCommit"));
  }
  
  public static boolean isLocked(Directory directory)
    throws IOException
  {
    return directory.makeLock("write.lock").isLocked();
  }
  
  /**
   * @deprecated
   */
  public static boolean isLocked(String directory)
    throws IOException
  {
    Directory dir = FSDirectory.getDirectory(directory);
    try
    {
      return isLocked(dir);
    }
    finally
    {
      dir.close();
    }
  }
  
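  // Forcibly releases the write lock on the given index.  Only safe when
  // no other IndexWriter is currently open on that directory.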
  public static void unlock(Directory directory)
    throws IOException
  {
    directory.makeLock("write.lock").release();
  }
  
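  /**
   * Specifies the maximum number of terms indexed per field; terms past
   * the limit are silently dropped.  A minimal usage sketch, assuming a
   * Directory dir and an Analyzer analyzer are already in scope:
   *
   *   IndexWriter writer = new IndexWriter(dir, analyzer,
   *       IndexWriter.MaxFieldLength.LIMITED);
   *
   * LIMITED caps each field at 10,000 terms (the historical default);
   * UNLIMITED indexes every term.
   */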
  public static final class MaxFieldLength
  {
    private int limit;
    private String name;
    
    private MaxFieldLength(String name, int limit)
    {
      this.name = name;
      this.limit = limit;
    }
    
    public MaxFieldLength(int limit)
    {
      this("User-specified", limit);
    }
    
    public int getLimit()
    {
      return limit;
    }
    
    public String toString()
    {
      return name + ":" + limit;
    }
    
    public static final MaxFieldLength UNLIMITED = new MaxFieldLength("UNLIMITED", Integer.MAX_VALUE);
    public static final MaxFieldLength LIMITED = new MaxFieldLength("LIMITED", 10000);
  }
  
  public void setMergedSegmentWarmer(IndexReaderWarmer warmer)
  {
    mergedSegmentWarmer = warmer;
  }
  
  public IndexReaderWarmer getMergedSegmentWarmer()
  {
    return mergedSegmentWarmer;
  }
  
  private void handleOOM(OutOfMemoryError oom, String location)
  {
    if (infoStream != null) {
      message("hit OutOfMemoryError inside " + location);
    }
    hitOOM = true;
    throw oom;
  }
  
  public void setAllowMinus1Position()
  {
    allowMinus1Position = true;
    docWriter.setAllowMinus1Position();
  }
  
  boolean getAllowMinus1Position()
  {
    return allowMinus1Position;
  }
  
  boolean testPoint(String name)
  {
    return true;
  }
  
  synchronized boolean nrtIsCurrent(SegmentInfos infos)
  {
    if (!infos.equals(segmentInfos)) {
      return false;
    }
    if (infos.getGeneration() != segmentInfos.getGeneration()) {
      return false;
    }
    return !docWriter.anyChanges();
  }
  
  synchronized boolean isClosed()
  {
    return closed;
  }
  
  protected void doAfterFlush()
    throws IOException
  {}
  
  protected void doBeforeFlush()
    throws IOException
  {}
  
  // Runs one merge end-to-end: initialize, do the work in mergeMiddle,
  // then finish and clean up (refreshing the deleter if we failed).
  final void merge(MergePolicy.OneMerge merge)
    throws CorruptIndexException, IOException
  {
    boolean success = false;
    try
    {
      try
      {
        try
        {
          mergeInit(merge);
          if (infoStream != null) {
            message("now merge\n  merge=" + merge.segString(directory) + "\n  merge=" + merge + "\n  index=" + segString());
          }
          mergeMiddle(merge);
          mergeSuccess(merge);
          success = true;
        }
        catch (Throwable t)
        {
          handleMergeException(t, merge);
        }
      }
      finally
      {
        synchronized (this)
        {
          mergeFinish(merge);
          if (!success)
          {
            if (infoStream != null) {
              message("hit exception during merge");
            }
            if ((merge.info != null) && (!segmentInfos.contains(merge.info))) {
              deleter.refresh(merge.info.name);
            }
          }
          // This merge (and, generally, any change to the segments) may
          // now enable new merges, so we ask the merge policy again:
          if ((success) && (!merge.isAborted()) && (!closed) && (!closing)) {
            updatePendingMerges(merge.maxNumSegmentsOptimize, merge.optimize);
          }
        }
      }
    }
    catch (OutOfMemoryError oom)
    {
      handleOOM(oom, "merge");
    }
  }
  
  void mergeSuccess(MergePolicy.OneMerge merge) {}
  
  public static abstract class IndexReaderWarmer
  {
    public abstract void warm(IndexReader paramIndexReader)
      throws IOException;
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.index.IndexWriter
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.index;

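// Pools reusable int[] blocks of 8192 ints for the indexing chain so
// per-document postings data avoids constant reallocation.  intUpto is
// the next free slot within the head buffer; intOffset is that buffer's
// starting offset in the pool's global address space.  Both begin
// "exhausted" so the first nextBuffer() call allocates block 0.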
final class IntBlockPool
{
  public int[][] buffers = new int[10][];
  int bufferUpto = -1;          // Which buffer we are up to
  public int intUpto = 8192;    // Next free slot within the head buffer
  public int[] buffer;          // Current head buffer
  public int intOffset = -8192; // Head buffer's global offset; first nextBuffer() brings this to 0
  private final DocumentsWriter docWriter;
  final boolean trackAllocations;
  
  public IntBlockPool(DocumentsWriter docWriter, boolean trackAllocations)
  {
    this.docWriter = docWriter;
    this.trackAllocations = trackAllocations;
  }
  
  public void reset()
  {
    if (bufferUpto != -1)
    {
      if (bufferUpto > 0) {
        docWriter.recycleIntBlocks(buffers, 1, 1 + bufferUpto);
      }
      bufferUpto = 0;
      intUpto = 0;
      intOffset = 0;
      buffer = buffers[0];
    }
  }
  
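  // Advances to a fresh 8192-int block, growing the buffer table by 1.5x
  // when it is full; blocks are obtained from DocumentsWriter.getIntBlock
  // so their RAM can be counted against the writer's buffer budget.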
  public void nextBuffer()
  {
    if (1 + bufferUpto == buffers.length)
    {
      int[][] newBuffers = new int[(int)(buffers.length * 1.5D)][];
      System.arraycopy(buffers, 0, newBuffers, 0, buffers.length);
      buffers = newBuffers;
    }
    buffer = (buffers[(1 + bufferUpto)] = docWriter.getIntBlock(trackAllocations));
    bufferUpto += 1;
    
    intUpto = 0;
    intOffset += 8192;
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.index.IntBlockPool
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.index;

import java.io.IOException;
import java.util.Map;

abstract class InvertedDocConsumer
{
  FieldInfos fieldInfos;
  
  abstract InvertedDocConsumerPerThread addThread(DocInverterPerThread paramDocInverterPerThread);
  
  abstract void abort();
  
  abstract void flush(Map paramMap, SegmentWriteState paramSegmentWriteState)
    throws IOException;
  
  abstract void closeDocStore(SegmentWriteState paramSegmentWriteState)
    throws IOException;
  
  abstract boolean freeRAM();
  
  void setFieldInfos(FieldInfos fieldInfos)
  {
    this.fieldInfos = fieldInfos;
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.index.InvertedDocConsumer
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.index;

import java.io.IOException;
import org.apache.lucene.document.Fieldable;

abstract class InvertedDocConsumerPerField
{
  abstract boolean start(Fieldable[] paramArrayOfFieldable, int paramInt)
    throws IOException;
  
  abstract void start(Fieldable paramFieldable);
  
  abstract void add()
    throws IOException;
  
  abstract void finish()
    throws IOException;
  
  abstract void abort();
}

/* Location:
 * Qualified Name:     org.apache.lucene.index.InvertedDocConsumerPerField
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.index;

import java.io.IOException;

abstract class InvertedDocConsumerPerThread
{
  abstract void startDocument()
    throws IOException;
  
  abstract InvertedDocConsumerPerField addField(DocInverterPerField paramDocInverterPerField, FieldInfo paramFieldInfo);
  
  abstract DocumentsWriter.DocWriter finishDocument()
    throws IOException;
  
  abstract void abort();
}

/* Location:
 * Qualified Name:     org.apache.lucene.index.InvertedDocConsumerPerThread
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.index;

import java.io.IOException;
import java.util.Map;

abstract class InvertedDocEndConsumer
{
  abstract InvertedDocEndConsumerPerThread addThread(DocInverterPerThread paramDocInverterPerThread);
  
  abstract void flush(Map paramMap, SegmentWriteState paramSegmentWriteState)
    throws IOException;
  
  abstract void closeDocStore(SegmentWriteState paramSegmentWriteState)
    throws IOException;
  
  abstract void abort();
  
  abstract void setFieldInfos(FieldInfos paramFieldInfos);
}

/* Location:
 * Qualified Name:     org.apache.lucene.index.InvertedDocEndConsumer
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.index;

abstract class InvertedDocEndConsumerPerField
{
  abstract void finish();
  
  abstract void abort();
}

/* Location:
 * Qualified Name:     org.apache.lucene.index.InvertedDocEndConsumerPerField
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.index;

abstract class InvertedDocEndConsumerPerThread
{
  abstract void startDocument();
  
  abstract InvertedDocEndConsumerPerField addField(DocInverterPerField paramDocInverterPerField, FieldInfo paramFieldInfo);
  
  abstract void finishDocument();
  
  abstract void abort();
}

/* Location:
 * Qualified Name:     org.apache.lucene.index.InvertedDocEndConsumerPerThread
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.index;

import java.util.List;

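/**
 * The default IndexDeletionPolicy: on startup and after every commit,
 * all commits except the most recent one are deleted, so exactly one
 * segments_N generation survives once the deleter runs.
 */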
public final class KeepOnlyLastCommitDeletionPolicy
  implements IndexDeletionPolicy
{
  public void onInit(List commits)
  {
    onCommit(commits);
  }
  
  public void onCommit(List commits)
  {
    int size = commits.size();
    for (int i = 0; i < size - 1; i++) {
      ((IndexCommit)commits.get(i)).delete();
    }
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.index;

import java.io.IOException;

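/**
 * LogMergePolicy that levels segments by total size in bytes.  Segments
 * smaller than minMergeSize (1.6 MB by default) are lumped into a single
 * level; segments larger than maxMergeSize are never merged.
 */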
public class LogByteSizeMergePolicy
  extends LogMergePolicy
{
  public static final double DEFAULT_MIN_MERGE_MB = 1.6D;
  public static final double DEFAULT_MAX_MERGE_MB = 9.223372036854776E18D;
  
  public LogByteSizeMergePolicy(IndexWriter writer)
  {
    super(writer);
    minMergeSize = (long)(DEFAULT_MIN_MERGE_MB * 1024.0D * 1024.0D);
    maxMergeSize = Long.MAX_VALUE;
  }
  
  protected long size(SegmentInfo info)
    throws IOException
  {
    return sizeBytes(info);
  }
  
  public void setMaxMergeMB(double mb)
  {
    maxMergeSize = (long)(mb * 1024.0D * 1024.0D);
  }
  
  public double getMaxMergeMB()
  {
    return maxMergeSize / 1024.0D / 1024.0D;
  }
  
  public void setMinMergeMB(double mb)
  {
    minMergeSize = (long)(mb * 1024.0D * 1024.0D);
  }
  
  public double getMinMergeMB()
  {
    return minMergeSize / 1024.0D / 1024.0D;
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.index.LogByteSizeMergePolicy
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.index;

import java.io.IOException;

public class LogDocMergePolicy
  extends LogMergePolicy
{
  public static final int DEFAULT_MIN_MERGE_DOCS = 1000;
  
  public LogDocMergePolicy(IndexWriter writer)
  {
    super(writer);
    minMergeSize = 1000L;
    
    maxMergeSize = Long.MAX_VALUE;
  }
  
  protected long size(SegmentInfo info)
    throws IOException
  {
    return sizeDocs(info);
  }
  
  public void setMinMergeDocs(int minMergeDocs)
  {
    minMergeSize = minMergeDocs;
  }
  
  public int getMinMergeDocs()
  {
    return (int)minMergeSize;
  }
}

/* Location:
 * Qualified Name:     org.apache.lucene.index.LogDocMergePolicy
 * Java Class Version: 1.4 (48.0)
 * JD-Core Version:    0.7.1
 */
package org.apache.lucene.index;

import java.io.IOException;
import java.util.List;
import java.util.Set;

public abstract class LogMergePolicy
  extends MergePolicy
{
  public static final double LEVEL_LOG_SPAN = 0.75D;
  public static final int DEFAULT_MERGE_FACTOR = 10;
  public static final int DEFAULT_MAX_MERGE_DOCS = Integer.MAX_VALUE;
  public static final double DEFAULT_NO_CFS_RATIO = 0.1D;
  private int mergeFactor = 10;
  long minMergeSize;
  long maxMergeSize;
  int maxMergeDocs = Integer.MAX_VALUE;
  protected double noCFSRatio = 0.1D;
  protected boolean calibrateSizeByDeletes = false;
  private boolean useCompoundFile = true;
  private boolean useCompoundDocStore = true;
  
  public LogMergePolicy(IndexWriter writer)
  {
    super(writer);
  }
  
  protected boolean verbose()
  {
    return (writer != null) && (writer.verbose());
  }
  
  public double getNoCFSRatio()
  {
    return noCFSRatio;
  }
  
  public void setNoCFSRatio(double noCFSRatio)
  {
    if ((noCFSRatio < 0.0D) || (noCFSRatio > 1.0D)) {
      throw new IllegalArgumentException("noCFSRatio must be 0.0 to 1.0 inclusive; got " + noCFSRatio);
    }
    this.noCFSRatio = noCFSRatio;
  }
  
  private void message(String message)
  {
    if (verbose()) {
      writer.message("LMP: " + message);
    }
  }
  
  public int getMergeFactor()
  {
    return mergeFactor;
  }
  
  public void setMergeFactor(int mergeFactor)
  {
    if (mergeFactor < 2) {
      throw new IllegalArgumentException("mergeFactor cannot be less than 2");
    }
    this.mergeFactor = mergeFactor;
  }
  
  public boolean useCompoundFile(SegmentInfos infos, SegmentInfo info)
  {
    return useCompoundFile;
  }
  
  public void setUseCompoundFile(boolean useCompoundFile)
  {
    this.useCompoundFile = useCompoundFile;
  }
  
  public boolean getUseCompoundFile()
  {
    return useCompoundFile;
  }
  
  public boolean useCompoundDocStore(SegmentInfos infos)
  {
    return useCompoundDocStore;
  }
  
  public void setUseCompoundDocStore(boolean useCompoundDocStore)
  {
    this.useCompoundDocStore = useCompoundDocStore;
  }
  
  public boolean getUseCompoundDocStore()
  {
    return useCompoundDocStore;
  }
  
  public void setCalibrateSizeByDeletes(boolean calibrateSizeByDeletes)
  {
    this.calibrateSizeByDeletes = calibrateSizeByDeletes;
  }
  
  public boolean getCalibrateSizeByDeletes()
  {
    return calibrateSizeByDeletes;
  }
  
  public void close() {}
  
  protected abstract long size(SegmentInfo paramSegmentInfo)
    throws IOException;
  
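  // With calibrateSizeByDeletes enabled, a segment's effective size is
  // pro-rated by its live (non-deleted) documents, so segments carrying
  // many deletes look smaller and are selected for merging sooner.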
  protected long sizeDocs(SegmentInfo info)
    throws IOException
  {
    if (calibrateSizeByDeletes)
    {
      int delCount = writer.numDeletedDocs(info);
      return info.docCount - delCount;
    }
    return info.docCount;
  }
  
  protected long sizeBytes(SegmentInfo info)
    throws IOException
  {
    long byteSize = info.sizeInBytes();
    if (calibrateSizeByDeletes)
    {
      int delCount = writer.numDeletedDocs(info);
      float delRatio = info.docCount <= 0 ? 0.0F : (float)delCount / (float)info.docCount;
      return info.docCount <= 0 ? byteSize : ((long)((float)byteSize * (1.0F - delRatio)));
    }
    return byteSize;
  }
  
  private boolean isOptimized(SegmentInfos infos, int maxNumSegments, Set segmentsToOptimize)
    throws IOException
  {
    int numSegments = infos.size();
    int numToOptimize = 0;
    SegmentInfo optimizeInfo = null;
    for (int i = 0; (i < numSegments) && (numToOptimize <= maxNumSegments); i++)
    {
      SegmentInfo info = infos.info(i);
      if (segmentsToOptimize.contains(info))
      {
        numToOptimize++;
        optimizeInfo = info;
      }
    }
    return (numToOptimize <= maxNumSegments) && ((numToOptimize != 1) || (isOptimized(optimizeInfo)));
  }
  
  private boolean isOptimized(SegmentInfo info)
    throws IOException
  {
    boolean hasDeletions = writer.numDeletedDocs(info) > 0;
    return (!hasDeletions) && (!info.hasSeparateNorms()) && (info.dir == writer.getDirectory()) && ((info.getUseCompoundFile() == useCompoundFile) || (noCFSRatio < 1.0D));
  }
  
  public MergePolicy.MergeSpecification findMergesForOptimize(SegmentInfos infos, int maxNumSegments, Set segmentsToOptimize)
    throws IOException
  {
    assert (maxNumSegments > 0);
    MergePolicy.MergeSpecification spec;
    if (!isOptimized(infos, maxNumSegments, segmentsToOptimize))