Code example #1
File: IndexHistory.java Project: nabils/jackrabbit
 /** Removes index infos older than {@link #maxAge} from this history. */
 void pruneOutdated() {
   long threshold = System.currentTimeMillis() - maxAge;
   log.debug("Pruning index infos older than: " + threshold + "(" + indexDir + ")");
   Iterator<IndexInfos> it = indexInfosMap.values().iterator();
   // never prune the current generation
   if (it.hasNext()) {
     IndexInfos infos = it.next();
     log.debug("Skipping first index infos. generation=" + infos.getGeneration());
   }
   while (it.hasNext()) {
     IndexInfos infos = it.next();
     if (infos.getLastModified() < threshold) {
       // check associated redo log
       try {
         String logName = getRedoLogName(infos.getGeneration());
         if (indexDir.fileExists(logName)) {
           long lastModified = indexDir.fileModified(logName);
           if (lastModified > threshold) {
             log.debug(
                 "Keeping redo log with generation={}, timestamp={}",
                 infos.getGeneration(),
                 lastModified);
             continue;
           }
           // try to delete it
           try {
             indexDir.deleteFile(logName);
             log.debug(
                 "Deleted redo log with generation={}, timestamp={}",
                 infos.getGeneration(),
                 lastModified);
           } catch (IOException e) {
             log.warn("Unable to delete: " + indexDir + "/" + logName);
             continue;
           }
         }
         // delete index infos
         try {
           indexDir.deleteFile(infos.getFileName());
           log.debug("Deleted index infos with generation={}", infos.getGeneration());
           it.remove();
         } catch (IOException e) {
           log.warn("Unable to delete: " + indexDir + "/" + infos.getFileName());
         }
       } catch (IOException e) {
         log.warn("Failed to check if {} is outdated: {}", infos.getFileName(), e);
       }
     }
   }
 }
Code example #2
  /*
   * Test a deletion policy that keeps last N commits.
   */
  public void testKeepLastNDeletionPolicy() throws IOException {
    final int N = 5;

    for (int pass = 0; pass < 2; pass++) {

      boolean useCompoundFile = (pass % 2) != 0;

      Directory dir = newDirectory();
      if (dir instanceof MockDirectoryWrapper) {
        // test manually deletes files
        ((MockDirectoryWrapper) dir).setEnableVirusScanner(false);
      }

      KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);
      for (int j = 0; j < N + 1; j++) {
        IndexWriterConfig conf =
            newIndexWriterConfig(new MockAnalyzer(random()))
                .setOpenMode(OpenMode.CREATE)
                .setIndexDeletionPolicy(policy)
                .setMaxBufferedDocs(10);
        MergePolicy mp = conf.getMergePolicy();
        mp.setNoCFSRatio(useCompoundFile ? 1.0 : 0.0);
        IndexWriter writer = new IndexWriter(dir, conf);
        policy = (KeepLastNDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
        for (int i = 0; i < 17; i++) {
          addDoc(writer);
        }
        writer.forceMerge(1);
        writer.close();
      }

      assertTrue(policy.numDelete > 0);
      assertEquals(N + 1, policy.numOnInit);
      assertEquals(N + 1, policy.numOnCommit);

      // Simplistic check: just verify only the past N segments_N's still
      // exist, and, I can open a reader on each:
      long gen = SegmentInfos.getLastCommitGeneration(dir);
      for (int i = 0; i < N + 1; i++) {
        try {
          IndexReader reader = DirectoryReader.open(dir);
          reader.close();
          if (i == N) {
            fail("should have failed on commits prior to last " + N);
          }
        } catch (IOException e) {
          if (i != N) {
            throw e;
          }
        }
        if (i < N) {
          dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
        }
        gen--;
      }

      dir.close();
    }
  }
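
A side note on the filename arithmetic behind the backwards walk above: IndexFileNames.fileNameFromGeneration encodes the generation in radix 36, so decrementing gen steps back through the segments_N files one commit at a time. A small illustration follows; the radix-36 detail matches Lucene's long-standing implementation, but treat it as an assumption for your exact version.

  // Generation-to-filename mapping used by the loop above:
  //   generation 1  -> "segments_1"
  //   generation 10 -> "segments_a"   (generations are printed in radix 36)
  //   generation 36 -> "segments_10"
  String name = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", 10);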
Code example #3
File: SegmentInfos.java Project: kerie/marswork
  final void finishCommit(Directory dir) throws IOException {
    if (pendingSegnOutput == null) throw new IllegalStateException("prepareCommit was not called");
    boolean success = false;
    try {
      pendingSegnOutput.finishCommit();
      pendingSegnOutput.close();
      pendingSegnOutput = null;
      success = true;
    } finally {
      if (!success) rollbackCommit(dir);
    }

    // NOTE: if we crash here, we have left a segments_N
    // file in the directory in a possibly corrupt state (if
    // some bytes made it to stable storage and others
    // didn't).  But the segments_N file includes a checksum
    // at the end, which should catch this case.  So when a
    // reader tries to read it, it will throw a
    // CorruptIndexException, which should cause the retry
    // logic in SegmentInfos to kick in and load the last
    // good (previous) segments_N-1 file.

    final String fileName =
        IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", generation);
    success = false;
    try {
      dir.sync(Collections.singleton(fileName));
      success = true;
    } finally {
      if (!success) {
        try {
          dir.deleteFile(fileName);
        } catch (Throwable t) {
          // Suppress so we keep throwing the original exception
        }
      }
    }

    lastGeneration = generation;

    try {
      IndexOutput genOutput = dir.createOutput(IndexFileNames.SEGMENTS_GEN);
      try {
        genOutput.writeInt(FORMAT_LOCKLESS);
        genOutput.writeLong(generation);
        genOutput.writeLong(generation);
      } finally {
        genOutput.close();
      }
    } catch (ThreadInterruptedException t) {
      throw t;
    } catch (Throwable t) {
      // It's OK if we fail to write this file since it's
      // used only as one of the retry fallbacks.
    }
  }
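
For context, finishCommit is the second half of a two-phase commit against the Directory: write (code example #4) prepares a pending segments_N, finishCommit makes it durable, and rollbackCommit (code example #6) undoes a failed attempt. Below is a minimal caller-side sketch of that sequence; the wrapper method is an assumption (these SegmentInfos methods are package-private, so the real caller lives inside org.apache.lucene.index), and only the three calls mirror the code in this section.

  // Hedged sketch of the two-phase commit protocol around the methods above.
  void twoPhaseCommit(SegmentInfos sis, Directory dir) throws IOException {
    sis.prepareCommit(dir); // phase 1: writes a pending segments_N via write()
    try {
      sis.finishCommit(dir); // phase 2: close pending output, sync, update segments.gen
    } catch (IOException e) {
      sis.rollbackCommit(dir); // safe even after finishCommit's own rollback: it no-ops
      throw e;
    }
  }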
Code example #4
File: SegmentInfos.java Project: kerie/marswork
  private final void write(Directory directory) throws IOException {

    String segmentFileName = getNextSegmentFileName();

    // Always advance the generation on write:
    if (generation == -1) {
      generation = 1;
    } else {
      generation++;
    }

    ChecksumIndexOutput segnOutput =
        new ChecksumIndexOutput(directory.createOutput(segmentFileName));

    boolean success = false;

    try {
      segnOutput.writeInt(CURRENT_FORMAT); // write FORMAT
      segnOutput.writeLong(version);
      segnOutput.writeInt(counter); // write counter
      segnOutput.writeInt(size()); // write infos
      for (SegmentInfo si : this) {
        si.write(segnOutput);
      }
      segnOutput.writeStringStringMap(userData);
      segnOutput.prepareCommit();
      pendingSegnOutput = segnOutput;
      success = true;
    } finally {
      if (!success) {
        // We hit an exception above; try to close the file
        // but suppress any exception:
        IOUtils.closeSafely(true, segnOutput);
        try {
          // Try not to leave a truncated segments_N file in
          // the index:
          directory.deleteFile(segmentFileName);
        } catch (Throwable t) {
          // Suppress so we keep throwing the original exception
        }
      }
    }
  }
Code example #5
  /**
   * Closes the temporary file, compresses its data, and removes the temporary file.
   *
   * @throws IOException if an I/O error occurs
   */
  @Override
  public void close() throws IOException {
    byte[] buffer = new byte[chunkSize];
    tempOut.close();
    // the chunk directory maps offsets of compressed chunks to their real
    // positions in the decompressed stream
    IndexInput in = tempDirectory.openInput(tmpName);
    long len = closeLength = in.length();
    // write the length of the file at the beginning for easier retrieval
    output.writeLong(-1);

    // write configuration
    writeConfig();
    int toRead;
    // read all the data and compress it in variable-size chunks
    while (len > 0) {
      if (len > buffer.length) {
        toRead = buffer.length;
      } else {
        toRead = (int) len;
      }

      // just for safety (could be improved)
      long bufferPos = in.getFilePointer();
      // read original data
      in.readBytes(buffer, 0, toRead);

      writeChunk(buffer, bufferPos, toRead);

      len -= toRead;
    }
    // now create the directory entry for all chunks and their original
    // positions in the inflated stream

    in.close();
    if (tempDirectory.fileExists(tmpName)) {
      tempDirectory.deleteFile(tmpName);
    }
    super.close();
  }
Code example #6
File: SegmentInfos.java Project: kerie/marswork
  final void rollbackCommit(Directory dir) throws IOException {
    if (pendingSegnOutput != null) {
      try {
        pendingSegnOutput.close();
      } catch (Throwable t) {
        // Suppress so we keep throwing the original exception
        // in our caller
      }

      // Must carefully compute fileName from "generation"
      // since lastGeneration isn't incremented:
      try {
        final String segmentFileName =
            IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", generation);
        dir.deleteFile(segmentFileName);
      } catch (Throwable t) {
        // Suppress so we keep throwing the original exception
        // in our caller
      }
      pendingSegnOutput = null;
    }
  }
Code example #7
  /*
   * Test a deletion policy that keeps last N commits
   * around, through creates.
   */
  public void testKeepLastNDeletionPolicyWithCreates() throws IOException {

    final int N = 10;

    for (int pass = 0; pass < 2; pass++) {

      boolean useCompoundFile = (pass % 2) != 0;

      Directory dir = newDirectory();
      if (dir instanceof MockDirectoryWrapper) {
        // test manually deletes files
        ((MockDirectoryWrapper) dir).setEnableVirusScanner(false);
      }
      IndexWriterConfig conf =
          newIndexWriterConfig(new MockAnalyzer(random()))
              .setOpenMode(OpenMode.CREATE)
              .setIndexDeletionPolicy(new KeepLastNDeletionPolicy(N))
              .setMaxBufferedDocs(10);
      MergePolicy mp = conf.getMergePolicy();
      mp.setNoCFSRatio(useCompoundFile ? 1.0 : 0.0);
      IndexWriter writer = new IndexWriter(dir, conf);
      KeepLastNDeletionPolicy policy =
          (KeepLastNDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
      writer.close();
      Term searchTerm = new Term("content", "aaa");
      Query query = new TermQuery(searchTerm);

      for (int i = 0; i < N + 1; i++) {

        conf =
            newIndexWriterConfig(new MockAnalyzer(random()))
                .setOpenMode(OpenMode.APPEND)
                .setIndexDeletionPolicy(policy)
                .setMaxBufferedDocs(10);
        mp = conf.getMergePolicy();
        mp.setNoCFSRatio(useCompoundFile ? 1.0 : 0.0);
        writer = new IndexWriter(dir, conf);
        policy = (KeepLastNDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
        for (int j = 0; j < 17; j++) {
          addDocWithID(writer, i * (N + 1) + j);
        }
        // this is a commit
        writer.close();
        conf =
            new IndexWriterConfig(new MockAnalyzer(random()))
                .setIndexDeletionPolicy(policy)
                .setMergePolicy(NoMergePolicy.INSTANCE);
        writer = new IndexWriter(dir, conf);
        policy = (KeepLastNDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
        writer.deleteDocuments(new Term("id", "" + (i * (N + 1) + 3)));
        // this is a commit
        writer.close();
        IndexReader reader = DirectoryReader.open(dir);
        IndexSearcher searcher = newSearcher(reader);
        ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs;
        assertEquals(16, hits.length);
        reader.close();

        writer =
            new IndexWriter(
                dir,
                newIndexWriterConfig(new MockAnalyzer(random()))
                    .setOpenMode(OpenMode.CREATE)
                    .setIndexDeletionPolicy(policy));
        policy = (KeepLastNDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
        // This will not commit: there are no changes
        // pending because we opened for "create":
        writer.close();
      }

      assertEquals(3 * (N + 1) + 1, policy.numOnInit);
      assertEquals(3 * (N + 1) + 1, policy.numOnCommit);

      IndexReader rwReader = DirectoryReader.open(dir);
      IndexSearcher searcher = newSearcher(rwReader);
      ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs;
      assertEquals(0, hits.length);

      // Simplistic check: just verify only the past N segments_N's still
      // exist, and, I can open a reader on each:
      long gen = SegmentInfos.getLastCommitGeneration(dir);

      int expectedCount = 0;

      rwReader.close();

      for (int i = 0; i < N + 1; i++) {
        try {
          IndexReader reader = DirectoryReader.open(dir);

          // Work backwards in commits on what the expected
          // count should be.
          searcher = newSearcher(reader);
          hits = searcher.search(query, 1000).scoreDocs;
          assertEquals(expectedCount, hits.length);
          if (expectedCount == 0) {
            expectedCount = 16;
          } else if (expectedCount == 16) {
            expectedCount = 17;
          } else if (expectedCount == 17) {
            expectedCount = 0;
          }
          reader.close();
          if (i == N) {
            fail("should have failed on commits before last " + N);
          }
        } catch (IOException e) {
          if (i != N) {
            throw e;
          }
        }
        if (i < N) {
          dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
        }
        gen--;
      }

      dir.close();
    }
  }
Code example #8
  /*
   * Test a silly deletion policy that keeps all commits around.
   */
  public void testKeepAllDeletionPolicy() throws IOException {
    for (int pass = 0; pass < 2; pass++) {

      if (VERBOSE) {
        System.out.println("TEST: cycle pass="******"TEST: open writer for forceMerge");
        }
        writer = new IndexWriter(dir, conf);
        policy = (KeepAllDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
        writer.forceMerge(1);
        writer.close();
      }

      assertEquals(needsMerging ? 2 : 1, policy.numOnInit);

      // If we are not auto committing then there should
      // be exactly 2 commits (one per close above):
      assertEquals(1 + (needsMerging ? 1 : 0), policy.numOnCommit);

      // Test listCommits
      Collection<IndexCommit> commits = DirectoryReader.listCommits(dir);
      // 2 from closing writer
      assertEquals(1 + (needsMerging ? 1 : 0), commits.size());

      // Make sure we can open a reader on each commit:
      for (final IndexCommit commit : commits) {
        IndexReader r = DirectoryReader.open(commit);
        r.close();
      }

      // Simplistic check: just verify all segments_N's still
      // exist, and, I can open a reader on each:
      long gen = SegmentInfos.getLastCommitGeneration(dir);
      while (gen > 0) {
        IndexReader reader = DirectoryReader.open(dir);
        reader.close();
        dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
        gen--;

        if (gen > 0) {
          // We've now removed a commit point, which should
          // have orphaned at least one index file.
          // Open & close a writer and assert that it
          // actually removed something:
          int preCount = dir.listAll().length;
          writer =
              new IndexWriter(
                  dir,
                  newIndexWriterConfig(new MockAnalyzer(random()))
                      .setOpenMode(OpenMode.APPEND)
                      .setIndexDeletionPolicy(policy));
          writer.close();
          int postCount = dir.listAll().length;
          assertTrue(postCount < preCount);
        }
      }

      dir.close();
    }
  }
Code example #9
  /*
   * Test "by time expiration" deletion policy:
   */
  public void testExpirationTimeDeletionPolicy() throws IOException, InterruptedException {

    final double SECONDS = 2.0;

    Directory dir = newDirectory();
    if (dir instanceof MockDirectoryWrapper) {
      // test manually deletes files
      ((MockDirectoryWrapper) dir).setEnableVirusScanner(false);
    }
    IndexWriterConfig conf =
        newIndexWriterConfig(new MockAnalyzer(random()))
            .setIndexDeletionPolicy(new ExpirationTimeDeletionPolicy(dir, SECONDS));
    MergePolicy mp = conf.getMergePolicy();
    mp.setNoCFSRatio(1.0);
    IndexWriter writer = new IndexWriter(dir, conf);
    ExpirationTimeDeletionPolicy policy =
        (ExpirationTimeDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
    Map<String, String> commitData = new HashMap<>();
    commitData.put("commitTime", String.valueOf(System.currentTimeMillis()));
    writer.setCommitData(commitData);
    writer.commit();
    writer.close();

    long lastDeleteTime = 0;
    final int targetNumDelete = TestUtil.nextInt(random(), 1, 5);
    while (policy.numDelete < targetNumDelete) {
      // Record last time when writer performed deletes of
      // past commits
      lastDeleteTime = System.currentTimeMillis();
      conf =
          newIndexWriterConfig(new MockAnalyzer(random()))
              .setOpenMode(OpenMode.APPEND)
              .setIndexDeletionPolicy(policy);
      mp = conf.getMergePolicy();
      mp.setNoCFSRatio(1.0);
      writer = new IndexWriter(dir, conf);
      policy = (ExpirationTimeDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
      for (int j = 0; j < 17; j++) {
        addDoc(writer);
      }
      commitData = new HashMap<>();
      commitData.put("commitTime", String.valueOf(System.currentTimeMillis()));
      writer.setCommitData(commitData);
      writer.commit();
      writer.close();

      Thread.sleep((int) (1000.0 * (SECONDS / 5.0)));
    }

    // Then a simplistic check: just verify that the
    // segments_N's that still exist are in fact within SECONDS
    // seconds of the last one's mod time, and, that I can
    // open a reader on each:
    long gen = SegmentInfos.getLastCommitGeneration(dir);

    String fileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen);
    boolean oneSecondResolution = true;

    while (gen > 0) {
      try {
        IndexReader reader = DirectoryReader.open(dir);
        reader.close();
        fileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen);

        // if we are on a filesystem that seems to have only
        // 1 second resolution, allow +1 second in commit
        // age tolerance:
        SegmentInfos sis = SegmentInfos.readCommit(dir, fileName);
        long modTime = Long.parseLong(sis.getUserData().get("commitTime"));
        oneSecondResolution &= (modTime % 1000) == 0;
        final long leeway = (long) ((SECONDS + (oneSecondResolution ? 1.0 : 0.0)) * 1000);

        assertTrue(
            "commit point was older than "
                + SECONDS
                + " seconds ("
                + (lastDeleteTime - modTime)
                + " msec) but did not get deleted ",
            lastDeleteTime - modTime <= leeway);
      } catch (IOException e) {
        // OK
        break;
      }

      dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
      gen--;
    }

    dir.close();
  }
Code example #10
File: DirectorySupport.java Project: radekzak/neo4j
 public static void deleteDirectoryContents(Directory directory) throws IOException {
   for (String fileName : directory.listAll()) directory.deleteFile(fileName);
 }
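
A minimal usage sketch for this helper, assuming the pre-IOContext Lucene API of the same era; RAMDirectory and the single-argument createOutput are assumptions about the surrounding classpath, not part of the snippet above.

  Directory dir = new RAMDirectory();
  IndexOutput out = dir.createOutput("stale.bin"); // pre-IOContext signature, assumed
  out.writeInt(42);
  out.close();
  DirectorySupport.deleteDirectoryContents(dir);
  assert dir.listAll().length == 0; // the directory is now empty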
Code example #11
  @Test(enabled = false)
  public void testReadChunks() throws Exception {
    final int BUFFER_SIZE = 64;

    Cache cache = cacheManager.getCache();
    Directory dir =
        DirectoryBuilder.newDirectoryInstance(cache, cache, cache, INDEXNAME)
            .chunkSize(BUFFER_SIZE)
            .create();

    // create file headers
    FileMetadata file1 = new FileMetadata(5);
    FileCacheKey key1 = new FileCacheKey(INDEXNAME, "Hello.txt");
    cache.put(key1, file1);

    FileMetadata file2 = new FileMetadata(5);
    FileCacheKey key2 = new FileCacheKey(INDEXNAME, "World.txt");
    cache.put(key2, file2);

    // byte array for Hello.txt
    String helloText = "Hello world.  This is some text.";
    cache.put(new ChunkCacheKey(INDEXNAME, "Hello.txt", 0, BUFFER_SIZE), helloText.getBytes());

    // byte array for World.txt - should be in at least 2 chunks.
    String worldText =
        "This String should contain more than sixty four characters but less than one hundred and twenty eight.";
    assert worldText.getBytes().length > BUFFER_SIZE;
    assert worldText.getBytes().length < (2 * BUFFER_SIZE);

    byte[] buf = new byte[BUFFER_SIZE];
    System.arraycopy(worldText.getBytes(), 0, buf, 0, BUFFER_SIZE);
    cache.put(new ChunkCacheKey(INDEXNAME, "World.txt", 0, BUFFER_SIZE), buf);

    String part1 = new String(buf);
    buf = new byte[BUFFER_SIZE];
    System.arraycopy(worldText.getBytes(), BUFFER_SIZE, buf, 0, worldText.length() - BUFFER_SIZE);
    cache.put(new ChunkCacheKey(INDEXNAME, "World.txt", 1, BUFFER_SIZE), buf);
    String part2 = new String(buf);

    // make sure the generated bytes do add up!
    AssertJUnit.assertEquals(part1 + part2.trim(), worldText);

    file1.setSize(helloText.length());
    file2.setSize(worldText.length());

    Set<String> s = new HashSet<String>();
    s.add("Hello.txt");
    s.add("World.txt");
    Set other = new HashSet(Arrays.asList(dir.listAll()));

    // ok, file listing works.
    AssertJUnit.assertEquals(s, other);

    IndexInput ii = dir.openInput("Hello.txt");

    assert ii.length() == helloText.length();

    ByteArrayOutputStream baos = new ByteArrayOutputStream();

    for (int i = 0; i < ii.length(); i++) {
      baos.write(ii.readByte());
    }

    assert new String(baos.toByteArray()).equals(helloText);

    ii = dir.openInput("World.txt");

    assert ii.length() == worldText.length();

    baos = new ByteArrayOutputStream();

    for (int i = 0; i < ii.length(); i++) {
      baos.write(ii.readByte());
    }

    assert new String(baos.toByteArray()).equals(worldText);

    // now with buffered reading

    ii = dir.openInput("Hello.txt");

    assert ii.length() == helloText.length();

    baos = new ByteArrayOutputStream();

    long toRead = ii.length();
    while (toRead > 0) {
      buf = new byte[19]; // suitably arbitrary
      int bytesRead = (int) Math.min(toRead, 19);
      ii.readBytes(buf, 0, bytesRead);
      toRead = toRead - bytesRead;
      baos.write(buf, 0, bytesRead);
    }

    assert new String(baos.toByteArray()).equals(helloText);

    ii = dir.openInput("World.txt");

    assert ii.length() == worldText.length();

    baos = new ByteArrayOutputStream();

    toRead = ii.length();
    while (toRead > 0) {
      buf = new byte[19]; // suitably arbitrary
      int bytesRead = (int) Math.min(toRead, 19);
      ii.readBytes(buf, 0, bytesRead);
      toRead = toRead - bytesRead;
      baos.write(buf, 0, bytesRead);
    }

    assert new String(baos.toByteArray()).equals(worldText);

    dir.deleteFile("Hello.txt");
    assert null == cache.get(new FileCacheKey(INDEXNAME, "Hello.txt"));
    assert null == cache.get(new ChunkCacheKey(INDEXNAME, "Hello.txt", 0, BUFFER_SIZE));

    Object ob1 = cache.get(new FileCacheKey(INDEXNAME, "World.txt"));
    Object ob2 = cache.get(new ChunkCacheKey(INDEXNAME, "World.txt", 0, BUFFER_SIZE));
    Object ob3 = cache.get(new ChunkCacheKey(INDEXNAME, "World.txt", 1, BUFFER_SIZE));

    ((DirectoryExtensions) dir).renameFile("World.txt", "HelloWorld.txt");
    assert null == cache.get(new FileCacheKey(INDEXNAME, "Hello.txt"));
    assert null == cache.get(new ChunkCacheKey(INDEXNAME, "Hello.txt", 0, BUFFER_SIZE));
    assert null == cache.get(new ChunkCacheKey(INDEXNAME, "Hello.txt", 1, BUFFER_SIZE));

    assert cache.get(new FileCacheKey(INDEXNAME, "HelloWorld.txt")).equals(ob1);
    assert cache.get(new ChunkCacheKey(INDEXNAME, "HelloWorld.txt", 0, BUFFER_SIZE)).equals(ob2);
    assert cache.get(new ChunkCacheKey(INDEXNAME, "HelloWorld.txt", 1, BUFFER_SIZE)).equals(ob3);

    // test that the contents survive a move
    ii = dir.openInput("HelloWorld.txt");

    assert ii.length() == worldText.length();

    baos = new ByteArrayOutputStream();

    toRead = ii.length();
    while (toRead > 0) {
      buf = new byte[19]; // suitably arbitrary
      int bytesRead = (int) Math.min(toRead, 19);
      ii.readBytes(buf, 0, bytesRead);
      toRead = toRead - bytesRead;
      baos.write(buf, 0, bytesRead);
    }

    assert new String(baos.toByteArray()).equals(worldText);

    dir.close();
    DirectoryIntegrityCheck.verifyDirectoryStructure(cache, INDEXNAME);
  }
Code example #12
 @Override
 public void deleteFile(String name) throws IOException {
   fsDir.deleteFile(name);
 }
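
This override is pure delegation: the enclosing class wraps another Directory (the fsDir field) and forwards each call to it. A hedged sketch of what such a wrapper plausibly looks like; the class name and constructor are assumptions, and only the deleteFile override above comes from the source.

  public class DelegatingDirectory extends Directory { // hypothetical name
    private final Directory fsDir; // the wrapped filesystem directory

    public DelegatingDirectory(Directory fsDir) {
      this.fsDir = fsDir;
    }

    @Override
    public String[] listAll() throws IOException {
      return fsDir.listAll();
    }

    @Override
    public void deleteFile(String name) throws IOException {
      fsDir.deleteFile(name);
    }

    // ...the remaining abstract Directory methods forward the same way...
  }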
Code example #13
  private boolean doRestore() throws Exception {

    Path backupPath = Paths.get(backupLocation).resolve(backupName);
    SimpleDateFormat dateFormat = new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT);
    String restoreIndexName = "restore." + dateFormat.format(new Date());
    String restoreIndexPath = core.getDataDir() + restoreIndexName;

    Directory restoreIndexDir = null;
    Directory indexDir = null;
    try (Directory backupDir = FSDirectory.open(backupPath)) {

      final Version version =
          IndexFetcher.checkOldestVersion(SegmentInfos.readLatestCommit(backupDir));

      restoreIndexDir =
          core.getDirectoryFactory()
              .get(
                  restoreIndexPath,
                  DirectoryFactory.DirContext.DEFAULT,
                  core.getSolrConfig().indexConfig.lockType);

      // Prefer local copy.
      indexDir =
          core.getDirectoryFactory()
              .get(
                  core.getIndexDir(),
                  DirectoryFactory.DirContext.DEFAULT,
                  core.getSolrConfig().indexConfig.lockType);

      // Move all files from backupDir to restoreIndexDir
      for (String filename : backupDir.listAll()) {
        checkInterrupted();
        log.info("Copying file {} to restore directory ", filename);
        try (IndexInput indexInput = backupDir.openInput(filename, IOContext.READONCE)) {
          Long checksum = null;
          try {
            checksum = CodecUtil.retrieveChecksum(indexInput);
          } catch (Exception e) {
            log.warn("Could not read checksum from index file: " + filename, e);
          }
          long length = indexInput.length();
          IndexFetcher.CompareResult compareResult =
              IndexFetcher.compareFile(indexDir, version, filename, length, checksum);
          if (!compareResult.equal
              || (!compareResult.checkSummed
                  && (filename.endsWith(".si")
                      || filename.endsWith(".liv")
                      || filename.startsWith("segments_")))) {
            restoreIndexDir.copyFrom(backupDir, filename, filename, IOContext.READONCE);
          } else {
            // prefer local copy
            restoreIndexDir.copyFrom(indexDir, filename, filename, IOContext.READONCE);
          }
        } catch (Exception e) {
          throw new SolrException(
              SolrException.ErrorCode.UNKNOWN, "Exception while restoring the backup index", e);
        }
      }
      log.debug("Switching directories");
      IndexFetcher.modifyIndexProps(core, restoreIndexName);

      boolean success;
      try {
        core.getUpdateHandler().newIndexWriter(false);
        openNewSearcher();
        success = true;
        log.info("Successfully restored to the backup index");
      } catch (Exception e) {
        // Rollback to the old index directory. Delete the restore index directory and mark the
        // restore as failed.
        log.warn("Could not switch to restored index. Rolling back to the current index");
        Directory dir = null;
        try {
          dir =
              core.getDirectoryFactory()
                  .get(
                      core.getDataDir(),
                      DirectoryFactory.DirContext.META_DATA,
                      core.getSolrConfig().indexConfig.lockType);
          dir.deleteFile(IndexFetcher.INDEX_PROPERTIES);
        } finally {
          if (dir != null) {
            core.getDirectoryFactory().release(dir);
          }
        }

        core.getDirectoryFactory().doneWithDirectory(restoreIndexDir);
        core.getDirectoryFactory().remove(restoreIndexDir);
        core.getUpdateHandler().newIndexWriter(false);
        openNewSearcher();
        throw new SolrException(
            SolrException.ErrorCode.UNKNOWN, "Exception while restoring the backup index", e);
      }
      if (success) {
        core.getDirectoryFactory().doneWithDirectory(indexDir);
        core.getDirectoryFactory().remove(indexDir);
      }

      return true;
    } finally {
      if (restoreIndexDir != null) {
        core.getDirectoryFactory().release(restoreIndexDir);
      }
      if (indexDir != null) {
        core.getDirectoryFactory().release(indexDir);
      }
    }
  }