/**
 * Scans the "metadata-*" blobs and returns the highest index whose blob can actually be read back
 * as valid metadata, or -1 if none exists.
 */
private int findLatestIndex() throws IOException {
    ImmutableMap<String, BlobMetaData> blobs = metaDataBlobContainer.listBlobsByPrefix("metadata-");

    int index = -1;
    for (BlobMetaData md : blobs.values()) {
      if (logger.isTraceEnabled()) {
        logger.trace("[findLatestMetadata]: Processing [" + md.name() + "]");
      }
      String name = md.name();
      int fileIndex = Integer.parseInt(name.substring(name.indexOf('-') + 1));
      if (fileIndex >= index) {
        // try to read the metadata; only accept this index if its blob parses successfully
        byte[] data = null;
        try {
          data = metaDataBlobContainer.readBlobFully(name);
          readMetaData(data);
          index = fileIndex;
        } catch (IOException e) {
          logger.warn(
              "[findLatestMetadata]: failed to read metadata from [{}], data_length [{}] ignoring...",
              e,
              name,
              data == null ? "na" : data.length);
        }
      }
    }

    return index;
  }
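 /**
  * Writes the translog snapshot to the blob container as a single blob named after the given file
  * info. Unlike index files (see snapshotFile), the translog is currently written in one piece
  * rather than in ".part[N]" chunks.
  */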
 private void snapshotTranslog(Translog.Snapshot snapshot, CommitPoint.FileInfo fileInfo)
     throws IOException {
   blobContainer.writeBlob(fileInfo.name(), snapshot.stream(), snapshot.lengthInBytes());
 }
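  /**
   * Writes the cluster meta data as a new "metadata-N" blob (JSON, optionally compressed), bumps
   * the current index, and then deletes all older "metadata-*" blobs on a best-effort basis.
   */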
  @Override
  public void write(MetaData metaData) throws GatewayException {
    final String newMetaData = "metadata-" + (currentIndex + 1);
    CachedStreamOutput.Entry cachedEntry = CachedStreamOutput.popEntry();
    try {
      StreamOutput streamOutput;
      if (compress) {
        streamOutput = cachedEntry.bytes(CompressorFactory.defaultCompressor());
      } else {
        streamOutput = cachedEntry.bytes();
      }
      XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, streamOutput);
      builder.startObject();
      MetaData.Builder.toXContent(metaData, builder, ToXContent.EMPTY_PARAMS);
      builder.endObject();
      builder.close();
      metaDataBlobContainer.writeBlob(
          newMetaData,
          new ByteArrayInputStream(
              cachedEntry.bytes().underlyingBytes(), 0, cachedEntry.bytes().size()),
          cachedEntry.bytes().size());
    } catch (IOException e) {
      throw new GatewayException("Failed to write metadata [" + newMetaData + "]", e);
    } finally {
      CachedStreamOutput.pushEntry(cachedEntry);
    }

    currentIndex++;

    try {
      metaDataBlobContainer.deleteBlobsByFilter(
          new BlobContainer.BlobNameFilter() {
            @Override
            public boolean accept(String blobName) {
              return blobName.startsWith("metadata-") && !newMetaData.equals(blobName);
            }
          });
    } catch (IOException e) {
      logger.debug("Failed to delete old metadata, will do it next time", e);
    }
  }
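 /**
  * Reads every "commit-*" blob from the container and parses it into a commit point, skipping
  * (and logging) any blob that fails to parse.
  */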
 private CommitPoints buildCommitPoints(ImmutableMap<String, BlobMetaData> blobs) {
   List<CommitPoint> commitPoints = Lists.newArrayList();
   for (String name : blobs.keySet()) {
     if (name.startsWith("commit-")) {
       try {
         commitPoints.add(CommitPoints.fromXContent(blobContainer.readBlobFully(name)));
       } catch (Exception e) {
         logger.warn("failed to read commit point [{}]", e, name);
       }
     }
   }
   return new CommitPoints(commitPoints);
 }
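 /**
  * Loads all readable, non-empty "commit-*" blobs for the given index shard and returns the first
  * commit point of the resulting {@link CommitPoints}, or null if none could be read.
  */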
 public CommitPoint findCommitPoint(String index, int shardId) throws IOException {
   BlobPath path = BlobStoreIndexGateway.shardPath(basePath, index, shardId);
   ImmutableBlobContainer container = blobStore.immutableBlobContainer(path);
   ImmutableMap<String, BlobMetaData> blobs = container.listBlobs();
   List<CommitPoint> commitPointsList = Lists.newArrayList();
   for (BlobMetaData md : blobs.values()) {
     if (md.length() == 0) { // a commit point that was not flushed yet...
       continue;
     }
     if (md.name().startsWith("commit-")) {
       try {
         commitPointsList.add(CommitPoints.fromXContent(container.readBlobFully(md.name())));
       } catch (Exception e) {
         logger.warn("failed to read commit point at path {} with name [{}]", e, path, md.name());
       }
     }
   }
   CommitPoints commitPoints = new CommitPoints(commitPointsList);
   if (commitPoints.commits().isEmpty()) {
     return null;
   }
   return commitPoints.commits().get(0);
 }
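  /**
   * Reads the latest "metadata-N" blob from the gateway and parses it into {@link MetaData}.
   * Returns null when no metadata blob exists yet.
   */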
  @Override
  public MetaData read() throws GatewayException {
    try {
      this.currentIndex = findLatestIndex();
    } catch (IOException e) {
      throw new GatewayException("Failed to find latest metadata to read from", e);
    }
    if (currentIndex == -1) return null;
    String metaData = "metadata-" + currentIndex;

    try {
      return readMetaData(metaDataBlobContainer.readBlobFully(metaData));
    } catch (GatewayException e) {
      throw e;
    } catch (Exception e) {
      throw new GatewayException("Failed to read metadata [" + metaData + "] from gateway", e);
    }
  }
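 /**
  * Lists the contents of the given shard's index container, converted into virtual blob entries
  * via BlobStoreIndexShardGateway#buildVirtualBlobs.
  */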
 public ImmutableMap<String, BlobMetaData> listIndexBlobs(int shardId) throws IOException {
   ImmutableBlobContainer indexContainer =
       blobStore.immutableBlobContainer(shardIndexPath(shardId));
   return BlobStoreIndexShardGateway.buildVirtualBlobs(
       indexContainer, indexContainer.listBlobs(), null);
 }
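  /**
   * Asynchronously snapshots a single index file to the blob container, splitting it into
   * ".part[N]" chunks when a chunk size is configured. Each chunk write reports completion or
   * failure, and the shared latch is counted down once all chunks of this file are done (or on
   * setup failure).
   */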
  private void snapshotFile(
      Directory dir,
      final CommitPoint.FileInfo fileInfo,
      final CountDownLatch latch,
      final List<Throwable> failures)
      throws IOException {
    long chunkBytes = Long.MAX_VALUE;
    if (chunkSize != null) {
      chunkBytes = chunkSize.bytes();
    }

    long totalLength = fileInfo.length();
    long numberOfChunks = totalLength / chunkBytes;
    if (totalLength % chunkBytes > 0) {
      numberOfChunks++;
    }
    if (numberOfChunks == 0) {
      numberOfChunks++;
    }

    final long fNumberOfChunks = numberOfChunks;
    final AtomicLong counter = new AtomicLong(numberOfChunks);
    for (long i = 0; i < fNumberOfChunks; i++) {
      final long partNumber = i;

      IndexInput indexInput = null;
      try {
        indexInput = dir.openInput(fileInfo.physicalName());
        indexInput.seek(partNumber * chunkBytes);
        InputStreamIndexInput is = new ThreadSafeInputStreamIndexInput(indexInput, chunkBytes);

        String blobName = fileInfo.name();
        if (fNumberOfChunks > 1) {
          // if we do chunks, then all of them are in the form of "[xxx].part[N]".
          blobName += ".part" + partNumber;
        }

        final IndexInput fIndexInput = indexInput;
        blobContainer.writeBlob(
            blobName,
            is,
            is.actualSizeToRead(),
            new ImmutableBlobContainer.WriterListener() {
              @Override
              public void onCompleted() {
                try {
                  fIndexInput.close();
                } catch (IOException e) {
                  // ignore
                }
                if (counter.decrementAndGet() == 0) {
                  latch.countDown();
                }
              }

              @Override
              public void onFailure(Throwable t) {
                try {
                  fIndexInput.close();
                } catch (IOException e) {
                  // ignore
                }
                failures.add(t);
                if (counter.decrementAndGet() == 0) {
                  latch.countDown();
                }
              }
            });
      } catch (Exception e) {
        if (indexInput != null) {
          try {
            indexInput.close();
          } catch (IOException e1) {
            // ignore
          }
        }
        failures.add(e);
        latch.countDown();
      }
    }
  }
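  /**
   * Asynchronously recovers a single file from the blob container into the store, reading it
   * chunk by chunk (".part[N]" blobs) when it was written chunked, writing the stored checksum
   * afterwards, and counting down the latch when the file is done or has failed.
   */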
  private void recoverFile(
      final CommitPoint.FileInfo fileInfo,
      final ImmutableMap<String, BlobMetaData> blobs,
      final CountDownLatch latch,
      final List<Throwable> failures) {
    final IndexOutput indexOutput;
    try {
      // we create an output with no checksum: the bytes we copy are raw file data, so an
      // on-the-fly checksum would not be valid (because of seek). The checksum file is written
      // once copying is done.
      indexOutput = store.createOutputWithNoChecksum(fileInfo.physicalName());
    } catch (IOException e) {
      failures.add(e);
      latch.countDown();
      return;
    }

    String firstFileToRecover = fileInfo.name();
    if (!blobs.containsKey(fileInfo.name())) {
      // chunking, append part0 to it
      firstFileToRecover = fileInfo.name() + ".part0";
    }
    if (!blobs.containsKey(firstFileToRecover)) {
      // neither the file nor its first chunk exists in the blob store; log and skip it
      logger.warn(
          "no file [{}]/[{}] to recover, ignoring it", fileInfo.name(), fileInfo.physicalName());
      latch.countDown();
      return;
    }
    final AtomicInteger partIndex = new AtomicInteger();

    blobContainer.readBlob(
        firstFileToRecover,
        new BlobContainer.ReadBlobListener() {
          @Override
          public synchronized void onPartial(byte[] data, int offset, int size) throws IOException {
            recoveryStatus.index().addCurrentFilesSize(size);
            indexOutput.writeBytes(data, offset, size);
          }

          @Override
          public synchronized void onCompleted() {
            int part = partIndex.incrementAndGet();
            String partName = fileInfo.name() + ".part" + part;
            if (blobs.containsKey(partName)) {
              // continue with the new part
              blobContainer.readBlob(partName, this);
              return;
            } else {
              // we are done...
              try {
                indexOutput.close();
                // write the checksum
                if (fileInfo.checksum() != null) {
                  store.writeChecksum(fileInfo.physicalName(), fileInfo.checksum());
                }
                store.directory().sync(Collections.singleton(fileInfo.physicalName()));
              } catch (IOException e) {
                onFailure(e);
                return;
              }
            }
            latch.countDown();
          }

          @Override
          public void onFailure(Throwable t) {
            failures.add(t);
            latch.countDown();
          }
        });
  }
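  /**
   * Replays the translog files referenced by the commit point: translog blobs are streamed in
   * order, operations are decoded from the buffered bytes as soon as they are complete, and each
   * operation is applied to the shard. A decoding failure marks the rest of the translog as
   * corrupted and ignores it. If the commit point references no translog files, the shard is
   * started immediately.
   */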
  private void recoverTranslog(CommitPoint commitPoint, ImmutableMap<String, BlobMetaData> blobs)
      throws IndexShardGatewayRecoveryException {
    if (commitPoint.translogFiles().isEmpty()) {
      // no translog files, bail
      indexShard.start("post recovery from gateway, no translog");
      return;
    }

    try {
      indexShard.performRecoveryPrepareForTranslog();

      final AtomicReference<Throwable> failure = new AtomicReference<Throwable>();
      final CountDownLatch latch = new CountDownLatch(1);

      final Iterator<CommitPoint.FileInfo> transIt = commitPoint.translogFiles().iterator();

      blobContainer.readBlob(
          transIt.next().name(),
          new BlobContainer.ReadBlobListener() {
            FastByteArrayOutputStream bos = new FastByteArrayOutputStream();
            boolean ignore = false;

            @Override
            public synchronized void onPartial(byte[] data, int offset, int size)
                throws IOException {
              if (ignore) {
                return;
              }
              bos.write(data, offset, size);
              // if we don't yet have enough bytes to read the size (int) of the next operation,
              // wait for the next partial chunk
              if (bos.size() < 4) {
                return;
              }
              BytesStreamInput si = new BytesStreamInput(bos.unsafeByteArray(), 0, bos.size());
              int position;
              while (true) {
                try {
                  position = si.position();
                  if (position + 4 > bos.size()) {
                    break;
                  }
                  int opSize = si.readInt();
                  int curPos = si.position();
                  if ((si.position() + opSize) > bos.size()) {
                    break;
                  }
                  Translog.Operation operation = TranslogStreams.readTranslogOperation(si);
                  if ((si.position() - curPos) != opSize) {
                    logger.warn(
                        "mismatch in size, expected [{}], got [{}]",
                        opSize,
                        si.position() - curPos);
                  }
                  recoveryStatus.translog().addTranslogOperations(1);
                  indexShard.performRecoveryOperation(operation);
                  if (si.position() >= bos.size()) {
                    position = si.position();
                    break;
                  }
                } catch (Exception e) {
                  logger.warn(
                      "failed to retrieve translog after [{}] operations, ignoring the rest, considered corrupted",
                      e,
                      recoveryStatus.translog().currentTranslogOperations());
                  ignore = true;
                  latch.countDown();
                  return;
                }
              }

              FastByteArrayOutputStream newBos = new FastByteArrayOutputStream();

              int leftOver = bos.size() - position;
              if (leftOver > 0) {
                newBos.write(bos.unsafeByteArray(), position, leftOver);
              }

              bos = newBos;
            }

            @Override
            public synchronized void onCompleted() {
              if (ignore) {
                return;
              }
              if (!transIt.hasNext()) {
                latch.countDown();
                return;
              }
              blobContainer.readBlob(transIt.next().name(), this);
            }

            @Override
            public void onFailure(Throwable t) {
              failure.set(t);
              latch.countDown();
            }
          });

      latch.await();
      if (failure.get() != null) {
        throw failure.get();
      }

      indexShard.performRecoveryFinalization(true);
    } catch (Throwable e) {
      throw new IndexShardGatewayRecoveryException(shardId, "Failed to recover translog", e);
    }
  }
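  /**
   * Performs full recovery from the gateway: lists the blobs, rebuilds the commit points, and
   * recovers the index files and translog of the first commit point whose files are all present
   * in the blob store. If no commit point exists, the store is cleaned and recovery completes as
   * empty.
   */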
  @Override
  public void recover(RecoveryStatus recoveryStatus) throws IndexShardGatewayRecoveryException {
    this.recoveryStatus = recoveryStatus;

    final ImmutableMap<String, BlobMetaData> blobs;
    try {
      blobs = blobContainer.listBlobs();
    } catch (IOException e) {
      throw new IndexShardGatewayRecoveryException(shardId, "Failed to list content of gateway", e);
    }

    List<CommitPoint> commitPointsList = Lists.newArrayList();
    boolean atLeastOneCommitPointExists = false;
    for (String name : blobs.keySet()) {
      if (name.startsWith("commit-")) {
        atLeastOneCommitPointExists = true;
        try {
          commitPointsList.add(CommitPoints.fromXContent(blobContainer.readBlobFully(name)));
        } catch (Exception e) {
          logger.warn("failed to read commit point [{}]", e, name);
        }
      }
    }
    if (atLeastOneCommitPointExists && commitPointsList.isEmpty()) {
      // no commit point could be loaded; bail out so we don't corrupt the index (manual
      // intervention is required)
      throw new IndexShardGatewayRecoveryException(
          shardId, "Commit points exist but none could be loaded", null);
    }
    CommitPoints commitPoints = new CommitPoints(commitPointsList);

    if (commitPoints.commits().isEmpty()) {
      // no commit points, clean the store just so we won't recover wrong files
      try {
        indexShard.store().deleteContent();
      } catch (IOException e) {
        logger.warn("failed to clean store before starting shard", e);
      }
      recoveryStatus.index().startTime(System.currentTimeMillis());
      recoveryStatus.index().time(System.currentTimeMillis() - recoveryStatus.index().startTime());
      recoveryStatus.translog().startTime(System.currentTimeMillis());
      recoveryStatus
          .translog()
          .time(System.currentTimeMillis() - recoveryStatus.translog().startTime());
      return;
    }

    for (CommitPoint commitPoint : commitPoints) {
      if (!commitPointExistsInBlobs(commitPoint, blobs)) {
        logger.warn(
            "listed commit_point [{}]/[{}], but not all files exists, ignoring",
            commitPoint.name(),
            commitPoint.version());
        continue;
      }
      try {
        recoveryStatus.index().startTime(System.currentTimeMillis());
        recoveryStatus.updateStage(RecoveryStatus.Stage.INDEX);
        recoverIndex(commitPoint, blobs);
        recoveryStatus
            .index()
            .time(System.currentTimeMillis() - recoveryStatus.index().startTime());

        recoveryStatus.translog().startTime(System.currentTimeMillis());
        recoveryStatus.updateStage(RecoveryStatus.Stage.TRANSLOG);
        recoverTranslog(commitPoint, blobs);
        recoveryStatus
            .translog()
            .time(System.currentTimeMillis() - recoveryStatus.translog().startTime());
        return;
      } catch (Exception e) {
        throw new IndexShardGatewayRecoveryException(
            shardId,
            "failed to recover commit_point ["
                + commitPoint.name()
                + "]/["
                + commitPoint.version()
                + "]",
            e);
      }
    }
    throw new IndexShardGatewayRecoveryException(
        shardId, "No commit point data is available in gateway", null);
  }
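  /**
   * Performs a snapshot to the gateway: copies index files that are new or changed since the last
   * commit point, snapshots the translog (fully or incrementally), writes a new "commit-*" blob
   * describing the snapshot, and finally deletes commit points and "__*" file blobs that are no
   * longer referenced.
   */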
  private void doSnapshot(final Snapshot snapshot) throws IndexShardGatewaySnapshotFailedException {
    ImmutableMap<String, BlobMetaData> blobs;
    try {
      blobs = blobContainer.listBlobs();
    } catch (IOException e) {
      throw new IndexShardGatewaySnapshotFailedException(shardId, "failed to list blobs", e);
    }

    long generation = findLatestFileNameGeneration(blobs);
    CommitPoints commitPoints = buildCommitPoints(blobs);

    currentSnapshotStatus.index().startTime(System.currentTimeMillis());
    currentSnapshotStatus.updateStage(SnapshotStatus.Stage.INDEX);

    final SnapshotIndexCommit snapshotIndexCommit = snapshot.indexCommit();
    final Translog.Snapshot translogSnapshot = snapshot.translogSnapshot();

    final CountDownLatch indexLatch = new CountDownLatch(snapshotIndexCommit.getFiles().length);
    final CopyOnWriteArrayList<Throwable> failures = new CopyOnWriteArrayList<Throwable>();
    final List<CommitPoint.FileInfo> indexCommitPointFiles = Lists.newArrayList();

    int indexNumberOfFiles = 0;
    long indexTotalFilesSize = 0;
    for (final String fileName : snapshotIndexCommit.getFiles()) {
      StoreFileMetaData md;
      try {
        md = store.metaData(fileName);
      } catch (IOException e) {
        throw new IndexShardGatewaySnapshotFailedException(
            shardId, "Failed to get store file metadata", e);
      }

      boolean snapshotRequired = false;
      if (snapshot.indexChanged() && fileName.equals(snapshotIndexCommit.getSegmentsFileName())) {
        snapshotRequired = true; // always snapshot the segments file when the index changed
      }

      CommitPoint.FileInfo fileInfo = commitPoints.findPhysicalIndexFile(fileName);
      if (fileInfo == null
          || !fileInfo.isSame(md)
          || !commitPointFileExistsInBlobs(fileInfo, blobs)) {
        // the file does not exist in any commit point, has a different length, or is not fully
        // present in the listed blobs
        snapshotRequired = true;
      }

      if (snapshotRequired) {
        indexNumberOfFiles++;
        indexTotalFilesSize += md.length();
        // create a new FileInfo
        try {
          CommitPoint.FileInfo snapshotFileInfo =
              new CommitPoint.FileInfo(
                  fileNameFromGeneration(++generation), fileName, md.length(), md.checksum());
          indexCommitPointFiles.add(snapshotFileInfo);
          snapshotFile(snapshotIndexCommit.getDirectory(), snapshotFileInfo, indexLatch, failures);
        } catch (IOException e) {
          failures.add(e);
          indexLatch.countDown();
        }
      } else {
        indexCommitPointFiles.add(fileInfo);
        indexLatch.countDown();
      }
    }
    currentSnapshotStatus.index().files(indexNumberOfFiles, indexTotalFilesSize);

    try {
      indexLatch.await();
    } catch (InterruptedException e) {
      failures.add(e);
    }
    if (!failures.isEmpty()) {
      throw new IndexShardGatewaySnapshotFailedException(
          shardId(), "Failed to perform snapshot (index files)", failures.get(failures.size() - 1));
    }

    currentSnapshotStatus
        .index()
        .time(System.currentTimeMillis() - currentSnapshotStatus.index().startTime());

    currentSnapshotStatus.updateStage(SnapshotStatus.Stage.TRANSLOG);
    currentSnapshotStatus.translog().startTime(System.currentTimeMillis());

    // Note: we assume the snapshot always starts from "base 0". We would need to seek forward to
    // lastTranslogPosition if we only wanted the delta.
    List<CommitPoint.FileInfo> translogCommitPointFiles = Lists.newArrayList();
    int expectedNumberOfOperations = 0;
    boolean snapshotRequired = false;
    if (snapshot.newTranslogCreated()) {
      if (translogSnapshot.lengthInBytes() > 0) {
        snapshotRequired = true;
        expectedNumberOfOperations = translogSnapshot.estimatedTotalOperations();
      }
    } else {
      // if we have a commit point, check that we have all the files listed in it in the blob store
      if (!commitPoints.commits().isEmpty()) {
        CommitPoint commitPoint = commitPoints.commits().get(0);
        boolean allTranslogFilesExists = true;
        for (CommitPoint.FileInfo fileInfo : commitPoint.translogFiles()) {
          if (!commitPointFileExistsInBlobs(fileInfo, blobs)) {
            allTranslogFilesExists = false;
            break;
          }
        }
        // if everything exists, we can seek forward in case there are new operations, otherwise, we
        // copy over all again...
        if (allTranslogFilesExists) {
          translogCommitPointFiles.addAll(commitPoint.translogFiles());
          if (snapshot.sameTranslogNewOperations()) {
            translogSnapshot.seekForward(snapshot.lastTranslogLength());
            if (translogSnapshot.lengthInBytes() > 0) {
              snapshotRequired = true;
              expectedNumberOfOperations =
                  translogSnapshot.estimatedTotalOperations()
                      - snapshot.lastTotalTranslogOperations();
            }
          } // else (no operations, nothing to snapshot)
        } else {
          // a full translog snapshot is required
          if (translogSnapshot.lengthInBytes() > 0) {
            expectedNumberOfOperations = translogSnapshot.estimatedTotalOperations();
            snapshotRequired = true;
          }
        }
      } else {
        // no commit point, snapshot all the translog
        if (translogSnapshot.lengthInBytes() > 0) {
          expectedNumberOfOperations = translogSnapshot.estimatedTotalOperations();
          snapshotRequired = true;
        }
      }
    }
    currentSnapshotStatus.translog().expectedNumberOfOperations(expectedNumberOfOperations);

    if (snapshotRequired) {
      CommitPoint.FileInfo addedTranslogFileInfo =
          new CommitPoint.FileInfo(
              fileNameFromGeneration(++generation),
              "translog-" + translogSnapshot.translogId(),
              translogSnapshot.lengthInBytes(),
              null /* no need for checksum in translog */);
      translogCommitPointFiles.add(addedTranslogFileInfo);
      try {
        snapshotTranslog(translogSnapshot, addedTranslogFileInfo);
      } catch (Exception e) {
        throw new IndexShardGatewaySnapshotFailedException(
            shardId, "Failed to snapshot translog", e);
      }
    }
    currentSnapshotStatus
        .translog()
        .time(System.currentTimeMillis() - currentSnapshotStatus.translog().startTime());

    // now create and write the commit point
    currentSnapshotStatus.updateStage(SnapshotStatus.Stage.FINALIZE);
    long version = 0;
    if (!commitPoints.commits().isEmpty()) {
      version = commitPoints.commits().iterator().next().version() + 1;
    }
    String commitPointName = "commit-" + Long.toString(version, Character.MAX_RADIX);
    CommitPoint commitPoint =
        new CommitPoint(
            version,
            commitPointName,
            CommitPoint.Type.GENERATED,
            indexCommitPointFiles,
            translogCommitPointFiles);
    try {
      byte[] commitPointData = CommitPoints.toXContent(commitPoint);
      blobContainer.writeBlob(
          commitPointName, new FastByteArrayInputStream(commitPointData), commitPointData.length);
    } catch (Exception e) {
      throw new IndexShardGatewaySnapshotFailedException(
          shardId, "Failed to write commit point", e);
    }

    // delete all files that are not referenced by any commit point
    // build a new CommitPoints collection that includes this commit point and all the saved ones
    List<CommitPoint> newCommitPointsList = Lists.newArrayList();
    newCommitPointsList.add(commitPoint);
    for (CommitPoint point : commitPoints) {
      if (point.type() == CommitPoint.Type.SAVED) {
        newCommitPointsList.add(point);
      }
    }
    CommitPoints newCommitPoints = new CommitPoints(newCommitPointsList);
    // first, go over the commit point blobs and delete those not referenced by the new set
    for (String blobName : blobs.keySet()) {
      if (!blobName.startsWith("commit-")) {
        continue;
      }
      long checkedVersion =
          Long.parseLong(blobName.substring("commit-".length()), Character.MAX_RADIX);
      if (!newCommitPoints.hasVersion(checkedVersion)) {
        try {
          blobContainer.deleteBlob(blobName);
        } catch (IOException e) {
          // ignore
        }
      }
    }
    // now go over all the blobs, and if they don't exist in a commit point, delete them
    for (String blobName : blobs.keySet()) {
      String name = blobName;
      if (!name.startsWith("__")) {
        continue;
      }
      if (blobName.contains(".part")) {
        name = blobName.substring(0, blobName.indexOf(".part"));
      }
      if (newCommitPoints.findNameFile(name) == null) {
        try {
          blobContainer.deleteBlob(blobName);
        } catch (IOException e) {
          // ignore, will delete it later
        }
      }
    }
  }