private void recoverFile(
      final CommitPoint.FileInfo fileInfo,
      final ImmutableMap<String, BlobMetaData> blobs,
      final CountDownLatch latch,
      final List<Throwable> failures) {
    final IndexOutput indexOutput;
    try {
      // we create an output with no checksum, this is because the pure binary data of the file is
      // not
      // the checksum (because of seek). We will create the checksum file once copying is done
      indexOutput = store.createOutputWithNoChecksum(fileInfo.physicalName());
    } catch (IOException e) {
      failures.add(e);
      latch.countDown();
      return;
    }

    String firstFileToRecover = fileInfo.name();
    if (!blobs.containsKey(fileInfo.name())) {
      // chunking, append part0 to it
      firstFileToRecover = fileInfo.name() + ".part0";
    }
    if (!blobs.containsKey(firstFileToRecover)) {
      // no file, what to do, what to do?
      logger.warn(
          "no file [{}]/[{}] to recover, ignoring it", fileInfo.name(), fileInfo.physicalName());
      latch.countDown();
      return;
    }
    final AtomicInteger partIndex = new AtomicInteger();

    blobContainer.readBlob(
        firstFileToRecover,
        new BlobContainer.ReadBlobListener() {
          @Override
          public synchronized void onPartial(byte[] data, int offset, int size) throws IOException {
            recoveryStatus.index().addCurrentFilesSize(size);
            indexOutput.writeBytes(data, offset, size);
          }

          @Override
          public synchronized void onCompleted() {
            int part = partIndex.incrementAndGet();
            String partName = fileInfo.name() + ".part" + part;
            if (blobs.containsKey(partName)) {
              // continue with the new part
              blobContainer.readBlob(partName, this);
              return;
            } else {
              // we are done...
              try {
                indexOutput.close();
                // write the checksum
                if (fileInfo.checksum() != null) {
                  store.writeChecksum(fileInfo.physicalName(), fileInfo.checksum());
                }
                store.directory().sync(Collections.singleton(fileInfo.physicalName()));
              } catch (IOException e) {
                onFailure(e);
                return;
              }
            }
            latch.countDown();
          }

          @Override
          public void onFailure(Throwable t) {
            failures.add(t);
            latch.countDown();
          }
        });
  }
  /**
   * Replays the commit point's translog files against the shard to complete recovery.
   *
   * <p>Each translog blob is streamed sequentially through a single listener that re-registers
   * itself for the next file in {@code onCompleted}. Bytes arrive in arbitrary-sized chunks via
   * {@code onPartial}, so partial operations are buffered in {@code bos} across calls; operations
   * are framed as a 4-byte length prefix followed by the serialized operation. Decode failures are
   * treated as a corrupted tail: the rest of the translog is ignored (best effort) rather than
   * failing the recovery. Read failures, by contrast, abort recovery via the exception below.
   *
   * @param commitPoint commit point whose translog files should be replayed, in order
   * @param blobs blob listing (unused here beyond the signature — TODO confirm callers rely on it)
   * @throws IndexShardGatewayRecoveryException if replay or finalization fails
   */
  private void recoverTranslog(CommitPoint commitPoint, ImmutableMap<String, BlobMetaData> blobs)
      throws IndexShardGatewayRecoveryException {
    if (commitPoint.translogFiles().isEmpty()) {
      // no translog files, bail
      indexShard.start("post recovery from gateway, no translog");
      return;
    }

    try {
      indexShard.performRecoveryPrepareForTranslog();

      // single latch: counted down once, after the last file completes or on failure/corruption
      final AtomicReference<Throwable> failure = new AtomicReference<Throwable>();
      final CountDownLatch latch = new CountDownLatch(1);

      final Iterator<CommitPoint.FileInfo> transIt = commitPoint.translogFiles().iterator();

      blobContainer.readBlob(
          transIt.next().name(),
          new BlobContainer.ReadBlobListener() {
            // carries bytes of incomplete operations between onPartial calls
            FastByteArrayOutputStream bos = new FastByteArrayOutputStream();
            // set once a decode fails; all further callbacks become no-ops (corrupted tail)
            boolean ignore = false;

            @Override
            public synchronized void onPartial(byte[] data, int offset, int size)
                throws IOException {
              if (ignore) {
                return;
              }
              bos.write(data, offset, size);
              // if we don't have enough to read the header size of the first translog, bail and
              // wait for the next one
              if (bos.size() < 4) {
                return;
              }
              BytesStreamInput si = new BytesStreamInput(bos.unsafeByteArray(), 0, bos.size());
              // position marks the start of the last fully-consumed prefix; bytes past it are
              // carried over into the next buffer as an incomplete operation
              int position;
              while (true) {
                try {
                  position = si.position();
                  // not even the 4-byte length prefix is complete yet
                  if (position + 4 > bos.size()) {
                    break;
                  }
                  int opSize = si.readInt();
                  int curPos = si.position();
                  // operation payload not fully buffered; position still points before the
                  // length prefix, so the prefix is re-read next time
                  if ((si.position() + opSize) > bos.size()) {
                    break;
                  }
                  Translog.Operation operation = TranslogStreams.readTranslogOperation(si);
                  // sanity check: decoded size should match the declared frame length
                  if ((si.position() - curPos) != opSize) {
                    logger.warn(
                        "mismatch in size, expected [{}], got [{}]",
                        opSize,
                        si.position() - curPos);
                  }
                  recoveryStatus.translog().addTranslogOperations(1);
                  indexShard.performRecoveryOperation(operation);
                  // buffer fully consumed; record position so no bytes are carried over
                  if (si.position() >= bos.size()) {
                    position = si.position();
                    break;
                  }
                } catch (Exception e) {
                  // corrupted operation: keep what we replayed so far, drop the rest
                  logger.warn(
                      "failed to retrieve translog after [{}] operations, ignoring the rest, considered corrupted",
                      e,
                      recoveryStatus.translog().currentTranslogOperations());
                  ignore = true;
                  latch.countDown();
                  return;
                }
              }

              // carry any trailing partial operation into a fresh buffer for the next chunk
              FastByteArrayOutputStream newBos = new FastByteArrayOutputStream();

              int leftOver = bos.size() - position;
              if (leftOver > 0) {
                newBos.write(bos.unsafeByteArray(), position, leftOver);
              }

              bos = newBos;
            }

            @Override
            public synchronized void onCompleted() {
              if (ignore) {
                return;
              }
              // last translog file done: release the waiting recovery thread
              if (!transIt.hasNext()) {
                latch.countDown();
                return;
              }
              // chain this same listener onto the next translog file (bos carries over)
              blobContainer.readBlob(transIt.next().name(), this);
            }

            @Override
            public void onFailure(Throwable t) {
              failure.set(t);
              latch.countDown();
            }
          });

      latch.await();
      if (failure.get() != null) {
        throw failure.get();
      }

      indexShard.performRecoveryFinalization(true);
    } catch (Throwable e) {
      throw new IndexShardGatewayRecoveryException(shardId, "Failed to recover translog", e);
    }
  }