private boolean commitPointFileExistsInBlobs(CommitPoint.FileInfo fileInfo, ImmutableMap<String, BlobMetaData> blobs) {
    BlobMetaData blobMetaData = blobs.get(fileInfo.name());
    if (blobMetaData != null) {
        if (blobMetaData.length() != fileInfo.length()) {
            return false;
        }
    } else if (blobs.containsKey(fileInfo.name() + ".part0")) {
        // multi-part file: sum the lengths of all parts and compare against the expected length
        int part = 0;
        long totalSize = 0;
        while (true) {
            blobMetaData = blobs.get(fileInfo.name() + ".part" + part++);
            if (blobMetaData == null) {
                break;
            }
            totalSize += blobMetaData.length();
        }
        if (totalSize != fileInfo.length()) {
            return false;
        }
    } else {
        // neither a single blob nor a ".part0": the file is not in the blob store
        return false;
    }
    return true;
}
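// A minimal standalone sketch of the same check (not part of the original class),
// using a plain Map of blob name -> length in place of ImmutableMap<String, BlobMetaData>.
// For example, a 300-byte file "segments_5" stored as "segments_5.part0" (200 bytes)
// and "segments_5.part1" (100 bytes) passes; a missing or truncated part fails.
static boolean existsInBlobs(String name, long expectedLength, java.util.Map<String, Long> blobLengths) {
    Long exact = blobLengths.get(name);
    if (exact != null) {
        return exact == expectedLength;                 // single blob: lengths must match exactly
    }
    if (!blobLengths.containsKey(name + ".part0")) {
        return false;                                   // neither a single blob nor a multi-part series
    }
    long total = 0;
    for (int part = 0; blobLengths.containsKey(name + ".part" + part); part++) {
        total += blobLengths.get(name + ".part" + part);
    }
    return total == expectedLength;                     // parts must sum to the expected length
}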
private void snapshotTranslog(Translog.Snapshot snapshot, CommitPoint.FileInfo fileInfo) throws IOException {
    blobContainer.writeBlob(fileInfo.name(), snapshot.stream(), snapshot.lengthInBytes());

    // A chunked alternative that would split the translog into ".part[N]" blobs,
    // mirroring snapshotFile below, is left commented out:
    //
    // long chunkBytes = Long.MAX_VALUE;
    // if (chunkSize != null) {
    //     chunkBytes = chunkSize.bytes();
    // }
    //
    // long totalLength = fileInfo.length();
    // long numberOfChunks = totalLength / chunkBytes;
    // if (totalLength % chunkBytes > 0) {
    //     numberOfChunks++;
    // }
    // if (numberOfChunks == 0) {
    //     numberOfChunks++;
    // }
    //
    // if (numberOfChunks == 1) {
    //     blobContainer.writeBlob(fileInfo.name(), snapshot.stream(), snapshot.lengthInBytes());
    // } else {
    //     InputStream translogStream = snapshot.stream();
    //     long totalLengthLeftToWrite = totalLength;
    //     for (int i = 0; i < numberOfChunks; i++) {
    //         long lengthToWrite = chunkBytes;
    //         if (totalLengthLeftToWrite < chunkBytes) {
    //             lengthToWrite = totalLengthLeftToWrite;
    //         }
    //         blobContainer.writeBlob(fileInfo.name() + ".part" + i,
    //                 new LimitInputStream(translogStream, lengthToWrite), lengthToWrite);
    //         totalLengthLeftToWrite -= lengthToWrite;
    //     }
    // }
}
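// Sketch of the chunk-count arithmetic used in the commented block above and in
// snapshotFile below: ceiling division of the total length by the chunk size, with
// a minimum of one chunk so empty files still map to a single blob. Hypothetical
// helper, not part of the original class.
static long numberOfChunks(long totalLength, long chunkBytes) {
    long chunks = totalLength / chunkBytes;
    if (totalLength % chunkBytes > 0) {
        chunks++;   // the remainder becomes one final, shorter chunk
    }
    if (chunks == 0) {
        chunks++;   // a zero-length file is still written as one (empty) blob
    }
    return chunks;  // e.g. numberOfChunks(250, 100) == 3
}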
private void snapshotFile(Directory dir, final CommitPoint.FileInfo fileInfo, final CountDownLatch latch, final List<Throwable> failures) throws IOException {
    long chunkBytes = Long.MAX_VALUE;
    if (chunkSize != null) {
        chunkBytes = chunkSize.bytes();
    }

    long totalLength = fileInfo.length();
    long numberOfChunks = totalLength / chunkBytes;
    if (totalLength % chunkBytes > 0) {
        numberOfChunks++;
    }
    if (numberOfChunks == 0) {
        numberOfChunks++;
    }

    final long fNumberOfChunks = numberOfChunks;
    final AtomicLong counter = new AtomicLong(numberOfChunks);
    for (long i = 0; i < fNumberOfChunks; i++) {
        final long partNumber = i;

        IndexInput indexInput = null;
        try {
            indexInput = dir.openInput(fileInfo.physicalName());
            indexInput.seek(partNumber * chunkBytes);
            InputStreamIndexInput is = new ThreadSafeInputStreamIndexInput(indexInput, chunkBytes);

            String blobName = fileInfo.name();
            if (fNumberOfChunks > 1) {
                // if we do chunks, then all of them are in the form of "[xxx].part[N]"
                blobName += ".part" + partNumber;
            }

            final IndexInput fIndexInput = indexInput;
            blobContainer.writeBlob(blobName, is, is.actualSizeToRead(), new ImmutableBlobContainer.WriterListener() {
                @Override
                public void onCompleted() {
                    try {
                        fIndexInput.close();
                    } catch (IOException e) {
                        // ignore
                    }
                    if (counter.decrementAndGet() == 0) {
                        latch.countDown();
                    }
                }

                @Override
                public void onFailure(Throwable t) {
                    try {
                        fIndexInput.close();
                    } catch (IOException e) {
                        // ignore
                    }
                    failures.add(t);
                    if (counter.decrementAndGet() == 0) {
                        latch.countDown();
                    }
                }
            });
        } catch (Exception e) {
            if (indexInput != null) {
                try {
                    indexInput.close();
                } catch (IOException e1) {
                    // ignore
                }
            }
            failures.add(e);
            latch.countDown();
        }
    }
}
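// Illustrative calling pattern (assumed, not from the original source): the latch is
// sized to the number of files, each file counts it down exactly once after its last
// chunk completes, and the failures list must be thread-safe because the writer
// listeners fire on I/O threads. "filesToSnapshot" is a hypothetical collection.
CountDownLatch latch = new CountDownLatch(filesToSnapshot.size());
List<Throwable> failures = new CopyOnWriteArrayList<Throwable>();
for (CommitPoint.FileInfo fileInfo : filesToSnapshot) {
    snapshotFile(dir, fileInfo, latch, failures);
}
latch.await();   // blocks until every file has finished or failed
if (!failures.isEmpty()) {
    throw new IOException("snapshot failed", failures.get(0));
}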
private void recoverFile(final CommitPoint.FileInfo fileInfo, final ImmutableMap<String, BlobMetaData> blobs, final CountDownLatch latch, final List<Throwable> failures) {
    final IndexOutput indexOutput;
    try {
        // we create an output with no checksum: the pure binary data of the file is not
        // the checksum (because of seek). We will write the checksum file once copying is done
        indexOutput = store.createOutputWithNoChecksum(fileInfo.physicalName());
    } catch (IOException e) {
        failures.add(e);
        latch.countDown();
        return;
    }

    String firstFileToRecover = fileInfo.name();
    if (!blobs.containsKey(fileInfo.name())) {
        // chunked file: start from its first part
        firstFileToRecover = fileInfo.name() + ".part0";
    }
    if (!blobs.containsKey(firstFileToRecover)) {
        // neither a single blob nor a first part: nothing to recover for this file
        logger.warn("no file [{}]/[{}] to recover, ignoring it", fileInfo.name(), fileInfo.physicalName());
        latch.countDown();
        return;
    }

    final AtomicInteger partIndex = new AtomicInteger();
    blobContainer.readBlob(firstFileToRecover, new BlobContainer.ReadBlobListener() {
        @Override
        public synchronized void onPartial(byte[] data, int offset, int size) throws IOException {
            recoveryStatus.index().addCurrentFilesSize(size);
            indexOutput.writeBytes(data, offset, size);
        }

        @Override
        public synchronized void onCompleted() {
            int part = partIndex.incrementAndGet();
            String partName = fileInfo.name() + ".part" + part;
            if (blobs.containsKey(partName)) {
                // continue with the next part
                blobContainer.readBlob(partName, this);
                return;
            }
            // we are done...
            try {
                indexOutput.close();
                // write the checksum
                if (fileInfo.checksum() != null) {
                    store.writeChecksum(fileInfo.physicalName(), fileInfo.checksum());
                }
                store.directory().sync(Collections.singleton(fileInfo.physicalName()));
            } catch (IOException e) {
                onFailure(e);
                return;
            }
            latch.countDown();
        }

        @Override
        public void onFailure(Throwable t) {
            failures.add(t);
            latch.countDown();
        }
    });
}
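// Simplified synchronous sketch of the chained-part read above (the original uses the
// asynchronous ReadBlobListener, re-invoking readBlob from onCompleted): parts are
// consumed strictly in order, stopping at the first missing ".part[N]" blob.
// openBlob(...) is a hypothetical stand-in that returns null when the blob is absent.
static void recoverParts(String name, OutputStream out) throws IOException {
    for (int part = 0; ; part++) {
        InputStream in = openBlob(name + ".part" + part);   // hypothetical lookup
        if (in == null) {
            break;                                          // no more parts, done
        }
        try {
            byte[] buffer = new byte[8192];
            int n;
            while ((n = in.read(buffer)) != -1) {
                out.write(buffer, 0, n);                    // append this part to the file
            }
        } finally {
            in.close();
        }
    }
}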