/*
 * Fetch a file that is in a Hadoop file system. Return a local File.
 * Interruptible.
 */
private File hdfsFetch(Path fromPath, Reporter reporter) throws IOException, InterruptedException {
  UUID uniqueId = UUID.randomUUID();
  File toFile = new File(tempDir, uniqueId.toString() + "/" + fromPath.getName());
  File toDir = new File(toFile.getParent());
  if (toDir.exists()) {
    FileUtils.deleteDirectory(toDir);
  }
  toDir.mkdirs();
  Path toPath = new Path(toFile.getCanonicalPath());

  FileSystem fS = fromPath.getFileSystem(hadoopConf);
  FileSystem tofS = FileSystem.getLocal(hadoopConf);

  Throttler throttler = new Throttler((double) bytesPerSecThrottle);
  try {
    for (FileStatus fStatus : fS.globStatus(fromPath)) {
      log.info("Copying " + fStatus.getPath() + " to " + toPath);
      long bytesSoFar = 0;

      FSDataInputStream iS = fS.open(fStatus.getPath());
      FSDataOutputStream oS = tofS.create(toPath);

      byte[] buffer = new byte[downloadBufferSize];
      int nRead;
      while ((nRead = iS.read(buffer, 0, buffer.length)) != -1) {
        // Needed to be able to interrupt the copy at any moment.
        if (Thread.interrupted()) {
          iS.close();
          oS.close();
          cleanDirNoExceptions(toDir);
          throw new InterruptedException();
        }
        bytesSoFar += nRead;
        oS.write(buffer, 0, nRead);
        throttler.incrementAndThrottle(nRead);
        if (bytesSoFar >= bytesToReportProgress) {
          if (reporter != null) {
            reporter.progress(bytesSoFar);
          }
          bytesSoFar = 0L;
        }
      }
      if (reporter != null) {
        reporter.progress(bytesSoFar);
      }
      oS.close();
      iS.close();
    }
    return toDir;
  } catch (ClosedByInterruptException e) {
    // This can be thrown by the read() method when the thread is interrupted.
    cleanDirNoExceptions(toDir);
    throw new InterruptedIOException();
  }
}
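/*
 * The interruption path above relies on a cleanDirNoExceptions(File) helper that is not shown in
 * this section. A minimal sketch, assuming the helper only needs to remove the partially
 * downloaded directory and swallow any IOException so the InterruptedException can propagate:
 */
private void cleanDirNoExceptions(File dir) {
  try {
    FileUtils.deleteDirectory(dir);
  } catch (IOException e) {
    // Best-effort cleanup: log and continue so the original interruption is not masked.
    log.warn("Could not clean up directory after interruption: " + dir, e);
  }
}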
private void copyFile(File sourceFile, File destFile, Reporter reporter) throws IOException {
  if (!destFile.exists()) {
    destFile.createNewFile();
  }
  FileChannel source = null;
  FileChannel destination = null;

  Throttler throttler = new Throttler((double) bytesPerSecThrottle);

  FileInputStream iS = null;
  FileOutputStream oS = null;

  try {
    iS = new FileInputStream(sourceFile);
    oS = new FileOutputStream(destFile);
    source = iS.getChannel();
    destination = oS.getChannel();
    long bytesSoFar = 0;
    long reportingBytesSoFar = 0;
    long size = source.size();

    int transferred = 0;

    while (bytesSoFar < size) {
      // Casting to int here is safe since we transfer at most "downloadBufferSize" bytes.
      // Transferring in small chunks is done on purpose so that throttling can be applied.
      transferred = (int) destination.transferFrom(source, bytesSoFar, downloadBufferSize);
      bytesSoFar += transferred;
      reportingBytesSoFar += transferred;
      throttler.incrementAndThrottle(transferred);
      if (reportingBytesSoFar >= bytesToReportProgress) {
        if (reporter != null) {
          reporter.progress(reportingBytesSoFar);
        }
        reportingBytesSoFar = 0L;
      }
    }

    if (reporter != null) {
      reporter.progress(reportingBytesSoFar);
    }
  } finally {
    if (iS != null) {
      iS.close();
    }
    if (oS != null) {
      oS.close();
    }
    if (source != null) {
      source.close();
    }
    if (destination != null) {
      destination.close();
    }
  }
}
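/*
 * The copy methods in this section delegate bandwidth limiting to a Throttler that is constructed
 * with a bytes-per-second limit and fed with incrementAndThrottle(n) after every chunk. The class
 * itself is not part of this section; the following is only a minimal sketch consistent with that
 * usage. The sleep-based pacing and the field names are assumptions, not the actual implementation.
 */
static class Throttler {

  private final double bytesPerSec;
  private long windowStart = System.currentTimeMillis();
  private long bytesInWindow = 0;

  Throttler(double bytesPerSec) {
    this.bytesPerSec = bytesPerSec;
  }

  void incrementAndThrottle(int bytes) {
    if (bytesPerSec <= 0) {
      // A non-positive limit is interpreted here as "no throttling".
      return;
    }
    bytesInWindow += bytes;
    long elapsedMillis = System.currentTimeMillis() - windowStart;
    // How long the bytes seen so far *should* have taken at the configured rate.
    long expectedMillis = (long) ((bytesInWindow / bytesPerSec) * 1000);
    if (expectedMillis > elapsedMillis) {
      try {
        Thread.sleep(expectedMillis - elapsedMillis);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }
    // Reset the accounting window periodically so old history does not dominate.
    if (elapsedMillis > 1000) {
      windowStart = System.currentTimeMillis();
      bytesInWindow = 0;
    }
  }
}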
public void testTimeSlotCalculus() throws Exception {
  Throttler throttler = new Throttler(null, 2, 1000, null);
  // start a new time slot
  TimeSlot slot = throttler.nextSlot();
  assertNotNull(slot);
  // make sure the same slot is used (2 exchanges per slot)
  assertSame(slot, throttler.nextSlot());
  assertTrue(slot.isFull());
  // now we should have a new slot that starts somewhere in the future
  TimeSlot next = throttler.nextSlot();
  assertNotSame(slot, next);
  assertFalse(next.isActive());
}
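/*
 * The test above exercises a time-slot based Throttler (a different class from the byte-rate
 * Throttler used by the copy methods): each TimeSlot admits a fixed number of exchanges (2 here)
 * within a time period (1000 ms), and once a slot is full the next slot starts in the future.
 * The real implementation is not shown in this section; the class below is only a rough sketch of
 * a TimeSlot that would satisfy the assertions, with made-up field names.
 */
class TimeSlot {

  private final int capacity;   // exchanges allowed per slot
  private final long duration;  // slot length in milliseconds
  private final long startTime; // when this slot becomes active
  private int assigned = 0;

  TimeSlot(int capacity, long duration, long startTime) {
    this.capacity = capacity;
    this.duration = duration;
    this.startTime = startTime;
  }

  void assign() {
    assigned++;
  }

  boolean isFull() {
    return assigned >= capacity;
  }

  boolean isActive() {
    long now = System.currentTimeMillis();
    return startTime <= now && now < startTime + duration;
  }

  TimeSlot next() {
    // The following slot starts right after this one ends, i.e. possibly in the future.
    return new TimeSlot(capacity, duration, startTime + duration);
  }
}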
/*
 * Fetch a file that is in a Hadoop file system. Return a local File.
 */
private File hdfsFetch(Path fromPath, Reporter reporter) throws IOException {
  File toFile = new File(tempDir, fromPath.toUri().getPath());
  File toDir = new File(toFile.getParent());
  if (toDir.exists()) {
    FileUtils.deleteDirectory(toDir);
  }
  toDir.mkdirs();
  Path toPath = new Path(toFile.getCanonicalPath());

  FileSystem fS = fromPath.getFileSystem(hadoopConf);
  FileSystem tofS = FileSystem.getLocal(hadoopConf);

  Throttler throttler = new Throttler((double) bytesPerSecThrottle);
  for (FileStatus fStatus : fS.globStatus(fromPath)) {
    log.info("Copying " + fStatus.getPath() + " to " + toPath);
    long bytesSoFar = 0;

    FSDataInputStream iS = fS.open(fStatus.getPath());
    FSDataOutputStream oS = tofS.create(toPath);

    byte[] buffer = new byte[downloadBufferSize];
    int nRead;
    while ((nRead = iS.read(buffer, 0, buffer.length)) != -1) {
      bytesSoFar += nRead;
      oS.write(buffer, 0, nRead);
      throttler.incrementAndThrottle(nRead);
      if (bytesSoFar >= bytesToReportProgress) {
        if (reporter != null) {
          reporter.progress(bytesSoFar);
        }
        bytesSoFar = 0L;
      }
    }
    if (reporter != null) {
      reporter.progress(bytesSoFar);
    }
    oS.close();
    iS.close();
  }
  return toDir;
}
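/*
 * A hypothetical call site for the non-interruptible hdfsFetch variant above, fetching all parts
 * matching a glob into a local directory. The path, host, and Reporter instance are illustrative
 * only and not taken from the original code.
 */
Path remote = new Path("hdfs://namenode:8020/tmp/output/part-*");
File localDir = hdfsFetch(remote, reporter);
log.info("Fetched files into " + localDir.getAbsolutePath());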
/*
 * Fetch a file that is in an S3 file system. Return a local File. It accepts "s3://" and "s3n://" prefixes.
 */
private File s3Fetch(URI uri, Reporter reporter) throws IOException {
  String bucketName = uri.getHost();
  String path = uri.getPath();
  File destFolder = new File(tempDir, bucketName + "/" + path);
  if (destFolder.exists()) {
    FileUtils.deleteDirectory(destFolder);
  }
  destFolder.mkdirs();

  Throttler throttler = new Throttler((double) bytesPerSecThrottle);

  boolean done = false;
  try {
    s3Service = new RestS3Service(getCredentials());
    if (s3Service.checkBucketStatus(bucketName) != RestS3Service.BUCKET_STATUS__MY_BUCKET) {
      throw new IOException("Bucket doesn't exist or is already claimed: " + bucketName);
    }
    if (path.startsWith("/")) {
      path = path.substring(1, path.length());
    }

    for (S3Object object : s3Service.listObjects(new S3Bucket(bucketName), path, "")) {
      long bytesSoFar = 0;

      String fileName = path;
      if (path.contains("/")) {
        fileName = path.substring(path.lastIndexOf("/") + 1, path.length());
      }
      File fileDest = new File(destFolder, fileName);
      log.info("Downloading " + object.getKey() + " to " + fileDest + " ...");
      if (fileDest.exists()) {
        fileDest.delete();
      }

      object = s3Service.getObject(new S3Bucket(bucketName), object.getKey());
      InputStream iS = object.getDataInputStream();
      FileOutputStream writer = new FileOutputStream(fileDest);
      byte[] buffer = new byte[downloadBufferSize];
      int nRead;
      while ((nRead = iS.read(buffer, 0, buffer.length)) != -1) {
        bytesSoFar += nRead;
        writer.write(buffer, 0, nRead);
        throttler.incrementAndThrottle(nRead);
        if (bytesSoFar >= bytesToReportProgress) {
          if (reporter != null) {
            reporter.progress(bytesSoFar);
          }
          bytesSoFar = 0L;
        }
      }
      if (reporter != null) {
        reporter.progress(bytesSoFar);
      }

      writer.close();
      iS.close();
      done = true;
    }

    if (!done) {
      throw new IOException("Bucket is empty! " + bucketName + " path: " + path);
    }
  } catch (S3ServiceException e) {
    throw new IOException(e);
  }
  return destFolder;
}
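/*
 * The fetch/copy methods in this section might be tied together by a small dispatcher that routes
 * on the URI scheme. The method below is only a sketch of that idea, not part of the original
 * code; its name, the exact scheme handling, and the local-file fallback are assumptions.
 */
private File fetch(String fileUri, Reporter reporter) throws IOException, InterruptedException {
  URI uri = URI.create(fileUri);
  String scheme = uri.getScheme();
  if ("s3".equals(scheme) || "s3n".equals(scheme)) {
    return s3Fetch(uri, reporter);
  } else if ("hdfs".equals(scheme)) {
    return hdfsFetch(new Path(fileUri), reporter);
  } else {
    // Assume a local file: copy it into the temp dir with the same throttling / progress reporting.
    File source = new File(uri.getPath());
    File dest = new File(tempDir, source.getName());
    copyFile(source, dest, reporter);
    return dest;
  }
}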