@Test
public void testRestrictedRead() throws IOException {
  FileSystemPartitionView<TestRecord> partition0 =
      partitioned.getPartitionView(URI.create("id_hash=0"));
  FileSystemPartitionView<TestRecord> partition1 =
      partitioned.getPartitionView(URI.create("id_hash=1"));
  FileSystemPartitionView<TestRecord> partition2 =
      partitioned.getPartitionView(URI.create("id_hash=2"));
  FileSystemPartitionView<TestRecord> partition3 =
      partitioned.getPartitionView(URI.create("id_hash=3"));

  int count0 = DatasetTestUtilities.materialize(partition0).size();
  int total = DatasetTestUtilities.materialize(partitioned).size();

  Assert.assertTrue("Should read some records", count0 > 0);
  Assert.assertTrue("Should not read the entire dataset", count0 < total);

  // move other partitions so they match the partition0 constraint
  FileSystem local = LocalFileSystem.getInstance();
  local.rename(new Path(partition1.getLocation()),
      new Path(partitioned.getDirectory(), "0"));
  local.rename(new Path(partition2.getLocation()),
      new Path(partitioned.getDirectory(), "hash=0"));
  local.rename(new Path(partition3.getLocation()),
      new Path(partitioned.getDirectory(), "id_hash=00"));

  int newCount0 = DatasetTestUtilities.materialize(partition0).size();
  Assert.assertEquals("Should match original count", count0, newCount0);

  int countByConstraints =
      DatasetTestUtilities.materialize(partition0.toConstraintsView()).size();
  Assert.assertEquals("Should match total count", total, countByConstraints);
}
@Test
public void errorRenameTest() throws Exception {
  // Renaming /dirA to /dirA/dirB should fail
  {
    Path dirA = new Path("/dirA");
    Path finalDst = new Path("/dirA/dirB");
    sTFS.mkdirs(dirA);
    Assert.assertFalse(sTFS.rename(dirA, finalDst));
    Assert.assertFalse(sTFS.exists(finalDst));
    Assert.assertTrue(sTFS.exists(dirA));
    cleanup(sTFS);
  }

  // Renaming /fileA to /fileB should fail if /fileB already exists
  {
    Path fileA = new Path("/fileA");
    Path fileB = new Path("/fileB");
    create(sTFS, fileA);
    create(sTFS, fileB);
    Assert.assertFalse(sTFS.rename(fileA, fileB));
    Assert.assertTrue(sTFS.exists(fileA));
    Assert.assertTrue(sTFS.exists(fileB));
    cleanup(sTFS);
  }

  // Renaming /fileA into /dirA should fail if /dirA/fileA already exists
  {
    Path fileA = new Path("/fileA");
    Path dirA = new Path("/dirA");
    Path finalDst = new Path("/dirA/fileA");
    create(sTFS, fileA);
    create(sTFS, finalDst);
    sTFS.mkdirs(dirA);
    Assert.assertFalse(sTFS.rename(fileA, dirA));
    Assert.assertTrue(sTFS.exists(fileA));
    Assert.assertTrue(sTFS.exists(dirA));
    Assert.assertTrue(sTFS.exists(finalDst));
    cleanup(sTFS);
  }

  // Renaming /fileA to a nonexistent path should fail
  {
    Path fileA = new Path("/fileA");
    Path nonexistentPath = new Path("/doesNotExist/fileA");
    create(sTFS, fileA);
    Assert.assertFalse(sTFS.rename(fileA, nonexistentPath));
    Assert.assertTrue(sTFS.exists(fileA));
    cleanup(sTFS);
  }
}
/** Inserts rows through the staging phase. */
private void insertRowsThroughStaging(TaskAttemptContext taskAttemptContext,
    InsertNode insertNode, Path finalOutputPath, Path stagingDir, Path stagingResultDir)
    throws IOException {
  EvalExprExec evalExprExec =
      new EvalExprExec(taskAttemptContext, (EvalExprNode) insertNode.getChild());
  InsertRowsExec exec = new InsertRowsExec(taskAttemptContext, insertNode, evalExprExec);

  try {
    exec.init();
    exec.next();
  } finally {
    exec.close();
  }

  FileSystem fs = TajoConf.getWarehouseDir(context.getConf())
      .getFileSystem(context.getConf());

  if (insertNode.isOverwrite()) { // INSERT OVERWRITE INTO
    // It moves the original table into a temporary location, then moves the
    // new result table into the original table location. Upon failure, it
    // recovers the original table if possible.
    boolean movedToOldTable = false;
    boolean committed = false;
    Path oldTableDir = new Path(stagingDir, TajoConstants.INSERT_OVERWIRTE_OLD_TABLE_NAME);

    try {
      if (fs.exists(finalOutputPath)) {
        fs.rename(finalOutputPath, oldTableDir);
        movedToOldTable = fs.exists(oldTableDir);
      } else {
        // if the output directory does not exist, make its parent directory
        fs.mkdirs(finalOutputPath.getParent());
      }
      fs.rename(stagingResultDir, finalOutputPath);
      committed = fs.exists(finalOutputPath);
    } catch (IOException ioe) {
      // recover the old table
      if (movedToOldTable && !committed) {
        fs.rename(oldTableDir, finalOutputPath);
      }
    }
  } else {
    FileStatus[] files = fs.listStatus(stagingResultDir);
    for (FileStatus eachFile : files) {
      Path targetFilePath = new Path(finalOutputPath, eachFile.getPath().getName());
      if (fs.exists(targetFilePath)) {
        targetFilePath = new Path(finalOutputPath,
            eachFile.getPath().getName() + "_" + System.currentTimeMillis());
      }
      fs.rename(eachFile.getPath(), targetFilePath);
    }
  }
}
/**
 * Rename file using prefix and suffix settings.
 *
 * @param path the path to rename
 */
protected void renameFile(Path path) {
  // bail out if there are no in-writing settings
  if (!StringUtils.hasText(prefix) && !StringUtils.hasText(suffix)) {
    return;
  }
  String name = path.getName();
  if (StringUtils.startsWithIgnoreCase(name, prefix)) {
    name = name.substring(prefix.length());
  }
  if (StringUtils.endsWithIgnoreCase(name, suffix)) {
    name = name.substring(0, name.length() - suffix.length());
  }
  Path toPath = new Path(path.getParent(), name);
  try {
    FileSystem fs = path.getFileSystem(getConfiguration());
    if (!fs.rename(path, toPath)) {
      throw new StoreException("Failed renaming from " + path + " to " + toPath
          + " with configuration " + getConfiguration());
    }
  } catch (IOException e) {
    log.error("Error renaming file", e);
    throw new StoreException("Error renaming file", e);
  }
}
@Override
public boolean rename(String src, String dst) throws IOException {
  LOG.debug("Renaming from {} to {}", src, dst);
  if (!exists(src)) {
    LOG.error("File " + src + " does not exist. Therefore rename to " + dst + " failed.");
    return false;
  }
  if (exists(dst)) {
    LOG.error("File " + dst + " already exists. Therefore rename from " + src + " failed.");
    return false;
  }
  int cnt = 0;
  IOException te = null;
  while (cnt < MAX_TRY) {
    try {
      return mFs.rename(new Path(src), new Path(dst));
    } catch (IOException e) {
      cnt++;
      LOG.error(cnt + " try to rename " + src + " to " + dst + " : " + e.getMessage(), e);
      te = e;
    }
  }
  throw te;
}
/**
 * Moves the given files to another directory on HDFS.
 *
 * @param conf            Hadoop Configuration
 * @param delayFiles      list of files to move
 * @param targetDirectory destination directory
 * @throws java.io.IOException if a file cannot be moved
 */
public static void moveFilesToDirectory(Configuration conf, List<String> delayFiles,
    String targetDirectory) throws IOException {
  for (String path : delayFiles) {
    String filename = FileUtils.getFilename(path);
    String delayedFilePrefix = filename.split("-")[0];
    String outputHead = delayedFilePrefix.replaceAll("delay", "");
    String outputMiddle = delayedFilePrefix.substring(0, 5); // todo
    String outputTail = filename.replaceAll(delayedFilePrefix, "");

    System.out.println("Acceleration Dir " + targetDirectory + "/" + outputHead + "_"
        + outputMiddle + outputTail);
    makeDirectoryIfNotExists(targetDirectory, conf);

    FileSystem fileSystem = FileSystem.get(conf);
    fileSystem.rename(new Path(path),
        new Path(targetDirectory + "/" + outputHead + "_" + outputMiddle + outputTail));

    System.out.println("\t Moved: '" + path + "' --> '" + targetDirectory + "'");
  }
}
/**
 * Moves the given source path(s) to the specified destination path.
 *
 * @param source path(s) to move (glob patterns allowed)
 * @param target destination path
 * @param fs     Hadoop FileSystem
 */
public static void move(String source, String target, FileSystem fs) throws Exception {
  Path srcPath = new Path(source);
  Path[] srcs = FileUtil.stat2Paths(fs.globStatus(srcPath), srcPath);
  Path dst = new Path(target);
  if (srcs.length > 1 && !fs.getFileStatus(dst).isDir()) {
    throw new FileSystemException(
        "When moving multiple files, destination should be a directory.");
  }
  for (int i = 0; i < srcs.length; i++) {
    if (!fs.rename(srcs[i], dst)) {
      FileStatus srcFstatus = null;
      FileStatus dstFstatus = null;
      try {
        srcFstatus = fs.getFileStatus(srcs[i]);
      } catch (FileNotFoundException e) {
        throw new FileNotFoundException(srcs[i] + ": No such file or directory");
      }
      try {
        dstFstatus = fs.getFileStatus(dst);
      } catch (IOException e) {
        // ignore: destination status is only needed for the error message below
      }
      if ((srcFstatus != null) && (dstFstatus != null)) {
        if (srcFstatus.isDir() && !dstFstatus.isDir()) {
          throw new FileSystemException(
              "cannot overwrite non directory " + dst + " with directory " + srcs[i]);
        }
      }
      throw new FileSystemException("Failed to rename " + srcs[i] + " to " + dst);
    }
  }
}
private void testMovingFiles(boolean useAcl) throws Exception {
  // Create two tmp directories with wide-open permissions and the sticky bit set
  Path tmpPath = new Path("/tmp");
  Path tmpPath2 = new Path("/tmp2");
  hdfs.mkdirs(tmpPath);
  hdfs.mkdirs(tmpPath2);
  hdfs.setPermission(tmpPath, new FsPermission((short) 01777));
  if (useAcl) {
    applyAcl(tmpPath);
  }
  hdfs.setPermission(tmpPath2, new FsPermission((short) 01777));
  if (useAcl) {
    applyAcl(tmpPath2);
  }

  // Write a file to the new tmp directory as a regular user
  Path file = new Path(tmpPath, "foo");
  writeFile(hdfsAsUser1, file);

  // Log onto the cluster as another user and attempt to move the file
  try {
    hdfsAsUser2.rename(file, new Path(tmpPath2, "renamed"));
    fail("Shouldn't be able to rename someone else's file with SB on");
  } catch (IOException ioe) {
    assertTrue(ioe instanceof AccessControlException);
    assertTrue(ioe.getMessage().contains("sticky bit"));
  }
}
private void markFileAsBad(Path file) {
  String fileName = file.toString();
  String fileNameMinusSuffix = fileName.substring(0, fileName.indexOf(inprogress_suffix));
  String originalName = new Path(fileNameMinusSuffix).getName();
  Path newFile = new Path(badFilesDirPath + Path.SEPARATOR + originalName);

  LOG.info("Moving bad file {} to {}. Processed it till offset {}. SpoutID= {}",
      originalName, newFile, tracker.getCommitPosition(), spoutId);
  try {
    // rename can seemingly fail either by returning false or by throwing an exception
    if (!hdfs.rename(file, newFile)) {
      // convert a false return value into an exception
      throw new IOException("Move failed for bad file: " + file);
    }
  } catch (IOException e) {
    LOG.warn("Error moving bad file: " + file + " to destination " + newFile
        + " SpoutId =" + spoutId, e);
  }
  closeReaderAndResetTrackers();
}
@Override
public String prepareBulkLoad(final byte[] family, final String srcPath) throws IOException {
  Path p = new Path(srcPath);
  Path stageP = new Path(stagingDir, new Path(Bytes.toString(family), p.getName()));

  if (srcFs == null) {
    srcFs = FileSystem.get(p.toUri(), conf);
  }
  if (!isFile(p)) {
    throw new IOException("Path does not reference a file: " + p);
  }

  // Check to see if the source and target filesystems are the same
  if (!FSHDFSUtils.isSameHdfs(conf, srcFs, fs)) {
    LOG.debug("Bulk-load file " + srcPath + " is on different filesystem than "
        + "the destination filesystem. Copying file over to destination staging dir.");
    FileUtil.copy(srcFs, p, fs, stageP, false, conf);
  } else {
    LOG.debug("Moving " + p + " to " + stageP);
    FileStatus origFileStatus = fs.getFileStatus(p);
    origPermissions.put(srcPath, origFileStatus.getPermission());
    if (!fs.rename(p, stageP)) {
      throw new IOException("Failed to move HFile: " + p + " to " + stageP);
    }
  }
  fs.setPermission(stageP, PERM_ALL_ACCESS);
  return stageP.toString();
}
@Override
public void failedBulkLoad(final byte[] family, final String srcPath) throws IOException {
  if (!FSHDFSUtils.isSameHdfs(conf, srcFs, fs)) {
    // files were copied, so there is no need to move them back
    return;
  }
  Path p = new Path(srcPath);
  Path stageP = new Path(stagingDir, new Path(Bytes.toString(family), p.getName()));

  // In the case of Replication for bulk-load files, hfiles are not renamed by the
  // end point during the prepare stage, so there is no need to rename here again.
  if (p.equals(stageP)) {
    LOG.debug(p.getName() + " is already available in source directory. Skipping rename.");
    return;
  }
  LOG.debug("Moving " + stageP + " back to " + p);
  if (!fs.rename(stageP, p)) {
    throw new IOException("Failed to move HFile: " + stageP + " to " + p);
  }

  // restore the original permission
  if (origPermissions.containsKey(srcPath)) {
    fs.setPermission(p, origPermissions.get(srcPath));
  } else {
    LOG.warn("Can't find previous permission for path=" + srcPath);
  }
}
@Override
public void merge(FileSystemDataset<E> update) {
  DatasetDescriptor updateDescriptor = update.getDescriptor();

  if (!updateDescriptor.getFormat().equals(descriptor.getFormat())) {
    throw new DatasetRepositoryException("Cannot merge dataset format "
        + updateDescriptor.getFormat() + " with format " + descriptor.getFormat());
  }

  if (updateDescriptor.isPartitioned() != descriptor.isPartitioned()) {
    throw new DatasetRepositoryException(
        "Cannot merge an unpartitioned dataset with a partitioned one or vice versa.");
  } else if (updateDescriptor.isPartitioned() && descriptor.isPartitioned()
      && !updateDescriptor.getPartitionStrategy().equals(descriptor.getPartitionStrategy())) {
    throw new DatasetRepositoryException("Cannot merge dataset partition strategy "
        + updateDescriptor.getPartitionStrategy() + " with "
        + descriptor.getPartitionStrategy());
  }

  if (!updateDescriptor.getSchema().equals(descriptor.getSchema())) {
    throw new DatasetRepositoryException("Cannot merge dataset schema "
        + updateDescriptor.getSchema() + " with schema " + descriptor.getSchema());
  }

  Set<String> addedPartitions = Sets.newHashSet();
  for (Path path : update.pathIterator()) {
    URI relativePath = update.getDirectory().toUri().relativize(path.toUri());
    Path newPath = new Path(directory, new Path(relativePath));
    Path newPartitionDirectory = newPath.getParent();
    try {
      if (!fileSystem.exists(newPartitionDirectory)) {
        fileSystem.mkdirs(newPartitionDirectory);
      }
      logger.debug("Renaming {} to {}", path, newPath);
      boolean renameOk = fileSystem.rename(path, newPath);
      if (!renameOk) {
        throw new DatasetException("Dataset merge failed during rename of " + path
            + " to " + newPath);
      }
    } catch (IOException e) {
      throw new DatasetIOException("Dataset merge failed", e);
    }
    if (descriptor.isPartitioned() && partitionListener != null) {
      String partition = newPartitionDirectory.toString();
      if (!addedPartitions.contains(partition)) {
        partitionListener.partitionAdded(name, partition);
        addedPartitions.add(partition);
      }
    }
  }
}
public static void main(String[] args)
    throws IOException, InterruptedException, ClassNotFoundException {
  int addedIterateTime;
  try {
    addedIterateTime = Integer.parseInt(args[0]);
  } catch (Exception e) {
    System.out.println("use> AddIterator num");
    return;
  }
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  fs.rename(new Path(LabelRun.COMMUNITY), new Path(LabelRun.TMP_LABEL));
  for (int i = 0; i < addedIterateTime; i++) {
    LabelRun.runLabelPropagation(conf, fs);
  }
  fs.rename(new Path(LabelRun.TMP_LABEL), new Path(LabelRun.COMMUNITY));
}
/**
 * Renames a single file.
 *
 * @param srcFilePath    source file name, including its path
 * @param targetFilePath target file name, including its path
 */
public static void renameFile(String srcFilePath, String targetFilePath) {
  try {
    fs.rename(new Path(srcFilePath), new Path(targetFilePath));
  } catch (IllegalArgumentException | IOException e) {
    LOG.error("Error renaming file: " + e.getMessage());
    e.printStackTrace();
  }
}
static boolean canRename(FileSystem fs, Path src, Path dst) throws IOException {
  try {
    fs.rename(src, dst);
    return true;
  } catch (AccessControlException e) {
    return false;
  }
}
/** Move files from source to target using a specified starting partition. */
private void moveFiles(FileSystem fs, Path sourceDir, Path targetDir, int partitionStart)
    throws IOException {
  NumberFormat numpart = NumberFormat.getInstance();
  numpart.setMinimumIntegerDigits(PARTITION_DIGITS);
  numpart.setGroupingUsed(false);
  Pattern patt = Pattern.compile("part.*-([0-9][0-9][0-9][0-9][0-9]).*");
  FileStatus[] tempFiles = fs.listStatus(sourceDir);

  if (null == tempFiles) {
    // If we've already checked that the dir exists, and now it can't be
    // listed, this is a genuine error (permissions, fs integrity, or other).
    throw new IOException("Could not list files from " + sourceDir);
  }

  // Move and rename files & directories from temporary to target-dir,
  // appending the file's next partition
  for (FileStatus fileStat : tempFiles) {
    if (!fileStat.isDir()) {
      // Move imported data files
      String filename = fileStat.getPath().getName();
      Matcher mat = patt.matcher(filename);
      if (mat.matches()) {
        String name = getFilename(filename);
        String fileToMove = name.concat(numpart.format(partitionStart++));
        String extension = getFileExtension(filename);
        if (extension != null) {
          fileToMove = fileToMove.concat(extension);
        }
        LOG.debug("Filename: " + filename + " repartitioned to: " + fileToMove);
        fs.rename(fileStat.getPath(), new Path(targetDir, fileToMove));
      }
    } else {
      // Move directories (_logs & any other)
      String dirName = fileStat.getPath().getName();
      Path path = new Path(targetDir, dirName);
      int dirNumber = 0;
      while (fs.exists(path)) {
        path = new Path(targetDir, dirName.concat("-").concat(numpart.format(dirNumber++)));
      }
      LOG.debug("Directory: " + dirName + " renamed to: " + path.getName());
      fs.rename(fileStat.getPath(), path);
    }
  }
}
private void moveTmpToDone(Path tmpPath) throws IOException {
  if (tmpPath != null) {
    String tmpFileName = tmpPath.getName();
    String fileName = getFileNameFromTmpFN(tmpFileName);
    Path path = new Path(tmpPath.getParent(), fileName);
    doneDirFS.rename(tmpPath, path);
    LOG.info("Moved tmp to done: " + tmpPath + " to " + path);
  }
}
@Test
public void testDeleteMissing() {
  TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
  JobContext jobContext = new JobContextImpl(taskAttemptContext.getConfiguration(),
      taskAttemptContext.getTaskAttemptID().getJobID());
  Configuration conf = jobContext.getConfiguration();

  String sourceBase;
  String targetBase;
  FileSystem fs = null;
  try {
    OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
    fs = FileSystem.get(conf);
    sourceBase = TestDistCpUtils.createTestSetup(fs, FsPermission.getDefault());
    targetBase = TestDistCpUtils.createTestSetup(fs, FsPermission.getDefault());
    String targetBaseAdd = TestDistCpUtils.createTestSetup(fs, FsPermission.getDefault());
    fs.rename(new Path(targetBaseAdd), new Path(targetBase));

    DistCpOptions options =
        new DistCpOptions(Arrays.asList(new Path(sourceBase)), new Path("/out"));
    options.setSyncFolder(true);
    options.setDeleteMissing(true);
    options.appendToConf(conf);

    CopyListing listing = new GlobbedCopyListing(conf, CREDENTIALS);
    Path listingFile = new Path("/tmp1/" + String.valueOf(rand.nextLong()));
    listing.buildListing(listingFile, options);

    conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, targetBase);
    conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, targetBase);

    committer.commitJob(jobContext);
    if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, targetBase, sourceBase)) {
      Assert.fail("Source and target folders are not in sync");
    }
    if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, sourceBase, targetBase)) {
      Assert.fail("Source and target folders are not in sync");
    }

    // Test for idempotent commit
    committer.commitJob(jobContext);
    if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, targetBase, sourceBase)) {
      Assert.fail("Source and target folders are not in sync");
    }
    if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, sourceBase, targetBase)) {
      Assert.fail("Source and target folders are not in sync");
    }
  } catch (Throwable e) {
    LOG.error("Exception encountered while testing for delete missing", e);
    Assert.fail("Delete missing failure");
  } finally {
    TestDistCpUtils.delete(fs, "/tmp1");
    conf.set(DistCpConstants.CONF_LABEL_DELETE_MISSING, "false");
  }
}
/**
 * This implements matrix multiplication A * B using MapReduce tasks on CPU or GPU.
 *
 * @param other a DistributedRowMatrix
 * @param outPath path to write the result to
 * @param useGPU use GPU or CPU (default: false, use CPU)
 * @param transposeMatrixA whether to transpose matrix A before multiplying
 * @return a DistributedRowMatrix containing the product
 */
public DistributedRowMatrix multiplyBSP(DistributedRowMatrix other, Path outPath,
    boolean useGPU, boolean transposeMatrixA)
    throws IOException, ClassNotFoundException, InterruptedException {
  // Check if cols of MatrixA = rows of MatrixB
  // (l x m) * (m x n) = (l x n)
  if (numCols != other.numRows()) {
    throw new CardinalityException(numCols, other.numRows());
  }

  Configuration initialConf = (getConf() == null) ? new HamaConfiguration() : getConf();

  // Transpose matrix A within a new MapReduce job
  DistributedRowMatrix transposed = this;
  if (transposeMatrixA) {
    transposed = transposed.transpose();
  }
  // Debug
  // System.out.println("DistributedRowMatrix transposed:");
  // transposed.printDistributedRowMatrix();

  // Build the MatrixMultiplication job configuration
  BSPJob job = null;
  if (!useGPU) {
    job = MatrixMultiplicationBSPCpu.createMatrixMultiplicationBSPCpuConf(initialConf,
        transposed.rowPath, other.rowPath, outPath.getParent(), other.numCols);
  } else { // use GPU
    job = MatrixMultiplicationBSPGpu.createMatrixMultiplicationBSPGpuConf(initialConf,
        transposed.rowPath, other.rowPath, outPath.getParent(), other.numCols);
  }

  // Multiply the matrix with the transposed one
  if (job.waitForCompletion(true)) {
    // Rename the result file to the output path
    Configuration conf = job.getConfiguration();
    FileSystem fs = outPath.getFileSystem(conf);
    FileStatus[] files = fs.listStatus(outPath.getParent());
    for (int i = 0; i < files.length; i++) {
      if ((files[i].getPath().getName().startsWith("part-")) && (files[i].getLen() > 97)) {
        fs.rename(files[i].getPath(), outPath);
        break;
      }
    }

    // Read the resulting matrix from HDFS
    DistributedRowMatrix out =
        new DistributedRowMatrix(outPath, outputTmpPath, this.numRows, other.numCols());
    out.setConf(conf);
    return out;
  }
  return null;
}
/**
 * Commit a merged region, moving it from the merges temporary directory to
 * the proper location in the filesystem.
 *
 * @param mergedRegionInfo merged region {@link HRegionInfo}
 * @throws IOException
 */
void commitMergedRegion(final HRegionInfo mergedRegionInfo) throws IOException {
  Path regionDir = new Path(this.tableDir, mergedRegionInfo.getEncodedName());
  Path mergedRegionTmpDir = this.getMergesDir(mergedRegionInfo);
  // Move the tmp dir to the expected location
  if (mergedRegionTmpDir != null && fs.exists(mergedRegionTmpDir)) {
    if (!fs.rename(mergedRegionTmpDir, regionDir)) {
      throw new IOException("Unable to rename " + mergedRegionTmpDir + " to " + regionDir);
    }
  }
}
/**
 * Renames a file by appending the .inprogress suffix.
 *
 * @return path of the renamed file
 * @throws IOException if the rename operation fails
 */
private Path renameToInProgressFile(Path file) throws IOException {
  Path newFile = new Path(file.toString() + inprogress_suffix);
  try {
    if (hdfs.rename(file, newFile)) {
      return newFile;
    }
    throw new RenameException(file, newFile);
  } catch (IOException e) {
    throw new RenameException(file, newFile, e);
  }
}
@Override
public void close(TaskAttemptContext taskAttemptContext)
    throws IOException, InterruptedException {
  if (outputStream != null) {
    // Close the output stream so that the tmp file is synced, then move it.
    outputStream.close();
    LOG.info("In close, now renaming " + tmpIndexPath + " to final location "
        + realIndexPath);
    // Rename; indexing completed.
    fs.rename(tmpIndexPath, realIndexPath);
  }
}
/**
 * @param fs filesystem to write to
 * @param hTableDescriptor table descriptor to write out
 * @param tableDir table directory under which the descriptor file is placed
 * @param status status of the current descriptor file, if any
 * @return Descriptor file, or null if the write failed.
 * @throws IOException
 */
private static Path writeTableDescriptor(final FileSystem fs,
    final HTableDescriptor hTableDescriptor, final Path tableDir, final FileStatus status)
    throws IOException {
  // Get the temporary dir into which we'll first write a file to avoid the
  // half-written file phenomenon.
  Path tmpTableDir = new Path(tableDir, ".tmp");
  // What is the current sequenceid? We read the current sequenceid from the
  // current file. After we read it, another thread could come in and compete
  // with us writing out the next version of the file. The retries below
  // should help some in this case, but it's hard to make guarantees in the
  // face of concurrent schema edits.
  int currentSequenceid = status == null ? 0 : getTableInfoSequenceid(status.getPath());
  int sequenceid = currentSequenceid;
  // Put an arbitrary upper bound on how often we retry
  int retries = 10;
  int retrymax = currentSequenceid + retries;
  Path tableInfoPath = null;
  do {
    sequenceid += 1;
    Path p = getTableInfoFileName(tmpTableDir, sequenceid);
    if (fs.exists(p)) {
      LOG.debug(p + " exists; retrying up to " + retries + " times");
      continue;
    }
    try {
      writeHTD(fs, p, hTableDescriptor);
      tableInfoPath = getTableInfoFileName(tableDir, sequenceid);
      if (!fs.rename(p, tableInfoPath)) {
        throw new IOException("Failed rename of " + p + " to " + tableInfoPath);
      }
    } catch (IOException ioe) {
      // Presume a clash of names or something; go around again.
      LOG.debug("Failed write and/or rename; retrying", ioe);
      if (!FSUtils.deleteDirectory(fs, p)) {
        LOG.warn("Failed cleanup of " + p);
      }
      tableInfoPath = null;
      continue;
    }
    // Clean up the old schema file.
    if (status != null) {
      if (!FSUtils.deleteDirectory(fs, status.getPath())) {
        LOG.warn("Failed delete of " + status.getPath() + "; continuing");
      }
    }
    break;
  } while (sequenceid < retrymax);
  return tableInfoPath;
}
@Override
public final void close() {
  if (state.equals(ReaderWriterState.OPEN)) {
    try {
      appender.close();
    } catch (IOException e) {
      this.state = ReaderWriterState.ERROR;
      throw new DatasetIOException("Failed to close appender " + appender, e);
    }

    if (count > 0) {
      // commit the temp file
      try {
        if (!fs.rename(tempPath, finalPath)) {
          this.state = ReaderWriterState.ERROR;
          throw new DatasetWriterException(
              "Failed to move " + tempPath + " to " + finalPath);
        }
      } catch (IOException e) {
        this.state = ReaderWriterState.ERROR;
        throw new DatasetIOException("Failed to commit " + finalPath, e);
      }
      LOG.debug("Committed {} for appender {} ({} entities)",
          new Object[] {finalPath, appender, count});
    } else {
      // discard the temp file
      try {
        if (!fs.delete(tempPath, true)) {
          this.state = ReaderWriterState.ERROR;
          throw new DatasetWriterException("Failed to delete " + tempPath);
        }
      } catch (IOException e) {
        this.state = ReaderWriterState.ERROR;
        throw new DatasetIOException("Failed to remove temporary file " + tempPath, e);
      }
      LOG.debug("Discarded {} ({} entities)", tempPath, count);
    }

    try {
      appender.cleanup();
    } catch (IOException e) {
      throw new DatasetIOException("Failed to clean up " + appender, e);
    }

    this.state = ReaderWriterState.CLOSED;
  } else if (state.equals(ReaderWriterState.ERROR)) {
    this.state = ReaderWriterState.CLOSED;
  }
}
// Renames a completed file (stripping the in-progress suffix) and returns the new path
private Path renameCompletedFile(Path file) throws IOException {
  String fileName = file.toString();
  String fileNameMinusSuffix = fileName.substring(0, fileName.indexOf(inprogress_suffix));
  String newName = new Path(fileNameMinusSuffix).getName();
  Path newFile = new Path(archiveDirPath + Path.SEPARATOR + newName);
  LOG.info("Completed consuming file {}", fileNameMinusSuffix);
  if (!hdfs.rename(file, newFile)) {
    throw new IOException("Rename failed for file: " + file);
  }
  LOG.debug("Renamed file {} to {} ", file, newFile);
  return newFile;
}
private Path tryUsingSimpleOutputPath(Path resultPath) throws IOException {
  if (inputFileNames.size() == 1) {
    // In the case of a single input, set the output to be consistent with
    // the input name.
    Path inputPath = new Path(inputFileNames.get(0));
    Path betterOutputPath = new Path(outputDir, inputPath.getName());
    if (!fs.exists(betterOutputPath)) {
      fs.rename(resultPath, betterOutputPath);
      resultPath = betterOutputPath;
    }
  }
  return resultPath;
}
/**
 * Move the specified file/directory to the hbase temp directory.
 *
 * @param path The path of the file/directory to move
 * @return The temp location of the file/directory moved
 * @throws IOException in case of file-system failure
 */
public Path moveToTemp(final Path path) throws IOException {
  Path tempPath = new Path(this.tempdir, path.getName());

  // Ensure temp exists
  if (!fs.exists(tempdir) && !fs.mkdirs(tempdir)) {
    throw new IOException("HBase temp directory '" + tempdir + "' creation failure.");
  }

  if (!fs.rename(path, tempPath)) {
    throw new IOException("Unable to move '" + path + "' to temp '" + tempPath + "'");
  }

  return tempPath;
}
/**
 * Complete the snapshot by moving it into place, and reset the manager to
 * allow another snapshot to proceed.
 *
 * @param snapshotDir final path of the snapshot
 * @param workingDir directory where the in-progress snapshot was built
 * @param fs {@link FileSystem} where the snapshot was built
 * @throws SnapshotCreationException if the snapshot could not be moved
 * @throws IOException if the filesystem could not be reached
 */
public void completeSnapshot(Path snapshotDir, Path workingDir, FileSystem fs)
    throws SnapshotCreationException, IOException {
  LOG.debug("Sentinel is done, just moving the snapshot from " + workingDir + " to "
      + snapshotDir);
  if (!fs.rename(workingDir, snapshotDir)) {
    throw new SnapshotCreationException("Failed to move working directory(" + workingDir
        + ") to completed directory(" + snapshotDir + ").");
  }
  finished = true;
}
@Override
public Location renameTo(Location destination) throws IOException {
  // Destination will always be of the same type as this location.
  if (fs instanceof DistributedFileSystem) {
    ((DistributedFileSystem) fs).rename(path, ((HDFSLocation) destination).path,
        Options.Rename.OVERWRITE);
    return new HDFSLocation(fs, new Path(destination.toURI()));
  }

  if (fs.rename(path, ((HDFSLocation) destination).path)) {
    return new HDFSLocation(fs, new Path(destination.toURI()));
  } else {
    return null;
  }
}
/**
 * Moves the given files to another directory on HDFS.
 *
 * @param conf            Hadoop Configuration
 * @param path            path of the files to move
 * @param prefixToAppend  string to prepend to each file name when moving
 * @param targetDirectory destination directory
 * @throws java.io.IOException if a file cannot be moved
 */
public static void moveFileToDirectory(Configuration conf, String path,
    String prefixToAppend, String targetDirectory) throws IOException {
  FileSystem fileSystem = FileSystem.get(conf);
  FileStatus[] statuses = fileSystem.listStatus(new Path(path));
  for (FileStatus fileStatus : statuses) {
    String filename = prefixToAppend + "_" + fileStatus.getPath().getName();
    if (!isExist(conf, targetDirectory + "/" + filename)) {
      fileSystem.rename(fileStatus.getPath(), new Path(targetDirectory + "/" + filename));
    } else {
      throw new RuntimeException(
          "\t Warn: '" + fileStatus.getPath() + "' cannot be moved. Already exists.");
    }
  }
}