private ArrayList<String> init(File newFile, List<String> uuids, List<Integer> dataVersions)
    throws IllegalArgumentException, IOException {
  String base = newFile.toURI().toString();
  LocalFileSystem fs = FileSystem.getLocal(new Configuration());
  ArrayList<String> accumuloPaths = new ArrayList<String>();
  for (int i = 0; i < uuids.size(); i++) {
    String volume = "v" + i;
    String accumuloPath = base + "/" + volume + "/accumulo";
    accumuloPaths.add(accumuloPath);
    if (uuids.get(i) != null) {
      fs.mkdirs(new Path(accumuloPath + "/" + ServerConstants.INSTANCE_ID_DIR));
      fs.createNewFile(
          new Path(accumuloPath + "/" + ServerConstants.INSTANCE_ID_DIR + "/" + uuids.get(i)));
    }
    if (dataVersions.get(i) != null) {
      fs.mkdirs(new Path(accumuloPath + "/" + ServerConstants.VERSION_DIR));
      fs.createNewFile(
          new Path(accumuloPath + "/" + ServerConstants.VERSION_DIR + "/" + dataVersions.get(i)));
    }
  }
  return accumuloPaths;
}
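/*
 * A minimal usage sketch of the init(...) helper above. Hedged: the directory, UUIDs, and
 * version value here are illustrative only and are not taken from the original test. It
 * simulates two volumes, the first fully initialized and the second missing its data-version
 * marker, and collects the resulting accumulo paths for whatever check the test exercises.
 */
List<String> uuids = Arrays.asList(UUID.randomUUID().toString(), UUID.randomUUID().toString());
List<Integer> dataVersions = Arrays.asList(7, null); // 7 is a placeholder data version
ArrayList<String> accumuloPaths = init(new File("/tmp/volumes-test"), uuids, dataVersions);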
public void testCloseForErroneousRCFile() throws IOException {
  Configuration conf = new Configuration();
  LocalFileSystem fs = FileSystem.getLocal(conf);
  // create an empty file (which is not a valid rcfile)
  Path path = new Path(System.getProperty("test.build.data", ".") + "/broken.rcfile");
  fs.create(path).close();
  // try to create RCFile.Reader
  final TestFSDataInputStream[] openedFile = new TestFSDataInputStream[1];
  try {
    new RCFile.Reader(fs, path, conf) {
      // this method is called by the RCFile.Reader constructor; it is overridden
      // here so we can access the opened file
      protected FSDataInputStream openFile(FileSystem fs, Path file, int bufferSize, long length)
          throws IOException {
        final InputStream in = super.openFile(fs, file, bufferSize, length);
        openedFile[0] = new TestFSDataInputStream(in);
        return openedFile[0];
      }
    };
    fail("IOException expected.");
  } catch (IOException expected) {
  }
  assertNotNull(path + " should have been opened.", openedFile[0]);
  assertTrue("InputStream for " + path + " should have been closed.", openedFile[0].isClosed());
}
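/*
 * A minimal sketch (an assumption, not the original helper) of the TestFSDataInputStream
 * class the test above relies on: it wraps the stream returned by openFile() and records
 * whether close() was called, so the test can assert that RCFile.Reader closes the stream
 * when its constructor fails on the broken file.
 */
class TestFSDataInputStream extends FSDataInputStream {
  private boolean closed = false;

  TestFSDataInputStream(InputStream in) throws IOException {
    super(in);
  }

  @Override
  public void close() throws IOException {
    // remember that the reader closed the underlying stream
    closed = true;
    super.close();
  }

  public boolean isClosed() {
    return closed;
  }
}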
/**
 * Simulate the <code>dfs.name.dir</code> or <code>dfs.data.dir</code> of a populated DFS
 * filesystem.
 *
 * <p>This method creates and populates the directory specified by <code>parent/dirName</code>,
 * for each parent directory. The contents of the new directories will be appropriate for the
 * given node type. If the directory does not exist, it will be created. If the directory already
 * exists, it will first be deleted.
 *
 * <p>By default, a single master copy of a populated storage directory is created for a Namenode
 * (containing edits, fsimage, version, and time files) and for a Datanode (containing version and
 * block files). These master directories are then copied by this method to create new storage
 * directories of the appropriate type (Namenode or Datanode).
 *
 * @return the array of created directories
 */
public static File[] createStorageDirs(NodeType nodeType, String[] parents, String dirName)
    throws Exception {
  File[] retVal = new File[parents.length];
  for (int i = 0; i < parents.length; i++) {
    File newDir = new File(parents[i], dirName);
    createEmptyDirs(new String[] {newDir.toString()});
    LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
    switch (nodeType) {
      case NAME_NODE:
        localFS.copyToLocalFile(
            new Path(namenodeStorage.toString(), "current"), new Path(newDir.toString()), false);
        Path newImgDir = new Path(newDir.getParent(), "image");
        if (!localFS.exists(newImgDir)) {
          localFS.copyToLocalFile(
              new Path(namenodeStorage.toString(), "image"), newImgDir, false);
        }
        break;
      case DATA_NODE:
        localFS.copyToLocalFile(
            new Path(datanodeStorage.toString(), "current"), new Path(newDir.toString()), false);
        Path newStorageFile = new Path(newDir.getParent(), "storage");
        if (!localFS.exists(newStorageFile)) {
          localFS.copyToLocalFile(
              new Path(datanodeStorage.toString(), "storage"), newStorageFile, false);
        }
        break;
    }
    retVal[i] = newDir;
  }
  return retVal;
}
@BeforeClass
public static void startCluster() throws IOException {
  GCWatcher.init(0.60);
  LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
  File testDirectory = new File(TMPDIR, "blur-cluster-test").getAbsoluteFile();
  testDirectory.mkdirs();

  Path directory = new Path(testDirectory.getPath());
  FsPermission dirPermissions = localFS.getFileStatus(directory).getPermission();
  FsAction userAction = dirPermissions.getUserAction();
  FsAction groupAction = dirPermissions.getGroupAction();
  FsAction otherAction = dirPermissions.getOtherAction();

  // Each FsAction ordinal equals its octal permission digit (e.g. rwxr-xr-x -> "755"),
  // so concatenating the three ordinals yields the value expected by dfs.datanode.data.dir.perm.
  StringBuilder builder = new StringBuilder();
  builder.append(userAction.ordinal());
  builder.append(groupAction.ordinal());
  builder.append(otherAction.ordinal());
  String dirPermissionNum = builder.toString();
  System.setProperty("dfs.datanode.data.dir.perm", dirPermissionNum);

  testDirectory.delete();
  miniCluster = new MiniCluster();
  miniCluster.startBlurCluster(new File(testDirectory, "cluster").getAbsolutePath(), 2, 3, true);
  connectionStr = miniCluster.getControllerConnectionStr();
}
/**
 * Delete a local path with asyncDiskService if available, or otherwise synchronously with the
 * local file system.
 */
private static void deleteLocalPath(
    MRAsyncDiskService asyncDiskService, LocalFileSystem fs, Path path) throws IOException {
  boolean deleted = false;
  if (asyncDiskService != null) {
    // Try to delete using asyncDiskService
    String localPathToDelete = path.toUri().getPath();
    deleted = asyncDiskService.moveAndDeleteAbsolutePath(localPathToDelete);
    if (!deleted) {
      LOG.warn(
          "Cannot find DistributedCache path "
              + localPathToDelete
              + " on any of the asyncDiskService volumes!");
    }
  }
  if (!deleted) {
    // If no asyncDiskService, we will delete the files synchronously
    fs.delete(path, true);
  }
  LOG.info("Deleted path " + path);
}
static void writeToIndexFile(
    String logLocation,
    TaskAttemptID currentTaskid,
    boolean isCleanup,
    Map<LogName, Long[]> lengths)
    throws IOException {
  // To ensure atomicity of updates to the index file, write to a temporary
  // index file first and then rename.
  File tmpIndexFile = getTmpIndexFile(currentTaskid, isCleanup);

  BufferedOutputStream bos =
      new BufferedOutputStream(SecureIOUtils.createForWrite(tmpIndexFile, 0644));
  DataOutputStream dos = new DataOutputStream(bos);
  // the format of the index file is
  // LOG_DIR: <the dir where the task logs are really stored>
  // STDOUT: <start-offset in the stdout file> <length>
  // STDERR: <start-offset in the stderr file> <length>
  // SYSLOG: <start-offset in the syslog file> <length>
  dos.writeBytes(LogFileDetail.LOCATION + logLocation + "\n");
  for (LogName logName : LOGS_TRACKED_BY_INDEX_FILES) {
    Long[] lens = lengths.get(logName);
    dos.writeBytes(
        logName.toString()
            + ":"
            + lens[0].toString()
            + " "
            + Long.toString(lens[1].longValue() - lens[0].longValue())
            + "\n");
  }
  dos.close();

  File indexFile = getIndexFile(currentTaskid, isCleanup);
  Path indexFilePath = new Path(indexFile.getAbsolutePath());
  Path tmpIndexFilePath = new Path(tmpIndexFile.getAbsolutePath());

  if (localFS == null) { // set localFS once
    localFS = FileSystem.getLocal(new Configuration());
  }
  localFS.rename(tmpIndexFilePath, indexFilePath);
}
@Override
public void truncateLogsAsUser(String user, List<Task> allAttempts) throws IOException {
  Task firstTask = allAttempts.get(0);
  String taskid = firstTask.getTaskID().toString();

  LocalDirAllocator ldirAlloc = new LocalDirAllocator(JobConf.MAPRED_LOCAL_DIR_PROPERTY);
  String taskRanFile = TaskTracker.TT_LOG_TMP_DIR + Path.SEPARATOR + taskid;
  Configuration conf = getConf();

  // write the serialized task information to a file to pass to the truncater
  Path taskRanFilePath = ldirAlloc.getLocalPathForWrite(taskRanFile, conf);
  LocalFileSystem lfs = FileSystem.getLocal(conf);
  FSDataOutputStream out = lfs.create(taskRanFilePath);
  out.writeInt(allAttempts.size());
  for (Task t : allAttempts) {
    out.writeBoolean(t.isMapTask());
    t.write(out);
  }
  out.close();
  lfs.setPermission(taskRanFilePath, FsPermission.createImmutable((short) 0755));

  List<String> command = new ArrayList<String>();
  File jvm = // use same jvm as parent
      new File(new File(System.getProperty("java.home"), "bin"), "java");
  command.add(jvm.toString());
  command.add("-Djava.library.path=" + System.getProperty("java.library.path"));
  command.add("-Dhadoop.log.dir=" + TaskLog.getBaseLogDir());
  command.add("-Dhadoop.root.logger=INFO,console");
  command.add("-classpath");
  command.add(System.getProperty("java.class.path"));
  // main of TaskLogsTruncater
  command.add(TaskLogsTruncater.class.getName());
  command.add(taskRanFilePath.toString());

  String[] taskControllerCmd = new String[4 + command.size()];
  taskControllerCmd[0] = taskControllerExe;
  taskControllerCmd[1] = user;
  taskControllerCmd[2] = localStorage.getDirsString();
  taskControllerCmd[3] = Integer.toString(Commands.RUN_COMMAND_AS_USER.getValue());
  int i = 4;
  for (String cmdArg : command) {
    taskControllerCmd[i++] = cmdArg;
  }
  if (LOG.isDebugEnabled()) {
    for (String cmd : taskControllerCmd) {
      LOG.debug("taskctrl command = " + cmd);
    }
  }
  ShellCommandExecutor shExec = new ShellCommandExecutor(taskControllerCmd);
  try {
    shExec.execute();
  } catch (Exception e) {
    LOG.warn(
        "Exit code from "
            + taskControllerExe.toString()
            + " is : "
            + shExec.getExitCode()
            + " for truncateLogs");
    LOG.warn(
        "Exception thrown by "
            + taskControllerExe.toString()
            + " : "
            + StringUtils.stringifyException(e));
    LOG.info("Output from LinuxTaskController's " + taskControllerExe.toString() + " follows:");
    logOutput(shExec.getOutput());
    lfs.delete(taskRanFilePath, false);
    throw new IOException(e);
  }
  lfs.delete(taskRanFilePath, false);
  if (LOG.isDebugEnabled()) {
    LOG.info("Output from LinuxTaskController's " + taskControllerExe.toString() + " follows:");
    logOutput(shExec.getOutput());
  }
}
/** Deletes the content of the repository files in the bucket. */
public void cleanRepositoryFiles(String basePath) throws IOException {
  LocalFileSystem fs = FileSystem.getLocal(new Configuration());
  // use the basePath argument; the original referenced an undeclared "path" variable
  Path p = new Path(basePath);
  fs.delete(p.makeQualified(fs), true);
}