/** Check whether the quota is initialized correctly. */
@Test
public void testQuotaInitialization() throws Exception {
  final int size = 500;
  Path testDir = new Path("/testDir");
  long expectedSize = 3 * BLOCKSIZE + BLOCKSIZE / 2;

  dfs.mkdirs(testDir);
  dfs.setQuota(testDir, size * 4, expectedSize * size * 2);

  Path[] testDirs = new Path[size];
  for (int i = 0; i < size; i++) {
    testDirs[i] = new Path(testDir, "sub" + i);
    dfs.mkdirs(testDirs[i]);
    dfs.setQuota(testDirs[i], 100, 1000000);
    DFSTestUtil.createFile(dfs, new Path(testDirs[i], "a"), expectedSize,
        (short) 1, 1L);
  }

  // Directly access the name system to obtain the current cached usage.
  INodeDirectory root = fsdir.getRoot();
  HashMap<String, Long> nsMap = new HashMap<String, Long>();
  HashMap<String, Long> dsMap = new HashMap<String, Long>();
  scanDirsWithQuota(root, nsMap, dsMap, false);

  fsdir.updateCountForQuota(1);
  scanDirsWithQuota(root, nsMap, dsMap, true);

  fsdir.updateCountForQuota(2);
  scanDirsWithQuota(root, nsMap, dsMap, true);

  fsdir.updateCountForQuota(4);
  scanDirsWithQuota(root, nsMap, dsMap, true);
}
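/*
 * testQuotaInitialization above relies on a scanDirsWithQuota helper that is
 * not shown in this section. A minimal sketch of what it could look like,
 * assuming the standard INodeDirectory/QuotaCounts APIs used elsewhere in
 * these tests (plus an import of
 * org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot): on the first
 * pass (verify == false) it records the cached usage of every quota-carrying
 * directory, and on later passes it asserts the recomputed usage matches.
 */
private void scanDirsWithQuota(INodeDirectory dir, HashMap<String, Long> nsMap,
    HashMap<String, Long> dsMap, boolean verify) {
  if (dir.isQuotaSet()) {
    // get the current cached consumption for this directory
    QuotaCounts q = dir.getDirectoryWithQuotaFeature().getSpaceConsumed();
    String name = dir.getFullPathName();
    if (verify) {
      assertEquals(nsMap.get(name).longValue(), q.getNameSpace());
      assertEquals(dsMap.get(name).longValue(), q.getStorageSpace());
    } else {
      nsMap.put(name, q.getNameSpace());
      dsMap.put(name, q.getStorageSpace());
    }
  }
  // recurse into child directories in the current (non-snapshot) state
  for (INode child : dir.getChildrenList(Snapshot.CURRENT_STATE_ID)) {
    if (child.isDirectory()) {
      scanDirsWithQuota(child.asDirectory(), nsMap, dsMap, verify);
    }
  }
}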
/** Test that truncate over quota does not mark the file as UC or create a lease. */
@Test(timeout = 60000)
public void testTruncateOverQuota() throws Exception {
  final Path dir = new Path("/TestTruncateOverquota");
  final Path file = new Path(dir, "file");

  // create partial block file
  dfs.mkdirs(dir);
  DFSTestUtil.createFile(dfs, file, BLOCKSIZE / 2, REPLICATION, seed);

  // lower quota to cause exception when truncating the partial block
  dfs.setQuota(dir, Long.MAX_VALUE - 1, 1);
  final INodeDirectory dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  final long spaceUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace();
  try {
    dfs.truncate(file, BLOCKSIZE / 2 - 1);
    Assert.fail("truncate didn't fail");
  } catch (RemoteException e) {
    assertTrue(e.getClassName().contains("DSQuotaExceededException"));
  }

  // check that the file exists, isn't UC, and has no dangling lease
  LeaseManager lm = cluster.getNamesystem().getLeaseManager();
  INodeFile inode = fsdir.getINode(file.toString()).asFile();
  Assert.assertNotNull(inode);
  Assert.assertFalse("should not be UC", inode.isUnderConstruction());
  Assert.assertNull("should not have a lease", lm.getLease(inode));
  // make sure the quota usage is unchanged
  final long newSpaceUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace();
  assertEquals(spaceUsed, newSpaceUsed);
  // make sure edits aren't corrupted
  dfs.recoverLease(file);
  cluster.restartNameNodes();
}
/** Test allow-snapshot operation. */
@Test(timeout = 15000)
public void testAllowSnapshot() throws Exception {
  final String pathStr = sub1.toString();
  final INode before = fsdir.getINode(pathStr);

  // Before a directory is snapshottable
  Assert.assertFalse(before.asDirectory().isSnapshottable());

  // After a directory is snapshottable
  final Path path = new Path(pathStr);
  hdfs.allowSnapshot(path);
  {
    final INode after = fsdir.getINode(pathStr);
    Assert.assertTrue(after.asDirectory().isSnapshottable());
  }

  hdfs.disallowSnapshot(path);
  {
    final INode after = fsdir.getINode(pathStr);
    Assert.assertFalse(after.asDirectory().isSnapshottable());
  }
}
/** Test if the quota can be correctly updated when file length is updated through fsync. */
@Test(timeout = 60000)
public void testUpdateQuotaForFSync() throws Exception {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(dfs, bar, BLOCKSIZE, REPLICATION, 0L);
  dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);

  FSDataOutputStream out = dfs.append(bar);
  out.write(new byte[BLOCKSIZE / 4]);
  ((DFSOutputStream) out.getWrappedStream())
      .hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));

  INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
  QuotaCounts quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  long ns = quota.getNameSpace();
  long ds = quota.getStorageSpace();
  assertEquals(2, ns); // foo and bar
  assertEquals(BLOCKSIZE * 2 * REPLICATION, ds); // file is under construction

  out.write(new byte[BLOCKSIZE / 4]);
  out.close();

  fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.getNameSpace();
  ds = quota.getStorageSpace();
  assertEquals(2, ns);
  assertEquals((BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, ds);

  // append another block
  DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE);

  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.getNameSpace();
  ds = quota.getStorageSpace();
  assertEquals(2, ns); // foo and bar
  assertEquals((BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION, ds);
}
/**
 * Test that append over a specific type of storage quota does not mark the
 * file as UC or create a lease.
 */
@Test(timeout = 60000)
public void testAppendOverTypeQuota() throws Exception {
  final Path dir = new Path("/TestAppendOverTypeQuota");
  final Path file = new Path(dir, "file");

  // create partial block file
  dfs.mkdirs(dir);
  // set the storage policy on dir
  dfs.setStoragePolicy(dir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  DFSTestUtil.createFile(dfs, file, BLOCKSIZE / 2, REPLICATION, seed);

  // set quota of SSD to 1L
  dfs.setQuotaByStorageType(dir, StorageType.SSD, 1L);
  final INodeDirectory dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  final long spaceUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace();
  try {
    DFSTestUtil.appendFile(dfs, file, BLOCKSIZE);
    Assert.fail("append didn't fail");
  } catch (QuotaByStorageTypeExceededException e) {
    // ignore
  }

  // check that the file exists, isn't UC, and has no dangling lease
  LeaseManager lm = cluster.getNamesystem().getLeaseManager();
  INodeFile inode = fsdir.getINode(file.toString()).asFile();
  Assert.assertNotNull(inode);
  Assert.assertFalse("should not be UC", inode.isUnderConstruction());
  Assert.assertNull("should not have a lease", lm.getLease(inode));
  // make sure the quota usage is unchanged
  final long newSpaceUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace();
  assertEquals(spaceUsed, newSpaceUsed);
  // make sure edits aren't corrupted
  dfs.recoverLease(file);
  cluster.restartNameNodes();
}
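/*
 * The post-failure checks in testTruncateOverQuota and testAppendOverTypeQuota
 * above are identical. If desired, they could be factored into a shared helper
 * along these lines (a sketch; the helper name is illustrative and not part of
 * the original tests):
 */
private void assertFileClosedAndQuotaUnchanged(Path file, INodeDirectory dirNode,
    long expectedSpaceUsed) throws Exception {
  // the file should exist, not be under construction, and hold no lease
  LeaseManager lm = cluster.getNamesystem().getLeaseManager();
  INodeFile inode = fsdir.getINode(file.toString()).asFile();
  Assert.assertNotNull(inode);
  Assert.assertFalse("should not be UC", inode.isUnderConstruction());
  Assert.assertNull("should not have a lease", lm.getLease(inode));
  // the quota usage should be unchanged by the failed operation
  assertEquals(expectedSpaceUsed, dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace());
}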
/** Test if the quota can be correctly updated for file create. */
@Test(timeout = 60000)
public void testQuotaUpdateWithFileCreate() throws Exception {
  final Path foo = new Path(dir, "foo");
  Path createdFile = new Path(foo, "created_file.data");
  dfs.mkdirs(foo);
  dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
  long fileLen = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16, fileLen, BLOCKSIZE,
      REPLICATION, seed);
  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());
  QuotaCounts cnt = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed();
  assertEquals(2, cnt.getNameSpace());
  assertEquals(fileLen * REPLICATION, cnt.getStorageSpace());
}
/** Test if the quota can be correctly updated for append. */
@Test(timeout = 60000)
public void testUpdateQuotaForAppend() throws Exception {
  final Path foo = new Path(dir, "foo");
  final Path bar = new Path(foo, "bar");
  long currentFileLen = BLOCKSIZE;
  DFSTestUtil.createFile(dfs, bar, currentFileLen, REPLICATION, seed);
  dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);

  // append half of the block data, the previous file length is at block
  // boundary
  DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE / 2);
  currentFileLen += (BLOCKSIZE / 2);

  INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
  assertTrue(fooNode.isQuotaSet());
  QuotaCounts quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  long ns = quota.getNameSpace();
  long ds = quota.getStorageSpace();
  assertEquals(2, ns); // foo and bar
  assertEquals(currentFileLen * REPLICATION, ds);
  ContentSummary c = dfs.getContentSummary(foo);
  assertEquals(c.getSpaceConsumed(), ds);

  // append another block, the previous file length is not at block boundary
  DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE);
  currentFileLen += BLOCKSIZE;

  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.getNameSpace();
  ds = quota.getStorageSpace();
  assertEquals(2, ns); // foo and bar
  assertEquals(currentFileLen * REPLICATION, ds);
  c = dfs.getContentSummary(foo);
  assertEquals(c.getSpaceConsumed(), ds);

  // append several blocks
  DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE * 3 + BLOCKSIZE / 8);
  currentFileLen += (BLOCKSIZE * 3 + BLOCKSIZE / 8);

  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.getNameSpace();
  ds = quota.getStorageSpace();
  assertEquals(2, ns); // foo and bar
  assertEquals(currentFileLen * REPLICATION, ds);
  c = dfs.getContentSummary(foo);
  assertEquals(c.getSpaceConsumed(), ds);
}
/** modified by tony */
@SuppressWarnings("deprecation")
int loadEditRecords(int logVersion, DataInputStream in, boolean closeOnExit)
    throws IOException {
  FSNamesystem.LOG.info("logversion: " + logVersion);
  FSDirectory fsDir = fsNamesys.dir;
  int numEdits = 0;
  String clientName = null;
  String clientMachine = null;
  String path = null;
  int numOpAdd = 0, numOpClose = 0, numOpDelete = 0, numOpRenameOld = 0,
      numOpSetRepl = 0, numOpMkDir = 0, numOpSetPerm = 0, numOpSetOwner = 0,
      numOpSetGenStamp = 0, numOpTimes = 0, numOpRename = 0,
      numOpConcatDelete = 0, numOpSymlink = 0, numOpGetDelegationToken = 0,
      numOpRenewDelegationToken = 0, numOpCancelDelegationToken = 0,
      numOpUpdateMasterKey = 0, numOpOther = 0;

  try {
    while (true) {
      long timestamp = 0;
      long mtime = 0;
      long atime = 0;
      long blockSize = 0;
      byte opcode = -1;
      try {
        in.mark(1);
        opcode = in.readByte();
        if (opcode == Ops.OP_INVALID) {
          in.reset(); // reset back to end of file if somebody reads it again
          break; // no more transactions
        }
      } catch (EOFException e) {
        break; // no more transactions
      }
      numEdits++;
      switch (opcode) {
      case Ops.OP_ADD:
      case Ops.OP_CLOSE: {
        // versions > 0 support per file replication
        // get name and replication
        int length = in.readInt();
        // modified by tony
        if (-7 == logVersion && length != 3
            || -17 < logVersion && logVersion < -7 && length != 4
            || logVersion <= -17 && length != 7) {
          throw new IOException("Incorrect data format."
              + " logVersion is " + logVersion
              + " but writables.length is " + length + ". ");
        }
        path = FSImageSerialization.readString(in);
        short replication = fsNamesys.adjustReplication(readShort(in));
        mtime = readLong(in);
        if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, logVersion)) {
          atime = readLong(in);
        }
        if (logVersion < -7) {
          blockSize = readLong(in);
        }
        long fileSize = readLong(in);
        byte type = (byte) readLong(in);

        // get blocks
        boolean isFileUnderConstruction = (opcode == Ops.OP_ADD);
        BlockInfo blocks[] = readBlocks(in, logVersion,
            isFileUnderConstruction, replication);

        // Older versions of HDFS do not store the block size in the inode.
        // If the file has more than one block, use the size of the
        // first block as the blocksize. Otherwise use the default
        // block size.
        if (-8 <= logVersion && blockSize == 0) {
          if (blocks.length > 1) {
            blockSize = blocks[0].getNumBytes();
          } else {
            long first = ((blocks.length == 1) ? blocks[0].getNumBytes() : 0);
            blockSize = Math.max(fsNamesys.getDefaultBlockSize(), first);
          }
        }

        PermissionStatus permissions = fsNamesys.getUpgradePermission();
        if (logVersion <= -11) {
          permissions = PermissionStatus.read(in);
        }
        CodingMatrix codingMatrix = CodingMatrix.getMatrixofCertainType(type);
        codingMatrix.readFields(in);
        /* added by tony */
        LongWritable offset = new LongWritable();
        offset.readFields(in);
        long headeroffset = offset.get();

        // clientname, clientMachine and block locations of last block.
        if (opcode == Ops.OP_ADD && logVersion <= -12) {
          clientName = FSImageSerialization.readString(in);
          clientMachine = FSImageSerialization.readString(in);
          if (-13 <= logVersion) {
            readDatanodeDescriptorArray(in);
          }
        } else {
          clientName = "";
          clientMachine = "";
        }

        // The open lease transaction re-creates a file if necessary.
        // Delete the file if it already exists.
        if (FSNamesystem.LOG.isDebugEnabled()) {
          FSNamesystem.LOG.debug(opcode + ": " + path
              + " numblocks : " + blocks.length
              + " clientHolder " + clientName
              + " clientMachine " + clientMachine);
        }
        fsDir.unprotectedDelete(path, mtime);

        /* modified by tony: add to the file tree */
        INodeFile node = (INodeFile) fsDir.unprotectedAddFile(path,
            permissions, codingMatrix, headeroffset, fileSize, blocks,
            replication, mtime, atime, blockSize);
        if (isFileUnderConstruction) {
          numOpAdd++;
          //
          // Replace current node with an INodeFileUnderConstruction and
          // recreate the in-memory lease record. (This was left as a
          // "TODO: cons = null" here, which would NPE below; the constructor
          // call follows the argument list that was commented out alongside
          // the TODO.)
          //
          INodeFileUnderConstruction cons = new INodeFileUnderConstruction(
              node.getLocalNameBytes(),
              node.getReplication(),
              node.getModificationTime(),
              node.getPreferredBlockSize(),
              node.getBlocks(),
              node.getPermissionStatus(),
              clientName,
              clientMachine,
              null);
          fsDir.replaceNode(path, node, cons);
          fsNamesys.leaseManager.addLease(cons.getClientName(), path);
        }
        break;
      }
      case Ops.OP_SET_REPLICATION: {
        numOpSetRepl++;
        path = FSImageSerialization.readString(in);
        short replication = fsNamesys.adjustReplication(readShort(in));
        fsDir.unprotectedSetReplication(path, replication, null);
        break;
      }
      case Ops.OP_CONCAT_DELETE: {
        numOpConcatDelete++;
        int length = in.readInt();
        if (length < 3) { // trg, srcs.., timestamp
          throw new IOException("Incorrect data format. "
              + "Concat delete operation.");
        }
        String trg = FSImageSerialization.readString(in);
        int srcSize = length - 1 - 1; // trg and timestamp
        String[] srcs = new String[srcSize];
        for (int i = 0; i < srcSize; i++) {
          srcs[i] = FSImageSerialization.readString(in);
        }
        timestamp = readLong(in);
        fsDir.unprotectedConcat(trg, srcs);
        break;
      }
      case Ops.OP_RENAME_OLD: {
        numOpRenameOld++;
        int length = in.readInt();
        if (length != 3) {
          throw new IOException("Incorrect data format. "
              + "Old rename operation.");
        }
        String s = FSImageSerialization.readString(in);
        String d = FSImageSerialization.readString(in);
        timestamp = readLong(in);
        HdfsFileStatus dinfo = fsDir.getFileInfo(d, false);
        fsDir.unprotectedRenameTo(s, d, timestamp);
        fsNamesys.changeLease(s, d, dinfo);
        break;
      }
      case Ops.OP_DELETE: {
        numOpDelete++;
        int length = in.readInt();
        if (length != 2) {
          throw new IOException("Incorrect data format. "
              + "Delete operation.");
        }
        path = FSImageSerialization.readString(in);
        timestamp = readLong(in);
        fsDir.unprotectedDelete(path, timestamp);
        break;
      }
      case Ops.OP_MKDIR: {
        numOpMkDir++;
        PermissionStatus permissions = fsNamesys.getUpgradePermission();
        int length = in.readInt();
        if (-17 < logVersion && length != 2
            || logVersion <= -17 && length != 3) {
          throw new IOException("Incorrect data format. "
              + "Mkdir operation.");
        }
        path = FSImageSerialization.readString(in);
        timestamp = readLong(in);

        // The disk format stores atimes for directories as well.
        // However, currently this is not being updated/used because of
        // performance reasons.
        if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, logVersion)) {
          atime = readLong(in);
        }

        if (logVersion <= -11) {
          permissions = PermissionStatus.read(in);
        }
        fsDir.unprotectedMkdir(path, permissions, timestamp);
        break;
      }
      case Ops.OP_SET_GENSTAMP: {
        numOpSetGenStamp++;
        long lw = in.readLong();
        fsNamesys.setGenerationStamp(lw);
        break;
      }
      case Ops.OP_DATANODE_ADD: {
        numOpOther++;
        // Datanodes are not persistent any more.
        FSImageSerialization.DatanodeImage.skipOne(in);
        break;
      }
      case Ops.OP_DATANODE_REMOVE: {
        numOpOther++;
        DatanodeID nodeID = new DatanodeID();
        nodeID.readFields(in);
        // Datanodes are not persistent any more.
        break;
      }
      case Ops.OP_SET_PERMISSIONS: {
        numOpSetPerm++;
        fsDir.unprotectedSetPermission(
            FSImageSerialization.readString(in), FsPermission.read(in));
        break;
      }
      case Ops.OP_SET_OWNER: {
        numOpSetOwner++;
        fsDir.unprotectedSetOwner(FSImageSerialization.readString(in),
            FSImageSerialization.readString_EmptyAsNull(in),
            FSImageSerialization.readString_EmptyAsNull(in));
        break;
      }
      case Ops.OP_SET_NS_QUOTA: {
        fsDir.unprotectedSetQuota(FSImageSerialization.readString(in),
            readLongWritable(in), FSConstants.QUOTA_DONT_SET);
        break;
      }
      case Ops.OP_CLEAR_NS_QUOTA: {
        fsDir.unprotectedSetQuota(FSImageSerialization.readString(in),
            FSConstants.QUOTA_RESET, FSConstants.QUOTA_DONT_SET);
        break;
      }
      case Ops.OP_SET_QUOTA:
        fsDir.unprotectedSetQuota(FSImageSerialization.readString(in),
            readLongWritable(in), readLongWritable(in));
        break;
      case Ops.OP_TIMES: {
        numOpTimes++;
        int length = in.readInt();
        if (length != 3) {
          throw new IOException("Incorrect data format. "
              + "Times operation.");
        }
        path = FSImageSerialization.readString(in);
        mtime = readLong(in);
        atime = readLong(in);
        fsDir.unprotectedSetTimes(path, mtime, atime, true);
        break;
      }
      case Ops.OP_SYMLINK: {
        numOpSymlink++;
        int length = in.readInt();
        if (length != 4) {
          throw new IOException("Incorrect data format. "
              + "Symlink operation.");
        }
        path = FSImageSerialization.readString(in);
        String value = FSImageSerialization.readString(in);
        mtime = readLong(in);
        atime = readLong(in);
        PermissionStatus perm = PermissionStatus.read(in);
        fsDir.unprotectedSymlink(path, value, mtime, atime, perm);
        break;
      }
      case Ops.OP_RENAME: {
        numOpRename++;
        int length = in.readInt();
        if (length != 3) {
          throw new IOException("Incorrect data format. "
              + "Rename operation.");
        }
        String s = FSImageSerialization.readString(in);
        String d = FSImageSerialization.readString(in);
        timestamp = readLong(in);
        Rename[] options = readRenameOptions(in);
        HdfsFileStatus dinfo = fsDir.getFileInfo(d, false);
        fsDir.unprotectedRenameTo(s, d, timestamp, options);
        fsNamesys.changeLease(s, d, dinfo);
        break;
      }
      case Ops.OP_GET_DELEGATION_TOKEN: {
        numOpGetDelegationToken++;
        DelegationTokenIdentifier delegationTokenId =
            new DelegationTokenIdentifier();
        delegationTokenId.readFields(in);
        long expiryTime = readLong(in);
        fsNamesys.getDelegationTokenSecretManager()
            .addPersistedDelegationToken(delegationTokenId, expiryTime);
        break;
      }
      case Ops.OP_RENEW_DELEGATION_TOKEN: {
        numOpRenewDelegationToken++;
        DelegationTokenIdentifier delegationTokenId =
            new DelegationTokenIdentifier();
        delegationTokenId.readFields(in);
        long expiryTime = readLong(in);
        fsNamesys.getDelegationTokenSecretManager()
            .updatePersistedTokenRenewal(delegationTokenId, expiryTime);
        break;
      }
      case Ops.OP_CANCEL_DELEGATION_TOKEN: {
        numOpCancelDelegationToken++;
        DelegationTokenIdentifier delegationTokenId =
            new DelegationTokenIdentifier();
        delegationTokenId.readFields(in);
        fsNamesys.getDelegationTokenSecretManager()
            .updatePersistedTokenCancellation(delegationTokenId);
        break;
      }
      case Ops.OP_UPDATE_MASTER_KEY: {
        numOpUpdateMasterKey++;
        DelegationKey delegationKey = new DelegationKey();
        delegationKey.readFields(in);
        fsNamesys.getDelegationTokenSecretManager()
            .updatePersistedMasterKey(delegationKey);
        break;
      }
      default: {
        throw new IOException("Never seen opcode " + opcode);
      }
      }
    }
  } catch (IOException ex) {
    check203UpgradeFailure(logVersion, ex);
  } finally {
    if (closeOnExit) {
      in.close();
    }
  }
  if (FSImage.LOG.isDebugEnabled()) {
    FSImage.LOG.debug("numOpAdd = " + numOpAdd
        + " numOpClose = " + numOpClose
        + " numOpDelete = " + numOpDelete
        + " numOpRenameOld = " + numOpRenameOld
" numOpSetRepl = " + numOpSetRepl + " numOpMkDir = " + numOpMkDir + " numOpSetPerm = " + numOpSetPerm + " numOpSetOwner = " + numOpSetOwner + " numOpSetGenStamp = " + numOpSetGenStamp + " numOpTimes = " + numOpTimes + " numOpConcatDelete = " + numOpConcatDelete + " numOpRename = " + numOpRename + " numOpGetDelegationToken = " + numOpGetDelegationToken + " numOpRenewDelegationToken = " + numOpRenewDelegationToken + " numOpCancelDelegationToken = " + numOpCancelDelegationToken + " numOpUpdateMasterKey = " + numOpUpdateMasterKey + " numOpOther = " + numOpOther); } return numEdits; }
SingleIndex(String directoryName, String fileName, String indexPath) {
  try {
    // write to a directory for checking
    Directory dir = FSDirectory.open(new File(indexPath));
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_48);
    IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_48, analyzer);
    IndexWriter writer = new IndexWriter(dir, iwc);

    // Resolve the single file to index (assumption: fileName is relative to
    // directoryName).
    File file = new File(directoryName, fileName);

    FileInputStream fis;
    try {
      fis = new FileInputStream(file);
    } catch (FileNotFoundException fnfe) {
      // at least on windows, some temporary files raise this exception with an
      // "access denied" message; checking if the file can be read doesn't help
      writer.close(); // release the writer opened above before bailing out
      return;
    }

    try {
      // make a new, empty document
      Document doc = new Document();

      // Add the path of the file as a field named "path". Use a
      // field that is indexed (i.e. searchable), but don't tokenize
      // the field into separate words and don't index term frequency
      // or positional information:
      Field pathField = new StringField("path", file.getPath(), Field.Store.YES);
      doc.add(pathField);

      // Add the last modified date of the file a field named "modified".
      // Use a LongField that is indexed (i.e. efficiently filterable with
      // NumericRangeFilter). This indexes to milli-second resolution, which
      // is often too fine. You could instead create a number based on
      // year/month/day/hour/minutes/seconds, down the resolution you require.
      // For example the long value 2011021714 would mean
      // February 17, 2011, 2-3 PM.
      doc.add(new LongField("modified", file.lastModified(), Field.Store.NO));

      // Add the contents of the file to a field named "contents". Specify a Reader,
      // so that the text of the file is tokenized and indexed, but not stored.
      // Note that FileReader expects the file to be in UTF-8 encoding.
      // If that's not the case searching for special characters will fail.
      doc.add(new TextField("contents",
          new BufferedReader(new InputStreamReader(fis, StandardCharsets.UTF_8))));

      if (writer.getConfig().getOpenMode() == OpenMode.CREATE) {
        // New index, so we just add the document (no old document can be there):
        System.out.println("adding " + file);
        writer.addDocument(doc);
      } else {
        // Existing index (an old copy of this document may have been indexed) so
        // we use updateDocument instead to replace the old one matching the exact
        // path, if present:
        System.out.println("updating " + file);
        writer.updateDocument(new Term("path", file.getPath()), doc);
      }
    } finally {
      fis.close();
    }
    writer.close();
  } catch (Exception e) {
    e.printStackTrace();
  }
}
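/*
 * A hypothetical invocation of the constructor above (the paths are
 * illustrative, not part of the original code): index the single file
 * notes.txt from /data/docs into the Lucene index at /data/index.
 */
public static void main(String[] args) {
  new SingleIndex("/data/docs", "notes.txt", "/data/index");
}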