@Override
public void insertAll(Iterator<Product2<K, V>> records) throws IOException {
  assert (partitionWriters == null);
  if (!records.hasNext()) {
    return;
  }
  final SerializerInstance serInstance = serializer.newInstance();
  final long openStartTime = System.nanoTime();
  partitionWriters = new DiskBlockObjectWriter[numPartitions];
  for (int i = 0; i < numPartitions; i++) {
    final Tuple2<TempShuffleBlockId, File> tempShuffleBlockIdPlusFile =
        blockManager.diskBlockManager().createTempShuffleBlock();
    final File file = tempShuffleBlockIdPlusFile._2();
    final BlockId blockId = tempShuffleBlockIdPlusFile._1();
    partitionWriters[i] = blockManager
        .getDiskWriter(blockId, file, serInstance, fileBufferSize, writeMetrics)
        .open();
  }
  // Creating the file to write to and creating a disk writer both involve interacting with
  // the disk, and can take a long time in aggregate when we open many files, so should be
  // included in the shuffle write time.
  writeMetrics.incShuffleWriteTime(System.nanoTime() - openStartTime);

  while (records.hasNext()) {
    final Product2<K, V> record = records.next();
    final K key = record._1();
    partitionWriters[partitioner.getPartition(key)].write(key, record._2());
  }

  for (DiskBlockObjectWriter writer : partitionWriters) {
    writer.commitAndClose();
  }
}
public static void loadPlayer(Registry registry) {
  Player p = (Player) getPlayers().get(player);
  if (p != null) {
    p.setTransient(registry);
  }

  GameController gc = registry.getGameController();

  registry.getPlayerManager().clearPlayers();
  registry.getPlayerManager().registerPlayer(p);

  BlockManager bm = (BlockManager) blockManagers.get(player);
  bm.name = "Saved";
  bm = (BlockManager) bm.clone();
  bm.name = "Clone";
  bm.setTransient(registry);
  gc.setBlockManager(bm);

  PlaceableManager pm = (PlaceableManager) placeableManagers.get(player).clone();
  gc.setPlaceableManager(pm);

  MonsterManager mm = (MonsterManager) monsterManagers.get(player).clone();
  mm.setTransient(registry);
  gc.setMonsterManager(mm);

  if (p != null) {
    p.resetPlayer();
  }

  // unloadUnused();
}
/**
 * Call the heartbeat check function of the HeartbeatManager and get the under-replicated
 * blocks count, all within the write lock, to make sure computeDatanodeWork doesn't interfere.
 *
 * @param namesystem the FSNamesystem
 * @param bm the BlockManager to manipulate
 * @return the number of under-replicated blocks
 */
public static int checkHeartbeatAndGetUnderReplicatedBlocksCount(
    FSNamesystem namesystem, BlockManager bm) {
  namesystem.writeLock();
  try {
    bm.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
    return bm.getUnderReplicatedNotMissingBlocks();
  } finally {
    namesystem.writeUnlock();
  }
}
/**
 * Test that the APPEND operation can handle token expiration when re-establishing the
 * pipeline is needed.
 */
@Test
public void testAppend() throws Exception {
  MiniDFSCluster cluster = null;
  int numDataNodes = 2;
  Configuration conf = getConf(numDataNodes);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());

    final NameNode nn = cluster.getNameNode();
    final BlockManager bm = nn.getNamesystem().getBlockManager();
    final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();

    // set a short token lifetime (1 second)
    SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
    Path fileToAppend = new Path(FILE_TO_APPEND);
    FileSystem fs = cluster.getFileSystem();
    byte[] expected = generateBytes(FILE_SIZE);

    // write a one-byte file
    FSDataOutputStream stm = writeFile(fs, fileToAppend, (short) numDataNodes, BLOCK_SIZE);
    stm.write(expected, 0, 1);
    stm.close();

    // open the file again for append
    stm = fs.append(fileToAppend);
    int mid = expected.length - 1;
    stm.write(expected, 1, mid - 1);
    stm.hflush();

    /*
     * wait till the token used in stm expires
     */
    Token<BlockTokenIdentifier> token = DFSTestUtil.getBlockToken(stm);
    while (!SecurityTestUtil.isBlockTokenExpired(token)) {
      try {
        Thread.sleep(10);
      } catch (InterruptedException ignored) {
      }
    }

    // remove a datanode to force re-establishing the pipeline
    cluster.stopDataNode(0);

    // append the rest of the file
    stm.write(expected, mid, expected.length - mid);
    stm.close();

    // check if the append was successful
    FSDataInputStream in5 = fs.open(fileToAppend);
    assertTrue(checkFile1(in5, expected));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
/**
 * @return an array describing the replica state of the given block: the number of racks, the
 *     number of live replicas, and 1 if the block is in the needed-replications queue (0
 *     otherwise).
 */
public static int[] getReplicaInfo(final FSNamesystem namesystem, final Block b) {
  final BlockManager bm = namesystem.getBlockManager();
  namesystem.readLock();
  try {
    return new int[] {
      getNumberOfRacks(bm, b),
      bm.countNodes(b).liveReplicas(),
      bm.neededReplications.contains(b) ? 1 : 0
    };
  } finally {
    namesystem.readUnlock();
  }
}
/**
 * Change whether the block placement policy will prefer the writer's local Datanode or not.
 *
 * @param prefer if true, prefer the local node
 */
public static void setWritingPrefersLocalNode(BlockManager bm, boolean prefer) {
  BlockPlacementPolicy bpp = bm.getBlockPlacementPolicy();
  Preconditions.checkState(
      bpp instanceof BlockPlacementPolicyDefault,
      "Must use default policy, got %s", bpp.getClass());
  ((BlockPlacementPolicyDefault) bpp).setPreferLocalNode(prefer);
}
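/*
 * Hypothetical usage sketch for the helper above, not part of the original code: temporarily
 * disable prefer-local-node in a test so block placement does not favor the writer's datanode,
 * then restore the default. Assumes a running MiniDFSCluster named "cluster" and that this
 * code can see the static helper above.
 */
BlockManager bm = cluster.getNamesystem().getBlockManager();
setWritingPrefersLocalNode(bm, false);
try {
  // write test files here; new replicas should no longer favor the client's local datanode
} finally {
  setWritingPrefersLocalNode(bm, true);
}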
/** Stop the replication monitor thread. */
public static void stopReplicationThread(final BlockManager blockManager) throws IOException {
  blockManager.enableRMTerminationForTesting();
  blockManager.replicationThread.interrupt();
  try {
    blockManager.replicationThread.join();
  } catch (InterruptedException ie) {
    throw new IOException("Interrupted while trying to stop ReplicationMonitor");
  }
}
/**
 * Stop decommissioning the specified datanode.
 *
 * @param node the datanode for which decommissioning should be stopped
 */
@VisibleForTesting
public void stopDecommission(DatanodeDescriptor node) {
  if (node.isDecommissionInProgress() || node.isDecommissioned()) {
    // Update DN stats maintained by HeartbeatManager
    hbManager.stopDecommission(node);
    // Over-replicated blocks will be detected and processed when
    // the dead node comes back and sends in its full block report.
    if (node.isAlive) {
      blockManager.processOverReplicatedBlocksOnReCommission(node);
    }
    // Remove from tracking in DecommissionManager
    pendingNodes.remove(node);
    decomNodeBlocks.remove(node);
  } else {
    LOG.trace("stopDecommission: Node {} in {}, nothing to do.",
        node, node.getAdminState());
  }
}
/*
 * Test the blocks directly in contact, and if they are ascending rails, add
 * them to the updatePrevention list.
 */
private void checkForAscendingRails() {
  BlockFace[] cardinals = {
    BlockFace.EAST, BlockFace.WEST, BlockFace.NORTH, BlockFace.SOUTH, BlockFace.UP
  };
  Block block = blockState.getBlock();
  for (BlockFace face : cardinals) {
    Block tmp_block = block.getRelative(face);
    if (tmp_block.getState() instanceof Rails) {
      byte data = tmp_block.getData();
      if (data > 1 && data < 6) {
        BlockFace facing = null;
        if (data == 2) {
          facing = BlockFace.EAST;
        } else if (data == 3) {
          facing = BlockFace.WEST;
        } else if (data == 4) {
          facing = BlockFace.NORTH;
        } else if (data == 5) {
          facing = BlockFace.SOUTH;
        }
        if (tmp_block.getRelative(facing).getType() == Material.AIR) {
          BlockManager.putUpdatePrevention(CreeperBlock.newBlock(tmp_block.getState()));
        }
      }
    }
  }
}
/**
 * Checks whether a block is sufficiently replicated for decommissioning. Full-strength
 * replication is not always necessary, hence "sufficient".
 *
 * @return true if sufficient, else false.
 */
private boolean isSufficientlyReplicated(
    BlockInfo block, BlockCollection bc, NumberReplicas numberReplicas) {
  final int numExpected = bc.getPreferredBlockReplication();
  final int numLive = numberReplicas.liveReplicas();
  if (!blockManager.isNeededReplication(block, numExpected, numLive)) {
    // Block doesn't need replication. Skip.
    LOG.trace("Block {} does not need replication.", block);
    return true;
  }

  // Block is under-replicated
  LOG.trace("Block {} numExpected={}, numLive={}", block, numExpected, numLive);
  if (numExpected > numLive) {
    if (bc.isUnderConstruction() && block.equals(bc.getLastBlock())) {
      // Can decom a UC block as long as there will still be minReplicas
      if (numLive >= blockManager.minReplication) {
        LOG.trace("UC block {} sufficiently-replicated since numLive ({}) "
            + ">= minR ({})", block, numLive, blockManager.minReplication);
        return true;
      } else {
        LOG.trace("UC block {} insufficiently-replicated since numLive "
            + "({}) < minR ({})", block, numLive, blockManager.minReplication);
      }
    } else {
      // Can decom a non-UC as long as the default replication is met
      if (numLive >= blockManager.defaultReplication) {
        return true;
      }
    }
  }
  return false;
}
/** Refresh block queue counts on the name-node. */
public static void updateState(final BlockManager blockManager) {
  blockManager.updateState();
}
public static int computeInvalidationWork(BlockManager bm) {
  return bm.computeInvalidateWork(Integer.MAX_VALUE);
}
protected void doTestRead(Configuration conf, MiniDFSCluster cluster, boolean isStriped)
    throws Exception {
  final int numDataNodes = cluster.getDataNodes().size();
  final NameNode nn = cluster.getNameNode();
  final NamenodeProtocols nnProto = nn.getRpcServer();
  final BlockManager bm = nn.getNamesystem().getBlockManager();
  final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();

  // set a short token lifetime (1 second) initially
  SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);

  Path fileToRead = new Path(FILE_TO_READ);
  FileSystem fs = cluster.getFileSystem();
  byte[] expected = generateBytes(FILE_SIZE);
  createFile(fs, fileToRead, expected);

  /*
   * setup for testing expiration handling of cached tokens
   */

  // read using blockSeekTo(). Acquired tokens are cached in in1
  FSDataInputStream in1 = fs.open(fileToRead);
  assertTrue(checkFile1(in1, expected));
  // read using blockSeekTo(). Acquired tokens are cached in in2
  FSDataInputStream in2 = fs.open(fileToRead);
  assertTrue(checkFile1(in2, expected));
  // read using fetchBlockByteRange(). Acquired tokens are cached in in3
  FSDataInputStream in3 = fs.open(fileToRead);
  assertTrue(checkFile2(in3, expected));

  /*
   * testing READ interface on DN using a BlockReader
   */
  DFSClient client = null;
  try {
    client = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
  } finally {
    if (client != null) {
      client.close();
    }
  }
  List<LocatedBlock> locatedBlocks =
      nnProto.getBlockLocations(FILE_TO_READ, 0, FILE_SIZE).getLocatedBlocks();
  LocatedBlock lblock = locatedBlocks.get(0); // first block
  // verify token is not expired
  assertFalse(isBlockTokenExpired(lblock));
  // read with valid token, should succeed
  tryRead(conf, lblock, true);

  /*
   * wait till myToken and all cached tokens in in1, in2 and in3 expire
   */
  while (!isBlockTokenExpired(lblock)) {
    try {
      Thread.sleep(10);
    } catch (InterruptedException ignored) {
    }
  }

  /*
   * continue testing READ interface on DN using a BlockReader
   */

  // verify token is expired
  assertTrue(isBlockTokenExpired(lblock));
  // read should fail
  tryRead(conf, lblock, false);
  // use a valid new token
  bm.setBlockToken(lblock, BlockTokenIdentifier.AccessMode.READ);
  // read should succeed
  tryRead(conf, lblock, true);
  // use a token with wrong blockID
  long rightId = lblock.getBlock().getBlockId();
  long wrongId = rightId + 1;
  lblock.getBlock().setBlockId(wrongId);
  bm.setBlockToken(lblock, BlockTokenIdentifier.AccessMode.READ);
  lblock.getBlock().setBlockId(rightId);
  // read should fail
  tryRead(conf, lblock, false);
  // use a token with wrong access modes
  bm.setBlockToken(lblock, BlockTokenIdentifier.AccessMode.WRITE);
  // read should fail
  tryRead(conf, lblock, false);

  // set a long token lifetime for future tokens
  SecurityTestUtil.setBlockTokenLifetime(sm, 600 * 1000L);

  /*
   * testing that when cached tokens are expired, DFSClient will re-fetch
   * tokens transparently for READ.
   */

  // confirm all tokens cached in in1 are expired by now
  List<LocatedBlock> lblocks = DFSTestUtil.getAllBlocks(in1);
  for (LocatedBlock blk : lblocks) {
    assertTrue(isBlockTokenExpired(blk));
  }
  // verify blockSeekTo() is able to re-fetch token transparently
  in1.seek(0);
  assertTrue(checkFile1(in1, expected));

  // confirm all tokens cached in in2 are expired by now
  List<LocatedBlock> lblocks2 = DFSTestUtil.getAllBlocks(in2);
  for (LocatedBlock blk : lblocks2) {
    assertTrue(isBlockTokenExpired(blk));
  }
  // verify blockSeekTo() is able to re-fetch token transparently (testing
  // via another interface method)
  if (isStriped) {
    // striped block doesn't support seekToNewSource
    in2.seek(0);
  } else {
    assertTrue(in2.seekToNewSource(0));
  }
  assertTrue(checkFile1(in2, expected));

  // confirm all tokens cached in in3 are expired by now
  List<LocatedBlock> lblocks3 = DFSTestUtil.getAllBlocks(in3);
  for (LocatedBlock blk : lblocks3) {
    assertTrue(isBlockTokenExpired(blk));
  }
  // verify fetchBlockByteRange() is able to re-fetch token transparently
  assertTrue(checkFile2(in3, expected));

  /*
   * testing that after datanodes are restarted on the same ports, cached
   * tokens should still work and there is no need to fetch new tokens from
   * namenode. This test should run while namenode is down (to make sure no
   * new tokens can be fetched from namenode).
   */

  // restart datanodes on the same ports that they currently use
  assertTrue(cluster.restartDataNodes(true));
  cluster.waitActive();
  assertEquals(numDataNodes, cluster.getDataNodes().size());
  cluster.shutdownNameNode(0);

  // confirm tokens cached in in1 are still valid
  lblocks = DFSTestUtil.getAllBlocks(in1);
  for (LocatedBlock blk : lblocks) {
    assertFalse(isBlockTokenExpired(blk));
  }
  // verify blockSeekTo() still works (forced to use cached tokens)
  in1.seek(0);
  assertTrue(checkFile1(in1, expected));

  // confirm tokens cached in in2 are still valid
  lblocks2 = DFSTestUtil.getAllBlocks(in2);
  for (LocatedBlock blk : lblocks2) {
    assertFalse(isBlockTokenExpired(blk));
  }
  // verify blockSeekTo() still works (forced to use cached tokens)
  if (isStriped) {
    in2.seek(0);
  } else {
    in2.seekToNewSource(0);
  }
  assertTrue(checkFile1(in2, expected));

  // confirm tokens cached in in3 are still valid
  lblocks3 = DFSTestUtil.getAllBlocks(in3);
  for (LocatedBlock blk : lblocks3) {
    assertFalse(isBlockTokenExpired(blk));
  }
  // verify fetchBlockByteRange() still works (forced to use cached tokens)
  assertTrue(checkFile2(in3, expected));

  /*
   * testing that when namenode is restarted, cached tokens should still
   * work and there is no need to fetch new tokens from namenode. Like the
   * previous test, this test should also run while namenode is down. The
   * setup for this test depends on the previous test.
   */

  // restart the namenode and then shut it down for test
  cluster.restartNameNode(0);
  cluster.shutdownNameNode(0);

  // verify blockSeekTo() still works (forced to use cached tokens)
  in1.seek(0);
  assertTrue(checkFile1(in1, expected));
  // verify again blockSeekTo() still works (forced to use cached tokens)
  if (isStriped) {
    in2.seek(0);
  } else {
    in2.seekToNewSource(0);
  }
  assertTrue(checkFile1(in2, expected));
  // verify fetchBlockByteRange() still works (forced to use cached tokens)
  assertTrue(checkFile2(in3, expected));

  /*
   * testing that after both namenode and datanodes got restarted (namenode
   * first, followed by datanodes), DFSClient can't access DN without
   * re-fetching tokens and is able to re-fetch tokens transparently. The
   * setup of this test depends on the previous test.
   */

  // restore the cluster and restart the datanodes for test
  cluster.restartNameNode(0);
  assertTrue(cluster.restartDataNodes(true));
  cluster.waitActive();
  assertEquals(numDataNodes, cluster.getDataNodes().size());

  // shutdown namenode so that DFSClient can't get new tokens from namenode
  cluster.shutdownNameNode(0);

  // verify blockSeekTo() fails (cached tokens become invalid)
  in1.seek(0);
  assertFalse(checkFile1(in1, expected));
  // verify fetchBlockByteRange() fails (cached tokens become invalid)
  assertFalse(checkFile2(in3, expected));

  // restart the namenode to allow DFSClient to re-fetch tokens
  cluster.restartNameNode(0);
  // verify blockSeekTo() works again (by transparently re-fetching
  // tokens from namenode)
  in1.seek(0);
  assertTrue(checkFile1(in1, expected));
  if (isStriped) {
    in2.seek(0);
  } else {
    in2.seekToNewSource(0);
  }
  assertTrue(checkFile1(in2, expected));
  // verify fetchBlockByteRange() works again (by transparently
  // re-fetching tokens from namenode)
  assertTrue(checkFile2(in3, expected));

  /*
   * testing that when datanodes are restarted on different ports, DFSClient
   * is able to re-fetch tokens transparently to connect to them
   */

  // restart datanodes on newly assigned ports
  assertTrue(cluster.restartDataNodes(false));
  cluster.waitActive();
  assertEquals(numDataNodes, cluster.getDataNodes().size());

  // verify blockSeekTo() is able to re-fetch token transparently
  in1.seek(0);
  assertTrue(checkFile1(in1, expected));
  // verify blockSeekTo() is able to re-fetch token transparently
  if (isStriped) {
    in2.seek(0);
  } else {
    in2.seekToNewSource(0);
  }
  assertTrue(checkFile1(in2, expected));
  // verify fetchBlockByteRange() is able to re-fetch token transparently
  assertTrue(checkFile2(in3, expected));
}
/**
 * Compute all the replication and invalidation work for the given BlockManager.
 *
 * <p>This differs from the above functions in that it computes replication work for all DNs
 * rather than a particular subset, regardless of invalidation/replication limit configurations.
 *
 * <p>NB: you may want to set {@link DFSConfigKeys#DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY} to a
 * high value to ensure that all work is calculated.
 */
public static int computeAllPendingWork(BlockManager bm) {
  int work = computeInvalidationWork(bm);
  work += bm.computeReplicationWork(Integer.MAX_VALUE);
  return work;
}
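/*
 * A minimal, hypothetical sketch of the NB above, not part of the original utilities: raise
 * dfs.namenode.replication.max-streams before the NameNode starts so computeAllPendingWork()
 * is not throttled by the per-node replication stream limit. The MiniDFSCluster setup shown
 * here is an assumption for illustration only.
 */
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, Integer.MAX_VALUE);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
// compute every piece of pending replication and invalidation work at once
int pendingWork = computeAllPendingWork(cluster.getNamesystem().getBlockManager());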
/**
 * Call the heartbeat check function of the HeartbeatManager.
 *
 * @param bm the BlockManager to manipulate
 */
public static void checkHeartbeat(BlockManager bm) {
  bm.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
}
/**
 * @return computed block replication and block invalidation work that can be scheduled on
 *     data-nodes.
 * @throws IOException
 */
public static int getComputedDatanodeWork(final BlockManager blockManager) throws IOException {
  return blockManager.computeDatanodeWork();
}
public static void setNodeReplicationLimit(final BlockManager blockManager, final int limit) {
  blockManager.maxReplicationStreams = limit;
}
/**
 * Have the given BlockManager rescan its postponed mis-replicated blocks.
 *
 * @param bm the BlockManager to manipulate
 */
public static void rescanPostponedMisreplicatedBlocks(BlockManager bm) {
  bm.rescanPostponedMisreplicatedBlocks();
}