/**
 * Computes Drillbit affinity for this scan: each region to scan contributes one
 * affinity unit to the Drillbit running on the region's hosting server.
 *
 * @return one {@link EndpointAffinity} per Drillbit that hosts at least one
 *     region of this scan; regions on hosts with no Drillbit are ignored.
 */
@Override
public List<EndpointAffinity> getOperatorAffinity() {
  watch.reset();
  watch.start();

  // Index the active Drillbits by hostname so region-server hosts can be matched below.
  Map<String, DrillbitEndpoint> endpointMap = new HashMap<>();
  for (DrillbitEndpoint ep : storagePlugin.getContext().getBits()) {
    endpointMap.put(ep.getAddress(), ep);
  }

  // Accumulate one affinity unit per region hosted on a known Drillbit.
  Map<DrillbitEndpoint, EndpointAffinity> affinityMap = new HashMap<>();
  for (ServerName sn : regionsToScan.values()) {
    DrillbitEndpoint ep = endpointMap.get(sn.getHostname());
    if (ep != null) {
      EndpointAffinity affinity = affinityMap.get(ep);
      if (affinity == null) {
        affinityMap.put(ep, new EndpointAffinity(ep, 1));
      } else {
        affinity.addAffinity(1);
      }
    }
  }

  // elapsed(MICROSECONDS) replaces the manual elapsed(NANOSECONDS) / 1000 division;
  // both truncate identically.
  logger.debug("Took {} µs to get operator affinity", watch.elapsed(TimeUnit.MICROSECONDS));
  return Lists.newArrayList(affinityMap.values());
}
public void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src) throws IOException { ServerName sn = AbstractFSWALProvider.getServerNameFromWALDirectoryName(conf, src); if (sn == null) { // It's not an WAL return; } // Ok, so it's an WAL String hostName = sn.getHostname(); if (LOG.isTraceEnabled()) { LOG.trace(src + " is an WAL file, so reordering blocks, last hostname will be:" + hostName); } // Just check for all blocks for (LocatedBlock lb : lbs.getLocatedBlocks()) { DatanodeInfo[] dnis = lb.getLocations(); if (dnis != null && dnis.length > 1) { boolean found = false; for (int i = 0; i < dnis.length - 1 && !found; i++) { if (hostName.equals(dnis[i].getHostName())) { // advance the other locations by one and put this one at the last place. DatanodeInfo toLast = dnis[i]; System.arraycopy(dnis, i + 1, dnis, i, dnis.length - i - 1); dnis[dnis.length - 1] = toLast; found = true; } } } } }
/**
 * Tests an on-the-fly RPC that was scheduled for the earlier RS on the same port for openRegion.
 * The region server should reject this RPC. (HBASE-9721)
 */
@Test
public void testOpenCloseRegionRPCIntendedForPreviousServer() throws Exception {
  Assert.assertTrue(getRS().getRegion(regionName).isAvailable());

  // Fabricate a ServerName with the same host/port but start code 1, i.e. an
  // earlier incarnation of this same region server.
  ServerName sn = getRS().getServerName();
  ServerName earlierServerName = ServerName.valueOf(sn.getHostname(), sn.getPort(), 1);

  try {
    CloseRegionRequest request =
        RequestConverter.buildCloseRegionRequest(earlierServerName, regionName);
    getRS().getRSRpcServices().closeRegion(null, request);
    Assert.fail("The closeRegion should have been rejected");
  } catch (ServiceException se) {
    // The RS must detect the stale start code and refuse the close.
    Assert.assertTrue(se.getCause() instanceof IOException);
    Assert.assertTrue(
        se.getCause().getMessage().contains("This RPC was intended for a different server"));
  }

  // actual close
  closeRegionNoZK();
  try {
    // Same check for openRegion: an RPC addressed to the previous incarnation
    // must be rejected even though host and port match.
    AdminProtos.OpenRegionRequest orr =
        RequestConverter.buildOpenRegionRequest(earlierServerName, hri, null, null);
    getRS().getRSRpcServices().openRegion(null, orr);
    Assert.fail("The openRegion should have been rejected");
  } catch (ServiceException se) {
    Assert.assertTrue(se.getCause() instanceof IOException);
    Assert.assertTrue(
        se.getCause().getMessage().contains("This RPC was intended for a different server"));
  } finally {
    // Re-open the region so subsequent tests see it online again.
    openRegion(HTU, getRS(), hri);
  }
}
/**
 * Prints, for each (targeted) table, the average block locality of the regions at their
 * primary, secondary, and tertiary favored nodes under the current assignment plan, then
 * prints the dispersion scores.
 *
 * @param regionLocalityMap map of region encoded name -> (hostname -> locality fraction),
 *     as reported by the filesystem
 * @throws IOException if the region assignment snapshot cannot be fetched
 */
public void printLocalityAndDispersionForCurrentPlan(
    Map<String, Map<String, Float>> regionLocalityMap) throws IOException {
  SnapshotOfRegionAssignmentFromMeta snapshot = this.getRegionAssignmentSnapshot();
  FavoredNodesPlan assignmentPlan = snapshot.getExistingAssignmentPlan();
  Set<TableName> tables = snapshot.getTableSet();
  Map<TableName, List<HRegionInfo>> tableToRegionsMap = snapshot.getTableToRegionMap();
  for (TableName table : tables) {
    // locality[0..2] accumulates locality for primary/secondary/tertiary positions.
    float[] locality = new float[3];
    if (!this.targetTableSet.isEmpty() && !this.targetTableSet.contains(table)) {
      continue;
    }
    List<HRegionInfo> regions = tableToRegionsMap.get(table);
    for (HRegionInfo currentRegion : regions) {
      Map<String, Float> regionLocality = regionLocalityMap.get(currentRegion.getEncodedName());
      if (regionLocality == null) {
        continue;
      }
      List<ServerName> servers = assignmentPlan.getFavoredNodes(currentRegion);
      if (servers != null) {
        for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) {
          ServerName server = servers.get(p.ordinal());
          // BUGFIX: this used to re-test "servers != null" (always true here);
          // guard the individual favored node instead so a null entry cannot
          // NPE on getHostname(). Also index by p.ordinal() instead of a
          // separate manual counter that merely mirrored it.
          if (server != null) {
            Float currentLocality = regionLocality.get(server.getHostname());
            if (currentLocality == null) {
              currentLocality = 0f;
            }
            locality[p.ordinal()] += currentLocality;
          }
        }
      }
    }
    for (int i = 0; i < locality.length; i++) {
      String copy = null;
      if (i == 0) {
        copy = "primary";
      } else if (i == 1) {
        copy = "secondary";
      } else if (i == 2) {
        copy = "tertiary";
      }
      float avgLocality = 100 * locality[i] / regions.size();
      LOG.info(
          "For Table: "
              + table
              + " ; #Total Regions: "
              + regions.size()
              + " ; The average locality for "
              + copy
              + " is "
              + avgLocality
              + " %");
    }
    printDispersionScores(table, snapshot, regions.size(), null, false);
  }
}
/**
 * Renders the "Backup Masters" HTML table for the master status page.
 *
 * <p>NOTE(review): this is Jamon machine-generated template code — only comments were
 * added here. The "// NN, NN" markers are Jamon's original template line/column positions;
 * do not hand-edit the emitted markup, regenerate from the template instead.
 */
public void renderNoFlush(
    @SuppressWarnings({"unused", "hiding"}) final java.io.Writer jamonWriter)
    throws java.io.IOException {
  // 35, 1
  // Backup masters are only known to the active master; otherwise leave null.
  Collection<ServerName> backupMasters = null;
  if (master.isActiveMaster()) {
    ClusterStatus status = master.getClusterStatus();
    backupMasters = status.getBackupMasters();
  }
  // 43, 1
  jamonWriter.write("<table class=\"table table-striped\">\n");
  // 44, 1
  if ((backupMasters != null && backupMasters.size() > 0)) {
    // 44, 59
    jamonWriter.write(
        "\n<tr>\n <th>ServerName</th>\n <th>Port</th>\n <th>Start Time</th>\n</tr>\n");
    // 50, 1
    // Sort for a stable display order, then emit one row per backup master.
    ServerName[] serverNames = backupMasters.toArray(new ServerName[backupMasters.size()]);
    Arrays.sort(serverNames);
    for (ServerName serverName : serverNames) {
      // 55, 1
      jamonWriter.write("<tr>\n <td>");
      // 56, 9
      org.jamon.escaping.Escaping.HTML.write(
          org.jamon.emit.StandardEmitter.valueOf(serverName.getHostname()), jamonWriter);
      // 56, 39
      jamonWriter.write("</td>\n <td>");
      // 57, 9
      org.jamon.escaping.Escaping.HTML.write(
          org.jamon.emit.StandardEmitter.valueOf(serverName.getPort()), jamonWriter);
      // 57, 35
      jamonWriter.write("</td>\n <td>");
      // 58, 9
      org.jamon.escaping.Escaping.HTML.write(
          org.jamon.emit.StandardEmitter.valueOf(new Date(serverName.getStartcode())),
          jamonWriter);
      // 58, 50
      jamonWriter.write("</td>\n</tr>\n");
      // 60, 1
    }
  }
  // 63, 7
  jamonWriter.write("\n<tr><td>Total:");
  // 64, 15
  org.jamon.escaping.Escaping.HTML.write(
      org.jamon.emit.StandardEmitter.valueOf((backupMasters != null) ? backupMasters.size() : 0),
      jamonWriter);
  // 64, 71
  jamonWriter.write("</td>\n</table>\n\n\n\n\n");
}
/**
 * @param implementation An {@link HRegionInterface} instance; you'll likely want to pass a mocked
 *     HRS; can be null.
 * @return Mock up a connection that returns a {@link org.apache.hadoop.conf.Configuration} when
 *     {@link HConnection#getConfiguration()} is called, a 'location' when {@link
 *     HConnection#getRegionLocation(byte[], byte[], boolean)} is called, and that returns the
 *     passed {@link HRegionInterface} instance when {@link
 *     HConnection#getHRegionConnection(String, int)} is called (Be sure call {@link
 *     HConnectionManager#deleteConnection(org.apache.hadoop.conf.Configuration)} when done with
 *     this mocked Connection.
 * @throws IOException
 */
private HConnection mockConnection(final HRegionInterface implementation) throws IOException {
  HConnection connection =
      HConnectionTestingUtility.getMockedConnection(UTIL.getConfiguration());
  // close() must be a no-op so tests can "close" the mock safely.
  Mockito.doNothing().when(connection).close();
  // Make it so we return any old location when asked.
  // SN is the test-wide ServerName fixture; every lookup maps to META's region info.
  final HRegionLocation anyLocation =
      new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, SN.getHostname(), SN.getPort());
  Mockito.when(
          connection.getRegionLocation(
              (byte[]) Mockito.any(), (byte[]) Mockito.any(), Mockito.anyBoolean()))
      .thenReturn(anyLocation);
  Mockito.when(connection.locateRegion((byte[]) Mockito.any(), (byte[]) Mockito.any()))
      .thenReturn(anyLocation);
  if (implementation != null) {
    // If a call to getHRegionConnection, return this implementation.
    Mockito.when(connection.getHRegionConnection(Mockito.anyString(), Mockito.anyInt()))
        .thenReturn(implementation);
  }
  return connection;
}
/**
 * Verifies that an RPC to a master that has not finished starting surfaces
 * ServerNotRunningYetException to the client. Success is the early {@code return}
 * inside the loop; exhausting all retries falls through to the final {@code fail()}.
 */
@Test
public void testRPCException() throws Exception {
  HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.startMiniZKCluster();
  Configuration conf = TEST_UTIL.getConfiguration();
  // Port 0 => pick an ephemeral port for the master.
  conf.set(HConstants.MASTER_PORT, "0");

  // Master is constructed but never started, so its RPC endpoint exists while
  // the server reports "not running yet".
  HMaster hm = new HMaster(conf);

  ServerName sm = hm.getServerName();
  InetSocketAddress isa = new InetSocketAddress(sm.getHostname(), sm.getPort());
  int i = 0;
  // retry the RPC a few times; we have seen SocketTimeoutExceptions if we
  // try to connect too soon. Retry on SocketTimeoutException.
  while (i < 20) {
    try {
      MasterMonitorProtocol inf =
          (MasterMonitorProtocol)
              HBaseClientRPC.getProxy(MasterMonitorProtocol.class, isa, conf, 100 * 10);
      inf.isMasterRunning(null, IsMasterRunningRequest.getDefaultInstance());
      // The call must not succeed while the master isn't running.
      fail();
    } catch (ServiceException ex) {
      IOException ie = ProtobufUtil.getRemoteException(ex);
      if (!(ie instanceof SocketTimeoutException)) {
        // Expected outcome: the remote side reports it is not running yet.
        if (ie.getMessage()
            .startsWith(
                "org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet")) {
          return;
        }
      } else {
        // Transient: server socket not accepting yet; loop around and retry.
        System.err.println("Got SocketTimeoutException. Will retry. ");
      }
    } catch (Throwable t) {
      fail("Unexpected throwable: " + t);
    }
    Thread.sleep(100);
    i++;
  }
  // Never saw the expected exception within 20 attempts.
  fail();
}
/**
 * Calls {@link #getMockedConnection(Configuration)} and then mocks a few more of the popular
 * {@link HConnection} methods so they do 'normal' operation (see return doc below for list). Be
 * sure to shutdown the connection when done by calling {@link
 * HConnectionManager#deleteConnection(Configuration, boolean)} else it will stick around; this is
 * probably not what you want.
 *
 * @param conf Configuration to use
 * @param implementation An {@link HRegionInterface} instance; you'll likely want to pass a mocked
 *     HRS; can be null but is usually itself a mock.
 * @param sn ServerName to include in the region location returned by this <code>implementation
 *     </code>
 * @param hri HRegionInfo to include in the location returned when getRegionLocation is called on
 *     the mocked connection
 * @return Mock up a connection that returns a {@link Configuration} when {@link
 *     HConnection#getConfiguration()} is called, a 'location' when {@link
 *     HConnection#getRegionLocation(byte[], byte[], boolean)} is called, and that returns the
 *     passed {@link HRegionInterface} instance when {@link
 *     HConnection#getHRegionConnection(String, int)} is called (Be sure call {@link
 *     HConnectionManager#deleteConnection(org.apache.hadoop.conf.Configuration, boolean)} when
 *     done with this mocked Connection.
 * @throws IOException
 */
public static HConnection getMockedConnectionAndDecorate(
    final Configuration conf,
    final HRegionInterface implementation,
    final ServerName sn,
    final HRegionInfo hri)
    throws IOException {
  HConnection c = HConnectionTestingUtility.getMockedConnection(conf);
  // close() is a no-op so callers can close the mock without side effects.
  Mockito.doNothing().when(c).close();
  // Make it so we return a particular location when asked.
  final HRegionLocation loc = new HRegionLocation(hri, sn.getHostname(), sn.getPort());
  Mockito.when(
          c.getRegionLocation(
              (byte[]) Mockito.any(), (byte[]) Mockito.any(), Mockito.anyBoolean()))
      .thenReturn(loc);
  Mockito.when(c.locateRegion((byte[]) Mockito.any(), (byte[]) Mockito.any())).thenReturn(loc);
  if (implementation != null) {
    // If a call to getHRegionConnection, return this implementation.
    Mockito.when(c.getHRegionConnection(Mockito.anyString(), Mockito.anyInt()))
        .thenReturn(implementation);
  }
  return c;
}
/** * @param sn ServerName to get a connection against. * @return The HRegionInterface we got when we connected to <code>sn</code> May have come from * cache, may not be good, may have been setup by this invocation, or may be null. * @throws IOException */ private HRegionInterface getCachedConnection(ServerName sn) throws IOException { if (sn == null) { return null; } HRegionInterface protocol = null; try { protocol = connection.getHRegionConnection(sn.getHostname(), sn.getPort()); } catch (RetriesExhaustedException e) { if (e.getCause() != null && e.getCause() instanceof ConnectException) { // Catch this; presume it means the cached connection has gone bad. } else { throw e; } } catch (SocketTimeoutException e) { LOG.debug("Timed out connecting to " + sn); } catch (NoRouteToHostException e) { LOG.debug("Connecting to " + sn, e); } catch (SocketException e) { LOG.debug("Exception connecting to " + sn); } catch (UnknownHostException e) { LOG.debug("Unknown host exception connecting to " + sn); } catch (IOException ioe) { Throwable cause = ioe.getCause(); if (ioe instanceof ConnectException) { // Catch. Connect refused. } else if (cause != null && cause instanceof EOFException) { // Catch. Other end disconnected us. } else if (cause != null && cause.getMessage() != null && cause.getMessage().toLowerCase().contains("connection reset")) { // Catch. Connection reset. } else { throw ioe; } } return protocol; }
/** * Compares two plans and check whether the locality dropped or increased (prints the information * as a string) also prints the baseline locality * * @param movesPerTable - how many primary regions will move per table * @param regionLocalityMap - locality map from FS * @param newPlan - new assignment plan * @throws IOException */ public void checkDifferencesWithOldPlan( Map<TableName, Integer> movesPerTable, Map<String, Map<String, Float>> regionLocalityMap, FavoredNodesPlan newPlan) throws IOException { // localities for primary, secondary and tertiary SnapshotOfRegionAssignmentFromMeta snapshot = this.getRegionAssignmentSnapshot(); FavoredNodesPlan oldPlan = snapshot.getExistingAssignmentPlan(); Set<TableName> tables = snapshot.getTableSet(); Map<TableName, List<HRegionInfo>> tableToRegionsMap = snapshot.getTableToRegionMap(); for (TableName table : tables) { float[] deltaLocality = new float[3]; float[] locality = new float[3]; if (!this.targetTableSet.isEmpty() && !this.targetTableSet.contains(table)) { continue; } List<HRegionInfo> regions = tableToRegionsMap.get(table); System.out.println("=================================================="); System.out.println("Assignment Plan Projection Report For Table: " + table); System.out.println("\t Total regions: " + regions.size()); System.out.println( "\t" + movesPerTable.get(table) + " primaries will move due to their primary has changed"); for (HRegionInfo currentRegion : regions) { Map<String, Float> regionLocality = regionLocalityMap.get(currentRegion.getEncodedName()); if (regionLocality == null) { continue; } List<ServerName> oldServers = oldPlan.getFavoredNodes(currentRegion); List<ServerName> newServers = newPlan.getFavoredNodes(currentRegion); if (newServers != null && oldServers != null) { int i = 0; for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) { ServerName newServer = newServers.get(p.ordinal()); ServerName oldServer = oldServers.get(p.ordinal()); Float oldLocality = 0f; if 
(oldServers != null) { oldLocality = regionLocality.get(oldServer.getHostname()); if (oldLocality == null) { oldLocality = 0f; } locality[i] += oldLocality; } Float newLocality = regionLocality.get(newServer.getHostname()); if (newLocality == null) { newLocality = 0f; } deltaLocality[i] += newLocality - oldLocality; i++; } } } DecimalFormat df = new java.text.DecimalFormat("#.##"); for (int i = 0; i < deltaLocality.length; i++) { System.out.print("\t\t Baseline locality for "); if (i == 0) { System.out.print("primary "); } else if (i == 1) { System.out.print("secondary "); } else if (i == 2) { System.out.print("tertiary "); } System.out.println(df.format(100 * locality[i] / regions.size()) + "%"); System.out.print("\t\t Locality will change with the new plan: "); System.out.println(df.format(100 * deltaLocality[i] / regions.size()) + "%"); } System.out.println("\t Baseline dispersion"); printDispersionScores(table, snapshot, regions.size(), null, true); System.out.println("\t Projected dispersion"); printDispersionScores(table, snapshot, regions.size(), newPlan, true); } }
/**
 * Generate the assignment plan for the existing table
 *
 * @param tableName
 * @param assignmentSnapshot
 * @param regionLocalityMap
 * @param plan
 * @param munkresForSecondaryAndTertiary if set on true the assignment plan for the tertiary and
 *     secondary will be generated with Munkres algorithm, otherwise will be generated using
 *     placeSecondaryAndTertiaryRS
 * @throws IOException
 */
private void genAssignmentPlan(
    TableName tableName,
    SnapshotOfRegionAssignmentFromMeta assignmentSnapshot,
    Map<String, Map<String, Float>> regionLocalityMap,
    FavoredNodesPlan plan,
    boolean munkresForSecondaryAndTertiary)
    throws IOException {
  // Get the all the regions for the current table
  List<HRegionInfo> regions = assignmentSnapshot.getTableToRegionMap().get(tableName);
  int numRegions = regions.size();

  // Get the current assignment map
  Map<HRegionInfo, ServerName> currentAssignmentMap =
      assignmentSnapshot.getRegionToRegionServerMap();

  // Get the all the region servers
  List<ServerName> servers = new ArrayList<ServerName>();
  try (Admin admin = this.connection.getAdmin()) {
    servers.addAll(admin.getClusterStatus().getServers());
  }

  LOG.info(
      "Start to generate assignment plan for "
          + numRegions
          + " regions from table "
          + tableName
          + " with "
          + servers.size()
          + " region servers");

  // Each server contributes slotsPerServer "slots"; the assignment problem is
  // solved over (region, slot) pairs so one server can host multiple regions.
  int slotsPerServer = (int) Math.ceil((float) numRegions / servers.size());
  int regionSlots = slotsPerServer * servers.size();

  // Compute the primary, secondary and tertiary costs for each region/server
  // pair. These costs are based only on node locality and rack locality, and
  // will be modified later.
  float[][] primaryCost = new float[numRegions][regionSlots];
  float[][] secondaryCost = new float[numRegions][regionSlots];
  float[][] tertiaryCost = new float[numRegions][regionSlots];

  if (this.enforceLocality && regionLocalityMap != null) {
    // Transform the locality mapping into a 2D array, assuming that any
    // unspecified locality value is 0.
    float[][] localityPerServer = new float[numRegions][regionSlots];
    for (int i = 0; i < numRegions; i++) {
      Map<String, Float> serverLocalityMap =
          regionLocalityMap.get(regions.get(i).getEncodedName());
      if (serverLocalityMap == null) {
        continue;
      }
      for (int j = 0; j < servers.size(); j++) {
        String serverName = servers.get(j).getHostname();
        if (serverName == null) {
          continue;
        }
        Float locality = serverLocalityMap.get(serverName);
        if (locality == null) {
          continue;
        }
        for (int k = 0; k < slotsPerServer; k++) {
          // If we can't find the locality of a region to a server, which occurs
          // because locality is only reported for servers which have some
          // blocks of a region local, then the locality for that pair is 0.
          localityPerServer[i][j * slotsPerServer + k] = locality.floatValue();
        }
      }
    }

    // Compute the total rack locality for each region in each rack. The total
    // rack locality is the sum of the localities of a region on all servers in
    // a rack.
    Map<String, Map<HRegionInfo, Float>> rackRegionLocality =
        new HashMap<String, Map<HRegionInfo, Float>>();
    for (int i = 0; i < numRegions; i++) {
      HRegionInfo region = regions.get(i);
      for (int j = 0; j < regionSlots; j += slotsPerServer) {
        String rack = rackManager.getRack(servers.get(j / slotsPerServer));
        Map<HRegionInfo, Float> rackLocality = rackRegionLocality.get(rack);
        if (rackLocality == null) {
          rackLocality = new HashMap<HRegionInfo, Float>();
          rackRegionLocality.put(rack, rackLocality);
        }
        Float localityObj = rackLocality.get(region);
        float locality = localityObj == null ? 0 : localityObj.floatValue();
        locality += localityPerServer[i][j];
        rackLocality.put(region, locality);
      }
    }
    for (int i = 0; i < numRegions; i++) {
      for (int j = 0; j < regionSlots; j++) {
        String rack = rackManager.getRack(servers.get(j / slotsPerServer));
        Float totalRackLocalityObj = rackRegionLocality.get(rack).get(regions.get(i));
        float totalRackLocality =
            totalRackLocalityObj == null ? 0 : totalRackLocalityObj.floatValue();

        // Primary cost aims to favor servers with high node locality and low
        // rack locality, so that secondaries and tertiaries can be chosen for
        // nodes with high rack locality. This might give primaries with
        // slightly less locality at first compared to a cost which only
        // considers the node locality, but should be better in the long run.
        primaryCost[i][j] = 1 - (2 * localityPerServer[i][j] - totalRackLocality);

        // Secondary cost aims to favor servers with high node locality and high
        // rack locality since the tertiary will be chosen from the same rack as
        // the secondary. This could be negative, but that is okay.
        secondaryCost[i][j] = 2 - (localityPerServer[i][j] + totalRackLocality);

        // Tertiary cost is only concerned with the node locality. It will later
        // be restricted to only hosts on the same rack as the secondary.
        tertiaryCost[i][j] = 1 - localityPerServer[i][j];
      }
    }
  }

  if (this.enforceMinAssignmentMove && currentAssignmentMap != null) {
    // We want to minimize the number of regions which move as the result of a
    // new assignment. Therefore, slightly penalize any placement which is for
    // a host that is not currently serving the region.
    for (int i = 0; i < numRegions; i++) {
      for (int j = 0; j < servers.size(); j++) {
        ServerName currentAddress = currentAssignmentMap.get(regions.get(i));
        if (currentAddress != null && !currentAddress.equals(servers.get(j))) {
          for (int k = 0; k < slotsPerServer; k++) {
            primaryCost[i][j * slotsPerServer + k] += NOT_CURRENT_HOST_PENALTY;
          }
        }
      }
    }
  }

  // Artificially increase cost of last slot of each server to evenly
  // distribute the slop, otherwise there will be a few servers with too few
  // regions and many servers with the max number of regions.
  // NOTE(review): j starts at 0 and steps by slotsPerServer, so this touches the
  // FIRST slot index of each server, not the last as the comment above says —
  // verify against upstream before changing.
  for (int i = 0; i < numRegions; i++) {
    for (int j = 0; j < regionSlots; j += slotsPerServer) {
      primaryCost[i][j] += LAST_SLOT_COST_PENALTY;
      secondaryCost[i][j] += LAST_SLOT_COST_PENALTY;
      tertiaryCost[i][j] += LAST_SLOT_COST_PENALTY;
    }
  }

  // Solve the primary assignment; the matrix is randomized first so equal-cost
  // ties are not always broken by index order, then indices are mapped back.
  RandomizedMatrix randomizedMatrix = new RandomizedMatrix(numRegions, regionSlots);
  primaryCost = randomizedMatrix.transform(primaryCost);
  int[] primaryAssignment = new MunkresAssignment(primaryCost).solve();
  primaryAssignment = randomizedMatrix.invertIndices(primaryAssignment);

  // Modify the secondary and tertiary costs for each region/server pair to
  // prevent a region from being assigned to the same rack for both primary
  // and either one of secondary or tertiary.
  for (int i = 0; i < numRegions; i++) {
    int slot = primaryAssignment[i];
    String rack = rackManager.getRack(servers.get(slot / slotsPerServer));
    for (int k = 0; k < servers.size(); k++) {
      if (!rackManager.getRack(servers.get(k)).equals(rack)) {
        continue;
      }
      if (k == slot / slotsPerServer) {
        // Same node, do not place secondary or tertiary here ever.
        for (int m = 0; m < slotsPerServer; m++) {
          secondaryCost[i][k * slotsPerServer + m] = MAX_COST;
          tertiaryCost[i][k * slotsPerServer + m] = MAX_COST;
        }
      } else {
        // Same rack, do not place secondary or tertiary here if possible.
        for (int m = 0; m < slotsPerServer; m++) {
          secondaryCost[i][k * slotsPerServer + m] = AVOID_COST;
          tertiaryCost[i][k * slotsPerServer + m] = AVOID_COST;
        }
      }
    }
  }

  if (munkresForSecondaryAndTertiary) {
    randomizedMatrix = new RandomizedMatrix(numRegions, regionSlots);
    secondaryCost = randomizedMatrix.transform(secondaryCost);
    int[] secondaryAssignment = new MunkresAssignment(secondaryCost).solve();
    secondaryAssignment = randomizedMatrix.invertIndices(secondaryAssignment);

    // Modify the tertiary costs for each region/server pair to ensure that a
    // region is assigned to a tertiary server on the same rack as its secondary
    // server, but not the same server in that rack.
    for (int i = 0; i < numRegions; i++) {
      int slot = secondaryAssignment[i];
      String rack = rackManager.getRack(servers.get(slot / slotsPerServer));
      for (int k = 0; k < servers.size(); k++) {
        if (k == slot / slotsPerServer) {
          // Same node, do not place tertiary here ever.
          for (int m = 0; m < slotsPerServer; m++) {
            tertiaryCost[i][k * slotsPerServer + m] = MAX_COST;
          }
        } else {
          if (rackManager.getRack(servers.get(k)).equals(rack)) {
            continue;
          }
          // Different rack, do not place tertiary here if possible.
          for (int m = 0; m < slotsPerServer; m++) {
            tertiaryCost[i][k * slotsPerServer + m] = AVOID_COST;
          }
        }
      }
    }

    randomizedMatrix = new RandomizedMatrix(numRegions, regionSlots);
    tertiaryCost = randomizedMatrix.transform(tertiaryCost);
    int[] tertiaryAssignment = new MunkresAssignment(tertiaryCost).solve();
    tertiaryAssignment = randomizedMatrix.invertIndices(tertiaryAssignment);

    // Materialize the three assignments as favored-node lists; start codes are
    // dropped (NON_STARTCODE) since the plan only cares about host:port.
    for (int i = 0; i < numRegions; i++) {
      List<ServerName> favoredServers =
          new ArrayList<ServerName>(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM);
      ServerName s = servers.get(primaryAssignment[i] / slotsPerServer);
      favoredServers.add(
          ServerName.valueOf(s.getHostname(), s.getPort(), ServerName.NON_STARTCODE));

      s = servers.get(secondaryAssignment[i] / slotsPerServer);
      favoredServers.add(
          ServerName.valueOf(s.getHostname(), s.getPort(), ServerName.NON_STARTCODE));

      s = servers.get(tertiaryAssignment[i] / slotsPerServer);
      favoredServers.add(
          ServerName.valueOf(s.getHostname(), s.getPort(), ServerName.NON_STARTCODE));
      // Update the assignment plan
      plan.updateAssignmentPlan(regions.get(i), favoredServers);
    }
    LOG.info(
        "Generated the assignment plan for "
            + numRegions
            + " regions from table "
            + tableName
            + " with "
            + servers.size()
            + " region servers");
    LOG.info("Assignment plan for secondary and tertiary generated " + "using MunkresAssignment");
  } else {
    // Alternative path: only the primary comes from Munkres; secondary and
    // tertiary are placed by the FavoredNodeAssignmentHelper heuristics.
    Map<HRegionInfo, ServerName> primaryRSMap = new HashMap<HRegionInfo, ServerName>();
    for (int i = 0; i < numRegions; i++) {
      primaryRSMap.put(regions.get(i), servers.get(primaryAssignment[i] / slotsPerServer));
    }
    FavoredNodeAssignmentHelper favoredNodeHelper =
        new FavoredNodeAssignmentHelper(servers, conf);
    favoredNodeHelper.initialize();
    Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
        favoredNodeHelper.placeSecondaryAndTertiaryWithRestrictions(primaryRSMap);
    for (int i = 0; i < numRegions; i++) {
      List<ServerName> favoredServers =
          new ArrayList<ServerName>(FavoredNodeAssignmentHelper.FAVORED_NODES_NUM);
      HRegionInfo currentRegion = regions.get(i);
      ServerName s = primaryRSMap.get(currentRegion);
      favoredServers.add(
          ServerName.valueOf(s.getHostname(), s.getPort(), ServerName.NON_STARTCODE));

      ServerName[] secondaryAndTertiary = secondaryAndTertiaryMap.get(currentRegion);
      s = secondaryAndTertiary[0];
      favoredServers.add(
          ServerName.valueOf(s.getHostname(), s.getPort(), ServerName.NON_STARTCODE));
      s = secondaryAndTertiary[1];
      favoredServers.add(
          ServerName.valueOf(s.getHostname(), s.getPort(), ServerName.NON_STARTCODE));
      // Update the assignment plan
      plan.updateAssignmentPlan(regions.get(i), favoredServers);
    }
    LOG.info(
        "Generated the assignment plan for "
            + numRegions
            + " regions from table "
            + tableName
            + " with "
            + servers.size()
            + " region servers");
    LOG.info(
        "Assignment plan for secondary and tertiary generated "
            + "using placeSecondaryAndTertiaryWithRestrictions method");
  }
}
/**
 * Dumps the full cluster status to stdout: cluster-level summary, then per-server load,
 * then per-region load for each server. The "// co ..." comments are book callout anchors.
 *
 * <p>NOTE(review): the Admin obtained here is never closed — consider try-with-resources;
 * left untouched because this is documentation-only.
 */
public void printStats() throws IOException {
  Admin admin = connection.getAdmin();
  ClusterStatus status =
      admin.getClusterStatus(); // co ClusterStatusExample-1-GetStatus Get the cluster status.
  System.out.println("Cluster Status:\n--------------");
  System.out.println("HBase Version: " + status.getHBaseVersion());
  System.out.println("Version: " + status.getVersion());
  System.out.println("Cluster ID: " + status.getClusterId());
  System.out.println("Master: " + status.getMaster());
  System.out.println("No. Backup Masters: " + status.getBackupMastersSize());
  System.out.println("Backup Masters: " + status.getBackupMasters());
  System.out.println("No. Live Servers: " + status.getServersSize());
  System.out.println("Servers: " + status.getServers());
  System.out.println("No. Dead Servers: " + status.getDeadServers());
  System.out.println("Dead Servers: " + status.getDeadServerNames());
  System.out.println("No. Regions: " + status.getRegionsCount());
  System.out.println("Regions in Transition: " + status.getRegionsInTransition());
  System.out.println("No. Requests: " + status.getRequestsCount());
  System.out.println("Avg Load: " + status.getAverageLoad());
  System.out.println("Balancer On: " + status.getBalancerOn());
  System.out.println("Is Balancer On: " + status.isBalancerOn());
  System.out.println("Master Coprocessors: " + Arrays.asList(status.getMasterCoprocessors()));
  System.out.println("\nServer Info:\n--------------");
  for (ServerName server :
      status.getServers()) { // co ClusterStatusExample-2-ServerInfo Iterate over the included
    // server instances.
    System.out.println("Hostname: " + server.getHostname());
    System.out.println("Host and Port: " + server.getHostAndPort());
    System.out.println("Server Name: " + server.getServerName());
    System.out.println("RPC Port: " + server.getPort());
    System.out.println("Start Code: " + server.getStartcode());
    ServerLoad load =
        status.getLoad(
            server); // co ClusterStatusExample-3-ServerLoad Retrieve the load details for the
    // current server.
    System.out.println("\nServer Load:\n--------------");
    System.out.println("Info Port: " + load.getInfoServerPort());
    System.out.println("Load: " + load.getLoad());
    System.out.println("Max Heap (MB): " + load.getMaxHeapMB());
    System.out.println("Used Heap (MB): " + load.getUsedHeapMB());
    System.out.println("Memstore Size (MB): " + load.getMemstoreSizeInMB());
    System.out.println("No. Regions: " + load.getNumberOfRegions());
    System.out.println("No. Requests: " + load.getNumberOfRequests());
    System.out.println("Total No. Requests: " + load.getTotalNumberOfRequests());
    System.out.println("No. Requests per Sec: " + load.getRequestsPerSecond());
    System.out.println("No. Read Requests: " + load.getReadRequestsCount());
    System.out.println("No. Write Requests: " + load.getWriteRequestsCount());
    System.out.println("No. Stores: " + load.getStores());
    System.out.println("Store Size Uncompressed (MB): " + load.getStoreUncompressedSizeMB());
    System.out.println("No. Storefiles: " + load.getStorefiles());
    System.out.println("Storefile Size (MB): " + load.getStorefileSizeInMB());
    System.out.println("Storefile Index Size (MB): " + load.getStorefileIndexSizeInMB());
    System.out.println("Root Index Size: " + load.getRootIndexSizeKB());
    System.out.println("Total Bloom Size: " + load.getTotalStaticBloomSizeKB());
    System.out.println("Total Index Size: " + load.getTotalStaticIndexSizeKB());
    System.out.println("Current Compacted Cells: " + load.getCurrentCompactedKVs());
    System.out.println("Total Compacting Cells: " + load.getTotalCompactingKVs());
    System.out.println("Coprocessors1: " + Arrays.asList(load.getRegionServerCoprocessors()));
    System.out.println("Coprocessors2: " + Arrays.asList(load.getRsCoprocessors()));
    System.out.println("Replication Load Sink: " + load.getReplicationLoadSink());
    System.out.println("Replication Load Source: " + load.getReplicationLoadSourceList());
    System.out.println("\nRegion Load:\n--------------");
    for (Map.Entry<byte[], RegionLoad> entry : // co
        // ClusterStatusExample-4-Regions Iterate over the region details of the
        // current server.
        load.getRegionsLoad().entrySet()) {
      System.out.println("Region: " + Bytes.toStringBinary(entry.getKey()));
      RegionLoad regionLoad =
          entry.getValue(); // co ClusterStatusExample-5-RegionLoad Get the load details for the
      // current region.
      System.out.println("Name: " + Bytes.toStringBinary(regionLoad.getName()));
      System.out.println("Name (as String): " + regionLoad.getNameAsString());
      System.out.println("No. Requests: " + regionLoad.getRequestsCount());
      System.out.println("No. Read Requests: " + regionLoad.getReadRequestsCount());
      System.out.println("No. Write Requests: " + regionLoad.getWriteRequestsCount());
      System.out.println("No. Stores: " + regionLoad.getStores());
      System.out.println("No. Storefiles: " + regionLoad.getStorefiles());
      System.out.println("Data Locality: " + regionLoad.getDataLocality());
      System.out.println("Storefile Size (MB): " + regionLoad.getStorefileSizeMB());
      System.out.println("Storefile Index Size (MB): " + regionLoad.getStorefileIndexSizeMB());
      System.out.println("Memstore Size (MB): " + regionLoad.getMemStoreSizeMB());
      System.out.println("Root Index Size: " + regionLoad.getRootIndexSizeKB());
      System.out.println("Total Bloom Size: " + regionLoad.getTotalStaticBloomSizeKB());
      System.out.println("Total Index Size: " + regionLoad.getTotalStaticIndexSizeKB());
      System.out.println("Current Compacted Cells: " + regionLoad.getCurrentCompactedKVs());
      System.out.println("Total Compacting Cells: " + regionLoad.getTotalCompactingKVs());
      System.out.println();
    }
  }
}
public void fillUp( TableName tableName, SnapshotOfRegionAssignmentFromMeta snapshot, Map<String, Map<String, Float>> regionLocalityMap) { // Set the table name this.tableName = tableName; // Get all the regions for this table List<HRegionInfo> regionInfoList = snapshot.getTableToRegionMap().get(tableName); // Get the total region num for the current table this.totalRegions = regionInfoList.size(); // Get the existing assignment plan FavoredNodesPlan favoredNodesAssignment = snapshot.getExistingAssignmentPlan(); // Get the region to region server mapping Map<HRegionInfo, ServerName> currentAssignment = snapshot.getRegionToRegionServerMap(); // Initialize the server to its hosing region counter map Map<ServerName, Integer> serverToHostingRegionCounterMap = new HashMap<ServerName, Integer>(); Map<ServerName, Integer> primaryRSToRegionCounterMap = new HashMap<ServerName, Integer>(); Map<ServerName, Set<ServerName>> primaryToSecTerRSMap = new HashMap<ServerName, Set<ServerName>>(); // Check the favored nodes and its locality information // Also keep tracker of the most loaded and least loaded region servers for (HRegionInfo region : regionInfoList) { try { ServerName currentRS = currentAssignment.get(region); // Handle unassigned regions if (currentRS == null) { unAssignedRegionsList.add(region); continue; } // Keep updating the server to is hosting region counter map Integer hostRegionCounter = serverToHostingRegionCounterMap.get(currentRS); if (hostRegionCounter == null) { hostRegionCounter = Integer.valueOf(0); } hostRegionCounter = hostRegionCounter.intValue() + 1; serverToHostingRegionCounterMap.put(currentRS, hostRegionCounter); // Get the favored nodes from the assignment plan and verify it. 
List<ServerName> favoredNodes = favoredNodesAssignment.getFavoredNodes(region); if (favoredNodes == null || favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) { regionsWithoutValidFavoredNodes.add(region); continue; } // Get the primary, secondary and tertiary region server ServerName primaryRS = favoredNodes.get(FavoredNodesPlan.Position.PRIMARY.ordinal()); ServerName secondaryRS = favoredNodes.get(FavoredNodesPlan.Position.SECONDARY.ordinal()); ServerName tertiaryRS = favoredNodes.get(FavoredNodesPlan.Position.TERTIARY.ordinal()); // Update the primary rs to its region set map Integer regionCounter = primaryRSToRegionCounterMap.get(primaryRS); if (regionCounter == null) { regionCounter = Integer.valueOf(0); } regionCounter = regionCounter.intValue() + 1; primaryRSToRegionCounterMap.put(primaryRS, regionCounter); // Update the primary rs to secondary and tertiary rs map Set<ServerName> secAndTerSet = primaryToSecTerRSMap.get(primaryRS); if (secAndTerSet == null) { secAndTerSet = new HashSet<ServerName>(); } secAndTerSet.add(secondaryRS); secAndTerSet.add(tertiaryRS); primaryToSecTerRSMap.put(primaryRS, secAndTerSet); // Get the position of the current region server in the favored nodes list FavoredNodesPlan.Position favoredNodePosition = FavoredNodesPlan.getFavoredServerPosition(favoredNodes, currentRS); // Handle the non favored assignment. if (favoredNodePosition == null) { nonFavoredAssignedRegionList.add(region); continue; } // Increase the favored nodes assignment. this.favoredNodes[favoredNodePosition.ordinal()]++; totalFavoredAssignments++; // Summary the locality information for each favored nodes if (regionLocalityMap != null) { // Set the enforce locality as true; this.enforceLocality = true; // Get the region degree locality map Map<String, Float> regionDegreeLocalityMap = regionLocalityMap.get(region.getEncodedName()); if (regionDegreeLocalityMap == null) { continue; // ignore the region which doesn't have any store files. 
} // Get the locality summary for each favored nodes for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) { ServerName favoredNode = favoredNodes.get(p.ordinal()); // Get the locality for the current favored nodes Float locality = regionDegreeLocalityMap.get(favoredNode.getHostname()); if (locality != null) { this.favoredNodesLocalitySummary[p.ordinal()] += locality; } } // Get the locality summary for the current region server Float actualLocality = regionDegreeLocalityMap.get(currentRS.getHostname()); if (actualLocality != null) { this.actualLocalitySummary += actualLocality; } } } catch (Exception e) { LOG.error( "Cannot verify the region assignment for region " + ((region == null) ? " null " : region.getRegionNameAsString()) + "because of " + e); } } float dispersionScoreSummary = 0; float dispersionNumSummary = 0; // Calculate the secondary score for each primary region server for (Map.Entry<ServerName, Integer> entry : primaryRSToRegionCounterMap.entrySet()) { ServerName primaryRS = entry.getKey(); Integer regionsOnPrimary = entry.getValue(); // Process the dispersion number and score float dispersionScore = 0; int dispersionNum = 0; if (primaryToSecTerRSMap.get(primaryRS) != null && regionsOnPrimary.intValue() != 0) { dispersionNum = primaryToSecTerRSMap.get(primaryRS).size(); dispersionScore = dispersionNum / ((float) regionsOnPrimary.intValue() * 2); } // Update the max dispersion score if (dispersionScore > this.maxDispersionScore) { this.maxDispersionScoreServerSet.clear(); this.maxDispersionScoreServerSet.add(primaryRS); this.maxDispersionScore = dispersionScore; } else if (dispersionScore == this.maxDispersionScore) { this.maxDispersionScoreServerSet.add(primaryRS); } // Update the max dispersion num if (dispersionNum > this.maxDispersionNum) { this.maxDispersionNumServerSet.clear(); this.maxDispersionNumServerSet.add(primaryRS); this.maxDispersionNum = dispersionNum; } else if (dispersionNum == this.maxDispersionNum) { 
this.maxDispersionNumServerSet.add(primaryRS); } // Update the min dispersion score if (dispersionScore < this.minDispersionScore) { this.minDispersionScoreServerSet.clear(); this.minDispersionScoreServerSet.add(primaryRS); this.minDispersionScore = dispersionScore; } else if (dispersionScore == this.minDispersionScore) { this.minDispersionScoreServerSet.add(primaryRS); } // Update the min dispersion num if (dispersionNum < this.minDispersionNum) { this.minDispersionNumServerSet.clear(); this.minDispersionNumServerSet.add(primaryRS); this.minDispersionNum = dispersionNum; } else if (dispersionNum == this.minDispersionNum) { this.minDispersionNumServerSet.add(primaryRS); } dispersionScoreSummary += dispersionScore; dispersionNumSummary += dispersionNum; } // Update the avg dispersion score if (primaryRSToRegionCounterMap.keySet().size() != 0) { this.avgDispersionScore = dispersionScoreSummary / (float) primaryRSToRegionCounterMap.keySet().size(); this.avgDispersionNum = dispersionNumSummary / (float) primaryRSToRegionCounterMap.keySet().size(); } // Fill up the most loaded and least loaded region server information for (Map.Entry<ServerName, Integer> entry : serverToHostingRegionCounterMap.entrySet()) { ServerName currentRS = entry.getKey(); int hostRegionCounter = entry.getValue().intValue(); // Update the most loaded region server list and maxRegionsOnRS if (hostRegionCounter > this.maxRegionsOnRS) { maxRegionsOnRS = hostRegionCounter; this.mostLoadedRSSet.clear(); this.mostLoadedRSSet.add(currentRS); } else if (hostRegionCounter == this.maxRegionsOnRS) { this.mostLoadedRSSet.add(currentRS); } // Update the least loaded region server list and minRegionsOnRS if (hostRegionCounter < this.minRegionsOnRS) { this.minRegionsOnRS = hostRegionCounter; this.leastLoadedRSSet.clear(); this.leastLoadedRSSet.add(currentRS); } else if (hostRegionCounter == this.minRegionsOnRS) { this.leastLoadedRSSet.add(currentRS); } } // and total region servers this.totalRegionServers = 
serverToHostingRegionCounterMap.keySet().size(); this.avgRegionsOnRS = (totalRegionServers == 0) ? 0 : (totalRegions / (float) totalRegionServers); // Set the isFilledUp as true isFilledUp = true; }
/**
 * Tests that MetaReader rides over a server that throws "Server not running" IOEs on
 * {@code openScanner}: three consecutive failures are retried, and the fourth attempt
 * succeeds, returning the expected meta row.
 *
 * <p>See <a href="https://issues.apache.org/jira/browse/HBASE-3446">HBASE-3446</a>.
 *
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testRideOverServerNotRunning() throws IOException, InterruptedException {
  // Need a zk watcher.
  ZooKeeperWatcher zkw =
      new ZooKeeperWatcher(
          UTIL.getConfiguration(), this.getClass().getSimpleName(), ABORTABLE, true);
  // This is a servername we use in a few places below.
  ServerName sn = new ServerName("example.com", 1234, System.currentTimeMillis());
  HConnection connection = null;
  CatalogTracker ct = null;
  try {
    // Mock an HRegionInterface. Our mock implementation will fail a few
    // times when we go to open a scanner.
    final HRegionInterface implementation = Mockito.mock(HRegionInterface.class);
    // When openScanner called throw IOE 'Server not running' a few times
    // before we return a scanner id. Whats WEIRD is that these
    // exceptions do not show in the log because they are caught and only
    // printed if we FAIL. We eventually succeed after retry so these don't
    // show. We will know if they happened or not because we will ask
    // mockito at the end of this test to verify that openscanner was indeed
    // called the wanted number of times.
    final long scannerid = 123L;
    Mockito.when(implementation.openScanner((byte[]) Mockito.any(), (Scan) Mockito.any()))
        .thenThrow(new IOException("Server not running (1 of 3)"))
        .thenThrow(new IOException("Server not running (2 of 3)"))
        .thenThrow(new IOException("Server not running (3 of 3)"))
        .thenReturn(scannerid);
    // Make it so a verifiable answer comes back when next is called. Return
    // the verifiable answer and then a null so we stop scanning. Our
    // verifiable answer is something that looks like a row in META with
    // a server and startcode that is that of the above defined servername.
    List<KeyValue> kvs = new ArrayList<KeyValue>();
    final byte[] rowToVerify = Bytes.toBytes("rowToVerify");
    // The three catalog-family cells that make up a meta row: region info,
    // hosting server, and server startcode.
    kvs.add(
        new KeyValue(
            rowToVerify,
            HConstants.CATALOG_FAMILY,
            HConstants.REGIONINFO_QUALIFIER,
            Writables.getBytes(HRegionInfo.FIRST_META_REGIONINFO)));
    kvs.add(
        new KeyValue(
            rowToVerify,
            HConstants.CATALOG_FAMILY,
            HConstants.SERVER_QUALIFIER,
            Bytes.toBytes(sn.getHostAndPort())));
    kvs.add(
        new KeyValue(
            rowToVerify,
            HConstants.CATALOG_FAMILY,
            HConstants.STARTCODE_QUALIFIER,
            Bytes.toBytes(sn.getStartcode())));
    final Result[] result = new Result[] {new Result(kvs)};
    // First next() returns our fabricated row; second returns null so scanning stops.
    Mockito.when(implementation.next(Mockito.anyLong(), Mockito.anyInt()))
        .thenReturn(result)
        .thenReturn(null);
    // Associate a spied-upon HConnection with UTIL.getConfiguration. Need
    // to shove this in here first so it gets picked up all over; e.g. by
    // HTable.
    connection = HConnectionTestingUtility.getSpiedConnection(UTIL.getConfiguration());
    // Fix the location lookup so it 'works' though no network. First
    // make an 'any location' object.
    final HRegionLocation anyLocation =
        new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, sn.getHostname(), sn.getPort());
    // Return the any location object when locateRegion is called in HTable
    // constructor and when its called by ServerCallable (it uses getRegionLocation).
    // The ugly format below comes of 'Important gotcha on spying real objects!' from
    // http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html
    Mockito.doReturn(anyLocation)
        .when(connection)
        .locateRegion((byte[]) Mockito.any(), (byte[]) Mockito.any());
    Mockito.doReturn(anyLocation)
        .when(connection)
        .getRegionLocation((byte[]) Mockito.any(), (byte[]) Mockito.any(), Mockito.anyBoolean());
    // Now shove our HRI implementation into the spied-upon connection.
    Mockito.doReturn(implementation)
        .when(connection)
        .getHRegionConnection(Mockito.anyString(), Mockito.anyInt());
    // Now start up the catalogtracker with our doctored Connection.
    ct = new CatalogTracker(zkw, null, connection, ABORTABLE, 0);
    ct.start();
    // Scan meta for user tables and verify we got back expected answer.
    NavigableMap<HRegionInfo, Result> hris = MetaReader.getServerUserRegions(ct, sn);
    assertTrue(hris.size() == 1);
    assertTrue(hris.firstEntry().getKey().equals(HRegionInfo.FIRST_META_REGIONINFO));
    assertTrue(Bytes.equals(rowToVerify, hris.firstEntry().getValue().getRow()));
    // Finally verify that openscanner was called four times -- three times
    // with exception and then on 4th attempt we succeed.
    Mockito.verify(implementation, Mockito.times(4))
        .openScanner((byte[]) Mockito.any(), (Scan) Mockito.any());
  } finally {
    // Always release the tracker, cached connection, and zk watcher, even on failure.
    if (ct != null) ct.stop();
    HConnectionManager.deleteConnection(UTIL.getConfiguration(), true);
    zkw.close();
  }
}