/**
 * Create a new {@link HRegionInfo} from the snapshot region info. Keep the same startKey, endKey,
 * regionId and split information but change the table name.
 *
 * @param snapshotRegionInfo Info for region to clone.
 * @return the new HRegionInfo instance
 */
public HRegionInfo cloneRegionInfo(final HRegionInfo snapshotRegionInfo) {
  return new HRegionInfo(
      tableDesc.getName(),
      snapshotRegionInfo.getStartKey(),
      snapshotRegionInfo.getEndKey(),
      snapshotRegionInfo.isSplit(),
      snapshotRegionInfo.getRegionId());
}
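A minimal usage sketch of the clone pattern above. It is a fragment, not a complete class, and assumes the byte[]-based HRegionInfo constructor used in this code base (org.apache.hadoop.hbase.HRegionInfo, org.apache.hadoop.hbase.util.Bytes); the table names and keys are illustrative only.

// Region boundaries as they might be recorded in a snapshot (illustrative values).
HRegionInfo snapshotRegion = new HRegionInfo(
    Bytes.toBytes("source_table"),
    Bytes.toBytes("aaa"), Bytes.toBytes("zzz"),
    false, System.currentTimeMillis());

// Same boundaries, split flag and regionId, but pointed at the restore target table.
HRegionInfo restoredRegion = new HRegionInfo(
    Bytes.toBytes("restored_table"),
    snapshotRegion.getStartKey(),
    snapshotRegion.getEndKey(),
    snapshotRegion.isSplit(),
    snapshotRegion.getRegionId());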
/**
 * Construct a copy of another HRegionInfo.
 *
 * @param other the HRegionInfo to copy
 */
public HRegionInfo(HRegionInfo other) {
  super();
  this.endKey = other.getEndKey();
  this.offLine = other.isOffline();
  this.regionId = other.getRegionId();
  this.regionName = other.getRegionName();
  this.split = other.isSplit();
  this.startKey = other.getStartKey();
  this.hashCode = other.hashCode();
  this.encodedName = other.getEncodedName();
  this.tableName = other.tableName;
  this.replicaId = other.replicaId;
}
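A short copy-constructor sketch (a fragment, assuming the TableName-based HRegionInfo constructor available in the same code base; table and key names are hypothetical).

// The copy constructor yields an equal but distinct HRegionInfo instance.
HRegionInfo original = new HRegionInfo(
    TableName.valueOf("t1"), Bytes.toBytes("a"), Bytes.toBytes("z"));
HRegionInfo copy = new HRegionInfo(original);
assert copy.equals(original);   // same table, keys, regionId, replicaId
assert copy != original;        // but a separate object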
/**
 * Convert a HRegionInfo to a RegionInfo
 *
 * @param info the HRegionInfo to convert
 * @return the converted RegionInfo
 */
public static RegionInfo convert(final HRegionInfo info) {
  if (info == null) return null;
  RegionInfo.Builder builder = RegionInfo.newBuilder();
  builder.setTableName(ProtobufUtil.toProtoTableName(info.getTable()));
  builder.setRegionId(info.getRegionId());
  if (info.getStartKey() != null) {
    builder.setStartKey(ByteStringer.wrap(info.getStartKey()));
  }
  if (info.getEndKey() != null) {
    builder.setEndKey(ByteStringer.wrap(info.getEndKey()));
  }
  builder.setOffline(info.isOffline());
  builder.setSplit(info.isSplit());
  builder.setReplicaId(info.getReplicaId());
  return builder.build();
}
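A hedged usage sketch of the converter: it assumes convert() is the static method on HRegionInfo shown above and that RegionInfo is the protobuf-generated message (HBaseProtos.RegionInfo) imported under that name, as in the same code base. Names are illustrative.

HRegionInfo hri = new HRegionInfo(
    TableName.valueOf("t1"), Bytes.toBytes("a"), Bytes.toBytes("z"));
RegionInfo proto = HRegionInfo.convert(hri);
byte[] wire = proto.toByteArray();   // protobuf wire form of the region metadata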
/**
 * Add a region to RegionStates with the specified state. If the region is already in
 * RegionStates, this call has no effect, and the original state is returned.
 *
 * @param hri the region info to create a state for
 * @param newState the state to set the region to
 * @param serverName the server the region is transitioning on
 * @param lastHost the last server that hosted the region
 * @return the current state
 */
public synchronized RegionState createRegionState(
    final HRegionInfo hri, State newState, ServerName serverName, ServerName lastHost) {
  if (newState == null || (newState == State.OPEN && serverName == null)) {
    newState = State.OFFLINE;
  }
  if (hri.isOffline() && hri.isSplit()) {
    newState = State.SPLIT;
    serverName = null;
  }
  String encodedName = hri.getEncodedName();
  RegionState regionState = regionStates.get(encodedName);
  if (regionState != null) {
    LOG.warn("Tried to create a state for a region already in RegionStates, "
        + "used existing: " + regionState + ", ignored new: " + newState);
  } else {
    regionState = new RegionState(hri, newState, serverName);
    putRegionState(regionState);
    if (newState == State.OPEN) {
      if (!serverName.equals(lastHost)) {
        LOG.warn("Open region's last host " + lastHost
            + " should be the same as the current one " + serverName
            + ", ignored the last and used the current one");
        lastHost = serverName;
      }
      lastAssignments.put(encodedName, lastHost);
      regionAssignments.put(hri, lastHost);
    } else if (!isOneOfStates(regionState, State.MERGED, State.SPLIT, State.OFFLINE)) {
      regionsInTransition.put(encodedName, regionState);
    }
    if (lastHost != null && newState != State.SPLIT) {
      addToServerHoldings(lastHost, hri);
      if (newState != State.OPEN) {
        oldAssignments.put(encodedName, lastHost);
      }
    }
  }
  return regionState;
}
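A hypothetical caller sketch (not self-contained: regionStates, hri and regionLocation stand in for fields and locals of the surrounding master code), seeding an OPEN state for a region discovered in hbase:meta at startup.

// If the region already had a state, the existing one is returned and the new one ignored.
RegionState state = regionStates.createRegionState(
    hri,              // region read from hbase:meta
    State.OPEN,       // it was open before the master restart
    regionLocation,   // server currently recorded in meta
    regionLocation);  // last known host, the same server here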
/**
 * At cluster clean re/start, mark all user regions closed except those of tables that are
 * excluded, such as disabled/disabling/enabling tables. All user regions and their previous
 * locations are returned.
 */
synchronized Map<HRegionInfo, ServerName> closeAllUserRegions(Set<TableName> excludedTables) {
  boolean noExcludeTables = excludedTables == null || excludedTables.isEmpty();
  Set<HRegionInfo> toBeClosed = new HashSet<HRegionInfo>(regionStates.size());
  for (RegionState state : regionStates.values()) {
    HRegionInfo hri = state.getRegion();
    if (state.isSplit() || hri.isSplit()) {
      continue;
    }
    TableName tableName = hri.getTable();
    if (!TableName.META_TABLE_NAME.equals(tableName)
        && (noExcludeTables || !excludedTables.contains(tableName))) {
      toBeClosed.add(hri);
    }
  }
  Map<HRegionInfo, ServerName> allUserRegions =
      new HashMap<HRegionInfo, ServerName>(toBeClosed.size());
  for (HRegionInfo hri : toBeClosed) {
    RegionState regionState = updateRegionState(hri, State.CLOSED);
    allUserRegions.put(hri, regionState.getServerName());
  }
  return allUserRegions;
}
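A hypothetical sketch of the clean-startup caller (regionStates and the contents of the excluded set are assumed to come from the surrounding master code): tables that should not be brought online are excluded, everything else is marked CLOSED, and the previous locations are kept as re-assignment hints.

Set<TableName> excluded = new HashSet<TableName>();
// ... populate with DISABLED / DISABLING / ENABLING tables ...
Map<HRegionInfo, ServerName> previousLocations = regionStates.closeAllUserRegions(excluded);
// previousLocations maps each closed user region to the server that last hosted it,
// which the assignment logic can use as a placement hint.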
/**
 * Execute the core common portions of taking a snapshot. The {@link #snapshotRegions(List)} call
 * should be implemented for each snapshot flavor.
 */
@Override
@edu.umd.cs.findbugs.annotations.SuppressWarnings(
    value = "REC_CATCH_EXCEPTION",
    justification = "Intentional")
public void process() {
  String msg = "Running " + snapshot.getType() + " table snapshot " + snapshot.getName() + " "
      + eventType + " on table " + snapshotTable;
  LOG.info(msg);
  status.setStatus(msg);
  try {
    // If regions move after this meta scan, the region-specific snapshot should fail, triggering
    // an external exception that gets captured here.

    // write down the snapshot info in the working directory
    SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, fs);
    snapshotManifest.addTableDescriptor(this.htd);
    monitor.rethrowException();

    List<Pair<HRegionInfo, ServerName>> regionsAndLocations;
    if (TableName.META_TABLE_NAME.equals(snapshotTable)) {
      regionsAndLocations =
          new MetaTableLocator().getMetaRegionsAndLocations(server.getZooKeeper());
    } else {
      regionsAndLocations = MetaTableAccessor.getTableRegionsAndLocations(
          server.getConnection(), snapshotTable, false);
    }

    // run the snapshot
    snapshotRegions(regionsAndLocations);
    monitor.rethrowException();

    // extract each pair to separate lists
    Set<String> serverNames = new HashSet<String>();
    for (Pair<HRegionInfo, ServerName> p : regionsAndLocations) {
      if (p != null && p.getFirst() != null && p.getSecond() != null) {
        HRegionInfo hri = p.getFirst();
        if (hri.isOffline() && (hri.isSplit() || hri.isSplitParent())) continue;
        serverNames.add(p.getSecond().toString());
      }
    }

    // flush the in-memory state, and write the single manifest
    status.setStatus("Consolidate snapshot: " + snapshot.getName());
    snapshotManifest.consolidate();

    // verify the snapshot is valid
    status.setStatus("Verifying snapshot: " + snapshot.getName());
    verifier.verifySnapshot(this.workingDir, serverNames);

    // complete the snapshot, atomically moving from tmp to .snapshot dir.
    completeSnapshot(this.snapshotDir, this.workingDir, this.fs);
    msg = "Snapshot " + snapshot.getName() + " of table " + snapshotTable + " completed";
    status.markComplete(msg);
    LOG.info(msg);
    metricsSnapshot.addSnapshot(status.getCompletionTimestamp() - status.getStartTime());
  } catch (Exception e) { // FindBugs: REC_CATCH_EXCEPTION
    status.abort("Failed to complete snapshot " + snapshot.getName() + " on table "
        + snapshotTable + " because " + e.getMessage());
    String reason = "Failed taking snapshot " + ClientSnapshotDescriptionUtils.toString(snapshot)
        + " due to exception:" + e.getMessage();
    LOG.error(reason, e);
    ForeignException ee = new ForeignException(reason, e);
    monitor.receive(ee);
    // need to mark this completed to close off and allow cleanup to happen.
    cancel(reason);
  } finally {
    LOG.debug("Launching cleanup of working dir:" + workingDir);
    try {
      // If the working dir is still present, the snapshot has failed, so delete it.
      if (fs.exists(workingDir) && !this.fs.delete(workingDir, true)) {
        LOG.error("Couldn't delete snapshot working directory:" + workingDir);
      }
    } catch (IOException e) {
      LOG.error("Couldn't delete snapshot working directory:" + workingDir);
    }
    releaseTableLock();
  }
}
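For context, a hedged sketch of the client-side call that ends up scheduling a snapshot handler like the one above on the master, using the standard HBase Admin API (cluster configuration and connectivity are assumed; the snapshot and table names are illustrative).

Configuration conf = HBaseConfiguration.create();
try (Connection conn = ConnectionFactory.createConnection(conf);
     Admin admin = conn.getAdmin()) {
  // Blocks until the master reports the snapshot complete, or throws on failure.
  admin.snapshot("t1_snapshot", TableName.valueOf("t1"));
}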