  // TODO consider parallelizing these operations since they are independent. Right now it's just
  // easier to keep them serial, though.
  @Override
  public void snapshotRegions(List<Pair<HRegionInfo, ServerName>> regionsAndLocations)
      throws IOException, KeeperException {
    try {
      timeoutInjector.start();

      // 1. get all the regions hosting this table.

      // extract the region info from each pair; the server locations are not needed here
      Set<HRegionInfo> regions = new HashSet<HRegionInfo>();
      for (Pair<HRegionInfo, ServerName> p : regionsAndLocations) {
        regions.add(p.getFirst());
      }

      // 2. for each region, write all the info to disk
      String msg =
          "Starting to write region info and WALs for regions for offline snapshot:"
              + ClientSnapshotDescriptionUtils.toString(snapshot);
      LOG.info(msg);
      status.setStatus(msg);

      ThreadPoolExecutor exec = SnapshotManifest.createExecutor(conf, "DisabledTableSnapshot");
      try {
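        // Write each region's manifest entry using the pool; editRegions submits one task per
        // region and waits for all of them to finish before returning.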
        ModifyRegionUtils.editRegions(
            exec,
            regions,
            new ModifyRegionUtils.RegionEditTask() {
              @Override
              public void editRegion(final HRegionInfo regionInfo) throws IOException {
                snapshotManifest.addRegion(FSUtils.getTableDir(rootDir, snapshotTable), regionInfo);
              }
            });
      } finally {
        exec.shutdown();
      }
    } catch (Exception e) {
      // make sure we capture the exception to propagate back to the client later
      String reason =
          "Failed snapshot "
              + ClientSnapshotDescriptionUtils.toString(snapshot)
              + " due to exception:"
              + e.getMessage();
      ForeignException ee = new ForeignException(reason, e);
      monitor.receive(ee);
      status.abort("Snapshot of table: " + snapshotTable + " failed because " + e.getMessage());
    } finally {
      LOG.debug(
          "Marking snapshot" + ClientSnapshotDescriptionUtils.toString(snapshot) + " as finished.");

      // 3. mark the timer as finished - even if we got an exception, we don't need to time the
      // operation any further
      timeoutInjector.complete();
    }
  }
  /**
   * Clone the specified regions. For each region, create a new region and an HFileLink for each
   * of its hfiles.
   */
  private HRegionInfo[] cloneHdfsRegions(final List<HRegionInfo> regions) throws IOException {
    if (regions == null || regions.isEmpty()) return null;

    final Map<String, HRegionInfo> snapshotRegions =
        new HashMap<String, HRegionInfo>(regions.size());

    // clone region info (change embedded tableName with the new one)
    HRegionInfo[] clonedRegionsInfo = new HRegionInfo[regions.size()];
    for (int i = 0; i < clonedRegionsInfo.length; ++i) {
      // clone the region info from the snapshot region info
      HRegionInfo snapshotRegionInfo = regions.get(i);
      clonedRegionsInfo[i] = cloneRegionInfo(snapshotRegionInfo);

      // add the region name mapping between snapshot and cloned
      String snapshotRegionName = snapshotRegionInfo.getEncodedName();
      String clonedRegionName = clonedRegionsInfo[i].getEncodedName();
      regionsMap.put(Bytes.toBytes(snapshotRegionName), Bytes.toBytes(clonedRegionName));
      LOG.info("clone region=" + snapshotRegionName + " as " + clonedRegionName);

      // Add mapping between cloned region name and snapshot region info
      snapshotRegions.put(clonedRegionName, snapshotRegionInfo);
    }

    // create the regions on disk
    ModifyRegionUtils.createRegions(
        conf,
        tableDir.getParent(),
        tableDesc,
        clonedRegionsInfo,
        new ModifyRegionUtils.RegionFillTask() {
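          // Invoked once per newly created region: populate it from the matching snapshot
          // region by creating HFileLinks to that region's hfiles.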
          @Override
          public void fillRegion(final HRegion region) throws IOException {
            cloneRegion(region, snapshotRegions.get(region.getRegionInfo().getEncodedName()));
          }
        });

    return clonedRegionsInfo;
  }
  @Override
  protected Flow executeFromState(final MasterProcedureEnv env, TruncateTableState state)
      throws InterruptedException {
    if (LOG.isTraceEnabled()) {
      LOG.trace(this + " execute state=" + state);
    }
    try {
      switch (state) {
        case TRUNCATE_TABLE_PRE_OPERATION:
          // Verify if we can truncate the table
          if (!prepareTruncate(env)) {
            assert isFailed() : "the truncate should have an exception here";
            return Flow.NO_MORE_STATE;
          }

          // TODO: Move out... in the acquireLock()
          LOG.debug("waiting for '" + getTableName() + "' regions in transition");
          regions = ProcedureSyncWait.getRegionsFromMeta(env, getTableName());
          assert regions != null && !regions.isEmpty() : "unexpected 0 regions";
          ProcedureSyncWait.waitRegionInTransition(env, regions);

          // Call coprocessors
          preTruncate(env);

          setNextState(TruncateTableState.TRUNCATE_TABLE_REMOVE_FROM_META);
          break;
        case TRUNCATE_TABLE_REMOVE_FROM_META:
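          // Grab the table descriptor before the table is removed from META; the later states
          // need it to re-create the filesystem layout and the META entries.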
          hTableDescriptor =
              env.getMasterServices()
                  .getTableDescriptors()
                  .getDescriptor(tableName)
                  .getHTableDescriptor();
          DeleteTableProcedure.deleteFromMeta(env, getTableName(), regions);
          DeleteTableProcedure.deleteAssignmentState(env, getTableName());
          setNextState(TruncateTableState.TRUNCATE_TABLE_CLEAR_FS_LAYOUT);
          break;
        case TRUNCATE_TABLE_CLEAR_FS_LAYOUT:
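          // Remove the existing region directories from the filesystem.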
          DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, true);
          if (!preserveSplits) {
            // if we are not preserving splits, generate a new single region
            regions = Arrays.asList(ModifyRegionUtils.createHRegionInfos(hTableDescriptor, null));
          }
          setNextState(TruncateTableState.TRUNCATE_TABLE_CREATE_FS_LAYOUT);
          break;
        case TRUNCATE_TABLE_CREATE_FS_LAYOUT:
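          // Re-create the table directory and region layout on disk from the cached descriptor,
          // then refresh the descriptor cache.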
          regions = CreateTableProcedure.createFsLayout(env, hTableDescriptor, regions);
          CreateTableProcedure.updateTableDescCache(env, getTableName());
          setNextState(TruncateTableState.TRUNCATE_TABLE_ADD_TO_META);
          break;
        case TRUNCATE_TABLE_ADD_TO_META:
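          // Register the freshly created regions in hbase:meta.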
          regions = CreateTableProcedure.addTableToMeta(env, hTableDescriptor, regions);
          setNextState(TruncateTableState.TRUNCATE_TABLE_ASSIGN_REGIONS);
          break;
        case TRUNCATE_TABLE_ASSIGN_REGIONS:
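          // Assign the new regions; the cached descriptor and region list are not needed once
          // assignment has been kicked off.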
          CreateTableProcedure.assignRegions(env, getTableName(), regions);
          setNextState(TruncateTableState.TRUNCATE_TABLE_POST_OPERATION);
          hTableDescriptor = null;
          regions = null;
          break;
        case TRUNCATE_TABLE_POST_OPERATION:
          postTruncate(env);
          LOG.debug("truncate '" + getTableName() + "' completed");
          return Flow.NO_MORE_STATE;
        default:
          throw new UnsupportedOperationException("unhandled state=" + state);
      }
    } catch (HBaseException | IOException e) {
      LOG.warn("Retriable error trying to truncate table=" + getTableName() + " state=" + state, e);
    }
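    // The state was not advanced on error, so returning HAS_MORE_STATE lets the framework
    // retry this step.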
    return Flow.HAS_MORE_STATE;
  }