Example #1
 /**
  * Parses an HRegionInfo instance from the passed in stream. Presumes the HRegionInfo was
  * serialized to the stream with {@link #toDelimitedByteArray()}.
  *
  * @param in stream to read the HRegionInfo from
  * @return An instance of HRegionInfo.
  * @throws IOException if the stream cannot be read
  */
 public static HRegionInfo parseFrom(final DataInputStream in) throws IOException {
   // I need to be able to move back in the stream if this is not a pb serialization so I can
   // do the Writable decoding instead.
   int pblen = ProtobufUtil.lengthOfPBMagic();
   byte[] pbuf = new byte[pblen];
   if (in.markSupported()) { // read it with mark()
     in.mark(pblen);
   }
   int read =
       in.read(pbuf); // assumption: if Writable serialization, it should be longer than pblen.
   if (read != pblen) throw new IOException("read=" + read + ", wanted=" + pblen);
   if (ProtobufUtil.isPBMagicPrefix(pbuf)) {
     return convert(HBaseProtos.RegionInfo.parseDelimitedFrom(in));
   } else {
     // Presume Writables.  Need to reset the stream since it didn't start w/ pb.
     if (in.markSupported()) {
       in.reset();
       HRegionInfo hri = new HRegionInfo();
       hri.readFields(in);
       return hri;
     } else {
       // we cannot use BufferedInputStream, it consumes more than we read from the underlying IS
       ByteArrayInputStream bais = new ByteArrayInputStream(pbuf);
       SequenceInputStream sis = new SequenceInputStream(bais, in); // concatenate input streams
       HRegionInfo hri = new HRegionInfo();
       hri.readFields(new DataInputStream(sis));
       return hri;
     }
   }
 }
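A minimal round-trip sketch of how this parser can be exercised, assuming an existing HRegionInfo named hri (toDelimitedByteArray() is the writer referenced in the javadoc above):

  byte[] bytes = hri.toDelimitedByteArray(); // pb magic prefix + delimited RegionInfo
  DataInputStream dis = new DataInputStream(new ByteArrayInputStream(bytes));
  HRegionInfo copy = HRegionInfo.parseFrom(dis); // takes the protobuf branch above
  assert copy.equals(hri);

ByteArrayInputStream supports mark(), so this exercises the mark()/reset() path rather than the SequenceInputStream fallback.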
Example #2
File: HFile.java  Project: joshua-g/c5
 /**
  * Populate this instance with what we find on the passed in <code>in</code> stream. Can
  * deserialize either protobuf or the old Writables format.
  *
  * @param in stream to read from
  * @throws IOException if the stream cannot be read
  * @see #write(DataOutputStream)
  */
 void read(final DataInputStream in) throws IOException {
   // This code is tested over in TestHFileReaderV1 where we read an old hfile w/ this new code.
   int pblen = ProtobufUtil.lengthOfPBMagic();
   byte[] pbuf = new byte[pblen];
   if (in.markSupported()) in.mark(pblen);
   int read = in.read(pbuf);
   if (read != pblen) throw new IOException("read=" + read + ", wanted=" + pblen);
   if (ProtobufUtil.isPBMagicPrefix(pbuf)) {
     parsePB(HFileProtos.FileInfoProto.parseDelimitedFrom(in));
   } else {
     if (in.markSupported()) {
       in.reset();
       parseWritable(in);
     } else {
       // We cannot use BufferedInputStream, it consumes more than we read from the underlying IS
       ByteArrayInputStream bais = new ByteArrayInputStream(pbuf);
       SequenceInputStream sis = new SequenceInputStream(bais, in); // Concatenate input streams
        // TODO: Am I leaking anything here wrapping the passed in stream? We are not calling
        // close on the wrapped streams but they should be let go after we leave this context?
        // I see that we keep a reference to the passed in inputstream but since we no longer
        // have a reference to this after we leave, we should be ok.
       parseWritable(new DataInputStream(sis));
     }
   }
 }
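The else branch above is a general trick worth isolating: when a stream cannot be rewound, the bytes already consumed are spliced back in front of the remainder. A minimal sketch of that pattern (the helper name is ours, not HBase's):

  // Rebuild a readable stream after peeking at some bytes from a non-markable stream.
  static DataInputStream unread(byte[] consumed, InputStream rest) {
    // replay the consumed bytes first, then continue on the live stream
    return new DataInputStream(
        new SequenceInputStream(new ByteArrayInputStream(consumed), rest));
  }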
  /**
   * Convert a ClusterStatus to a protobuf ClusterStatus
   *
   * @return the protobuf ClusterStatus
   */
  public ClusterStatusProtos.ClusterStatus convert() {
    ClusterStatusProtos.ClusterStatus.Builder builder =
        ClusterStatusProtos.ClusterStatus.newBuilder();
    builder.setHbaseVersion(HBaseVersionFileContent.newBuilder().setVersion(getHBaseVersion()));

    for (Map.Entry<ServerName, ServerLoad> entry : liveServers.entrySet()) {
      LiveServerInfo.Builder lsi =
          LiveServerInfo.newBuilder().setServer(ProtobufUtil.toServerName(entry.getKey()));
      lsi.setServerLoad(entry.getValue().obtainServerLoadPB());
      builder.addLiveServers(lsi.build());
    }
    for (ServerName deadServer : getDeadServerNames()) {
      builder.addDeadServers(ProtobufUtil.toServerName(deadServer));
    }
    for (Map.Entry<String, RegionState> rit : getRegionsInTransition().entrySet()) {
      ClusterStatusProtos.RegionState rs = rit.getValue().convert();
      RegionSpecifier.Builder spec =
          RegionSpecifier.newBuilder().setType(RegionSpecifierType.REGION_NAME);
      spec.setValue(ByteString.copyFrom(Bytes.toBytes(rit.getKey())));

      RegionInTransition pbRIT =
          RegionInTransition.newBuilder().setSpec(spec.build()).setRegionState(rs).build();
      builder.addRegionsInTransition(pbRIT);
    }
    builder.setClusterId(new ClusterId(getClusterId()).convert());
    for (String coprocessor : getMasterCoprocessors()) {
      builder.addMasterCoprocessors(HBaseProtos.Coprocessor.newBuilder().setName(coprocessor));
    }
    builder.setMaster(ProtobufUtil.toServerName(getMaster()));
    for (ServerName backup : getBackupMasters()) {
      builder.addBackupMasters(ProtobufUtil.toServerName(backup));
    }
    if (balancerOn != null) { // Boolean field; guard against unboxing a null
      builder.setBalancerOn(balancerOn);
    }
    return builder.build();
  }
 /**
  * Convert a protobuf ClusterStatus to a ClusterStatus
  *
  * @param proto the protobuf ClusterStatus
  * @return the converted ClusterStatus
  */
 public static ClusterStatus convert(ClusterStatusProtos.ClusterStatus proto) {
   Map<ServerName, ServerLoad> servers = new HashMap<ServerName, ServerLoad>();
   for (LiveServerInfo lsi : proto.getLiveServersList()) {
     servers.put(ProtobufUtil.toServerName(lsi.getServer()), new ServerLoad(lsi.getServerLoad()));
   }
   Collection<ServerName> deadServers = new LinkedList<ServerName>();
   for (HBaseProtos.ServerName sn : proto.getDeadServersList()) {
     deadServers.add(ProtobufUtil.toServerName(sn));
   }
   Collection<ServerName> backupMasters = new LinkedList<ServerName>();
   for (HBaseProtos.ServerName sn : proto.getBackupMastersList()) {
     backupMasters.add(ProtobufUtil.toServerName(sn));
   }
   final Map<String, RegionState> rit = new HashMap<String, RegionState>();
   for (RegionInTransition region : proto.getRegionsInTransitionList()) {
     String key = new String(region.getSpec().getValue().toByteArray());
     RegionState value = RegionState.convert(region.getRegionState());
     rit.put(key, value);
   }
   final int numMasterCoprocessors = proto.getMasterCoprocessorsCount();
   final String[] masterCoprocessors = new String[numMasterCoprocessors];
   for (int i = 0; i < numMasterCoprocessors; i++) {
     masterCoprocessors[i] = proto.getMasterCoprocessors(i).getName();
   }
   return new ClusterStatus(
       proto.getHbaseVersion().getVersion(),
       ClusterId.convert(proto.getClusterId()).toString(),
       servers,
       deadServers,
       ProtobufUtil.toServerName(proto.getMaster()),
       backupMasters,
       rit,
       masterCoprocessors,
       proto.getBalancerOn());
 }
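Taken together, the two converters above should round-trip, which makes for a cheap sanity check; a sketch, assuming a populated ClusterStatus named status:

  ClusterStatusProtos.ClusterStatus proto = status.convert(); // POJO -> protobuf
  ClusterStatus back = ClusterStatus.convert(proto);          // protobuf -> POJO
  assert back.getHBaseVersion().equals(status.getHBaseVersion());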
Example #5
 /**
  * Load the information in the SnapshotManifest. Called by SnapshotManifest.open()
  *
  * <p>If the format is v2 and there is no data manifest, it means that we are loading an
  * in-progress snapshot. Since we support rolling upgrades, we look for both the v1 and v2
  * region formats.
  */
 private void load() throws IOException {
   switch (getSnapshotFormat(desc)) {
     case SnapshotManifestV1.DESCRIPTOR_VERSION:
       {
         this.htd =
             FSTableDescriptors.getTableDescriptorFromFs(fs, workingDir).getHTableDescriptor();
         ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader");
         try {
           this.regionManifests =
               SnapshotManifestV1.loadRegionManifests(conf, tpool, fs, workingDir, desc);
         } finally {
           tpool.shutdown();
         }
         break;
       }
     case SnapshotManifestV2.DESCRIPTOR_VERSION:
       {
         SnapshotDataManifest dataManifest = readDataManifest();
         if (dataManifest != null) {
           htd = ProtobufUtil.convertToHTableDesc(dataManifest.getTableSchema());
           regionManifests = dataManifest.getRegionManifestsList();
         } else {
           // Compatibility, load the v1 regions
           // This happens only when the snapshot is in-progress and the cache wants to refresh.
           List<SnapshotRegionManifest> v1Regions, v2Regions;
           ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader");
           try {
             v1Regions = SnapshotManifestV1.loadRegionManifests(conf, tpool, fs, workingDir, desc);
             v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, fs, workingDir, desc);
           } catch (InvalidProtocolBufferException e) {
             throw new CorruptedSnapshotException(
                 "unable to parse region manifest " + e.getMessage(), e);
           } finally {
             tpool.shutdown();
           }
           if (v1Regions != null && v2Regions != null) {
             regionManifests =
                 new ArrayList<SnapshotRegionManifest>(v1Regions.size() + v2Regions.size());
             regionManifests.addAll(v1Regions);
             regionManifests.addAll(v2Regions);
           } else if (v1Regions != null) {
             regionManifests = v1Regions;
           } else /* if (v2Regions != null) */ {
             regionManifests = v2Regions;
           }
         }
         break;
       }
     default:
       throw new CorruptedSnapshotException(
           "Invalid Snapshot version: " + desc.getVersion(),
           ProtobufUtil.createSnapshotDesc(desc));
   }
 }
Example #6
 @Override
 public void moveTables(Set<TableName> tables, String targetGroup) throws IOException {
   RSGroupAdminProtos.MoveTablesRequest.Builder builder =
       RSGroupAdminProtos.MoveTablesRequest.newBuilder().setTargetGroup(targetGroup);
   for (TableName tableName : tables) {
     builder.addTableName(ProtobufUtil.toProtoTableName(tableName));
   }
   try {
     proxy.moveTables(null, builder.build());
   } catch (ServiceException e) {
     throw ProtobufUtil.handleRemoteException(e);
   }
 }
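A hypothetical call site for this client method (the rsGroupAdmin handle and names are placeholders):

  Set<TableName> tables = new HashSet<TableName>();
  tables.add(TableName.valueOf("my_table"));
  rsGroupAdmin.moveTables(tables, "my_group"); // throws IOException on a remote failure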
  /**
   * Kick off a new sub-procedure on the listener with the data stored in the passed znode.
   *
    * <p>Will attempt to create the same procedure multiple times if a procedure znode with the
    * same name is created. It is left up to the coordinator to ensure this doesn't occur.
   *
   * @param path full path to the znode for the procedure to start
   */
  private synchronized void startNewSubprocedure(String path) {
    LOG.debug("Found procedure znode: " + path);
    String opName = ZKUtil.getNodeName(path);
    // start watching for an abort notification for the procedure
    String abortZNode = zkController.getAbortZNode(opName);
    try {
      if (ZKUtil.watchAndCheckExists(zkController.getWatcher(), abortZNode)) {
        LOG.debug("Not starting:" + opName + " because we already have an abort notification.");
        return;
      }
    } catch (KeeperException e) {
      member.controllerConnectionFailure(
          "Failed to get the abort znode (" + abortZNode + ") for procedure :" + opName, e, opName);
      return;
    }

    // get the data for the procedure
    Subprocedure subproc = null;
    try {
      byte[] data = ZKUtil.getData(zkController.getWatcher(), path);
      if (!ProtobufUtil.isPBMagicPrefix(data)) {
        String msg =
            "Data in for starting procuedure "
                + opName
                + " is illegally formatted (no pb magic). "
                + "Killing the procedure: "
                + Bytes.toString(data);
        LOG.error(msg);
        throw new IllegalArgumentException(msg);
      }
      LOG.debug("start proc data length is " + data.length);
      data = Arrays.copyOfRange(data, ProtobufUtil.lengthOfPBMagic(), data.length);
      LOG.debug("Found data for znode:" + path);
      subproc = member.createSubprocedure(opName, data);
      member.submitSubprocedure(subproc);
    } catch (IllegalArgumentException iae) {
      LOG.error("Illegal argument exception", iae);
      sendMemberAborted(subproc, new ForeignException(getMemberName(), iae));
    } catch (IllegalStateException ise) {
      LOG.error("Illegal state exception ", ise);
      sendMemberAborted(subproc, new ForeignException(getMemberName(), ise));
    } catch (KeeperException e) {
      member.controllerConnectionFailure(
          "Failed to get data for new procedure:" + opName, e, opName);
    } catch (InterruptedException e) {
      member.controllerConnectionFailure(
          "Failed to get data for new procedure:" + opName, e, opName);
      Thread.currentThread().interrupt();
    }
  }
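The isPBMagicPrefix check above implies a matching writer that prepends the magic before storing the znode data. A sketch of both directions, with procData standing in for a serialized procedure message:

    // Writer side (sketch): prefix the payload so readers can detect the pb format.
    byte[] payload = ProtobufUtil.prependPBMagic(procData);
    // Reader side then strips the prefix again, exactly as done above:
    byte[] body = Arrays.copyOfRange(payload, ProtobufUtil.lengthOfPBMagic(), payload.length);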
  @Override
  public void deserializeStateData(final InputStream stream) throws IOException {
    super.deserializeStateData(stream);

    MasterProcedureProtos.ModifyColumnFamilyStateData modifyCFMsg =
        MasterProcedureProtos.ModifyColumnFamilyStateData.parseDelimitedFrom(stream);
    user = MasterProcedureUtil.toUserInfo(modifyCFMsg.getUserInfo());
    tableName = ProtobufUtil.toTableName(modifyCFMsg.getTableName());
    cfDescriptor = ProtobufUtil.convertToHColumnDesc(modifyCFMsg.getColumnfamilySchema());
    if (modifyCFMsg.hasUnmodifiedTableSchema()) {
      unmodifiedHTableDescriptor =
          ProtobufUtil.convertToHTableDesc(modifyCFMsg.getUnmodifiedTableSchema());
    }
  }
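deserializeStateData relies on protobuf's delimited framing: the matching serializeStateData (shown later in this section) writes with writeDelimitedTo, so parseDelimitedFrom consumes exactly one length-prefixed message and leaves the rest of the stream untouched. A generic sketch of that pairing, with MyStateData standing in for any generated message type:

    ByteArrayOutputStream out = new ByteArrayOutputStream();
    msg.writeDelimitedTo(out);                        // varint length, then the message body
    MyStateData back = MyStateData.parseDelimitedFrom(
        new ByteArrayInputStream(out.toByteArray())); // reads exactly one framed message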
Example #9
  /**
   * Check to make sure that we are OK to run the passed snapshot. Checks to make sure that we
   * aren't already running a snapshot or restore on the requested table.
   *
   * @param snapshot description of the snapshot we want to start
   * @throws HBaseSnapshotException if the filesystem could not be prepared to start the snapshot
   */
  private synchronized void prepareToTakeSnapshot(SnapshotDescription snapshot)
      throws HBaseSnapshotException {
    FileSystem fs = master.getMasterFileSystem().getFileSystem();
    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
    TableName snapshotTable = TableName.valueOf(snapshot.getTable());

    // make sure we aren't already running a snapshot
    if (isTakingSnapshot(snapshot)) {
      SnapshotSentinel handler = this.snapshotHandlers.get(snapshotTable);
      throw new SnapshotCreationException(
          "Rejected taking "
              + ClientSnapshotDescriptionUtils.toString(snapshot)
              + " because we are already running another snapshot "
              + (handler != null
                  ? ("on the same table "
                      + ClientSnapshotDescriptionUtils.toString(handler.getSnapshot()))
                  : "with the same name"),
          ProtobufUtil.createSnapshotDesc(snapshot));
    }

    // make sure we aren't running a restore on the same table
    if (isRestoringTable(snapshotTable)) {
      throw new SnapshotCreationException(
          "Rejected taking "
              + ClientSnapshotDescriptionUtils.toString(snapshot)
              + " because we are already have a restore in progress on the same snapshot.");
    }

    try {
      // delete the working directory, since we aren't running the snapshot. Likely leftovers
      // from a failed attempt.
      fs.delete(workingDir, true);

      // recreate the working directory for the snapshot
      if (!fs.mkdirs(workingDir)) {
        throw new SnapshotCreationException(
            "Couldn't create working directory (" + workingDir + ") for snapshot",
            ProtobufUtil.createSnapshotDesc(snapshot));
      }
    } catch (HBaseSnapshotException e) {
      throw e;
    } catch (IOException e) {
      throw new SnapshotCreationException(
          "Exception while checking to see if snapshot could be started.",
          e,
          ProtobufUtil.createSnapshotDesc(snapshot));
    }
  }
Example #10
  @Override
  public void prepareBulkLoad(
      RpcController controller,
      PrepareBulkLoadRequest request,
      RpcCallback<PrepareBulkLoadResponse> done) {
    try {
      List<BulkLoadObserver> bulkLoadObservers = getBulkLoadObservers();

      if (bulkLoadObservers != null) {
        ObserverContext<RegionCoprocessorEnvironment> ctx =
            new ObserverContext<RegionCoprocessorEnvironment>();
        ctx.prepare(env);

        for (BulkLoadObserver bulkLoadObserver : bulkLoadObservers) {
          bulkLoadObserver.prePrepareBulkLoad(ctx, request);
        }
      }

      String bulkToken =
          createStagingDir(
                  baseStagingDir, getActiveUser(), ProtobufUtil.toTableName(request.getTableName()))
              .toString();
      done.run(PrepareBulkLoadResponse.newBuilder().setBulkToken(bulkToken).build());
    } catch (IOException e) {
      ResponseConverter.setControllerException(controller, e);
      done.run(null); // complete the RPC with a null response on failure only
    }
  }
  @Override
  public void serializeStateData(final OutputStream stream) throws IOException {
    super.serializeStateData(stream);

    MasterProcedureProtos.ModifyColumnFamilyStateData.Builder modifyCFMsg =
        MasterProcedureProtos.ModifyColumnFamilyStateData.newBuilder()
            .setUserInfo(MasterProcedureUtil.toProtoUserInfo(user))
            .setTableName(ProtobufUtil.toProtoTableName(tableName))
            .setColumnfamilySchema(ProtobufUtil.convertToColumnFamilySchema(cfDescriptor));
    if (unmodifiedHTableDescriptor != null) {
      modifyCFMsg.setUnmodifiedTableSchema(
          ProtobufUtil.convertToTableSchema(unmodifiedHTableDescriptor));
    }

    modifyCFMsg.build().writeDelimitedTo(stream);
  }
Example #12
 /**
  * Called directly from clients such as the hbase shell.
  *
  * @return the ReplicationLoadSink, or null if the server load carries none
  */
 public ReplicationLoadSink getReplicationLoadSink() {
   if (serverLoad.hasReplLoadSink()) {
     return ProtobufUtil.toReplicationLoadSink(serverLoad.getReplLoadSink());
   } else {
     return null;
   }
 }
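Since this returns null when the server load carries no sink information, callers need a guard; a hypothetical use, assuming a ServerLoad instance named load (the getter on ReplicationLoadSink is our assumption):

  ReplicationLoadSink sink = load.getReplicationLoadSink();
  if (sink != null) {
    long age = sink.getAgeOfLastAppliedOp(); // assumed ReplicationLoadSink getter
  }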
Example #13
 /**
  * Convert a RegionInfo to a HRegionInfo
  *
  * @param proto the RegionInfo to convert
  * @return the converted HRegionInfo
  */
 public static HRegionInfo convert(final RegionInfo proto) {
   if (proto == null) return null;
   TableName tableName = ProtobufUtil.toTableName(proto.getTableName());
   if (tableName.equals(TableName.META_TABLE_NAME)) {
     return FIRST_META_REGIONINFO;
   }
   long regionId = proto.getRegionId();
   int replicaId = proto.hasReplicaId() ? proto.getReplicaId() : DEFAULT_REPLICA_ID;
   byte[] startKey = null;
   byte[] endKey = null;
   if (proto.hasStartKey()) {
     startKey = proto.getStartKey().toByteArray();
   }
   if (proto.hasEndKey()) {
     endKey = proto.getEndKey().toByteArray();
   }
   boolean split = false;
   if (proto.hasSplit()) {
     split = proto.getSplit();
   }
   HRegionInfo hri = new HRegionInfo(tableName, startKey, endKey, split, regionId, replicaId);
   if (proto.hasOffline()) {
     hri.setOffline(proto.getOffline());
   }
   return hri;
 }
Example #14
  /**
   * Check if the specified snapshot is done.
   *
   * @param expected description of the snapshot to check
   * @return true if snapshot is ready to be restored, false if it is still being taken.
   * @throws IOException if there is an error from HDFS or RPC
   * @throws UnknownSnapshotException if snapshot is invalid or does not exist.
   */
  public boolean isSnapshotDone(SnapshotDescription expected) throws IOException {
    // check the request to make sure it has a snapshot
    if (expected == null) {
      throw new UnknownSnapshotException(
          "No snapshot name passed in request, can't figure out which snapshot you want to check.");
    }

    String ssString = ClientSnapshotDescriptionUtils.toString(expected);

    // check to see if the sentinel exists,
    // and if the task is complete removes it from the in-progress snapshots map.
    SnapshotSentinel handler = removeSentinelIfFinished(this.snapshotHandlers, expected);

    // stop tracking "abandoned" handlers
    cleanupSentinels();

    if (handler == null) {
      // If there's no handler in the in-progress map, it means one of the following:
      //   - someone has already requested the snapshot state
      //   - the requested snapshot was completed a long time ago (cleanupSentinels() timeout)
      //   - the snapshot was never requested
      // In those cases, return the "done" state to the user if the snapshot exists on disk;
      // otherwise raise an exception saying that the snapshot is not running and doesn't exist.
      if (!isSnapshotCompleted(expected)) {
        throw new UnknownSnapshotException(
            "Snapshot "
                + ssString
                + " is not currently running or one of the known completed snapshots.");
      }
      // was done, return true;
      return true;
    }

    // pass on any failure we find in the sentinel
    try {
      handler.rethrowExceptionIfFailed();
    } catch (ForeignException e) {
      // Give some procedure info on an exception.
      String status;
      Procedure p = coordinator.getProcedure(expected.getName());
      if (p != null) {
        status = p.getStatus();
      } else {
        status = expected.getName() + " not found in proclist " + coordinator.getProcedureNames();
      }
      throw new HBaseSnapshotException(
          "Snapshot " + ssString + " had an error.  " + status,
          e,
          ProtobufUtil.createSnapshotDesc(expected));
    }

    // check to see if we are done
    if (handler.isFinished()) {
      LOG.debug("Snapshot '" + ssString + "' has completed, notifying client.");
      return true;
    } else if (LOG.isDebugEnabled()) {
      LOG.debug("Snapshoting '" + ssString + "' is still in progress!");
    }
    return false;
  }
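Client code typically polls this method until it flips to true (it throws if the snapshot failed); a hedged sketch of such a loop, assuming the enclosing SnapshotManager type:

    static void waitForSnapshot(SnapshotManager manager, SnapshotDescription expected)
        throws IOException, InterruptedException {
      while (!manager.isSnapshotDone(expected)) {
        Thread.sleep(500); // fixed backoff; real callers may want to grow this
      }
    }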
Example #15
  @Test(timeout = 300000)
  public void testClusterRequests() throws Exception {

    // send a fake request to the master to see how the metric value changes
    RegionServerStatusProtos.RegionServerReportRequest.Builder request =
        RegionServerStatusProtos.RegionServerReportRequest.newBuilder();
    ServerName serverName = cluster.getMaster(0).getServerName();
    request.setServer(ProtobufUtil.toServerName(serverName));

    MetricsMasterSource masterSource = master.getMasterMetrics().getMetricsSource();
    ClusterStatusProtos.ServerLoad sl =
        ClusterStatusProtos.ServerLoad.newBuilder().setTotalNumberOfRequests(10000).build();
    masterSource.init();
    request.setLoad(sl);
    master.getMasterRpcServices().regionServerReport(null, request.build());

    metricsHelper.assertCounter("cluster_requests", 10000, masterSource);

    sl = ClusterStatusProtos.ServerLoad.newBuilder().setTotalNumberOfRequests(15000).build();
    request.setLoad(sl);
    master.getMasterRpcServices().regionServerReport(null, request.build());

    metricsHelper.assertCounter("cluster_requests", 15000, masterSource);

    master.getMasterRpcServices().regionServerReport(null, request.build());

    metricsHelper.assertCounter("cluster_requests", 15000, masterSource);
    master.stopMaster();
  }
Example #16
 /**
  * Take a snapshot using the specified handler. On failure the snapshot temporary working
  * directory is removed. NOTE: prepareToTakeSnapshot(), called before this one, takes care of
  * rejecting the snapshot request if the table is busy with another snapshot/restore operation.
  *
  * @param snapshot the snapshot description
  * @param handler the snapshot handler
  */
 private synchronized void snapshotTable(
     SnapshotDescription snapshot, final TakeSnapshotHandler handler)
     throws HBaseSnapshotException {
   try {
     handler.prepare();
     this.executorService.submit(handler);
     this.snapshotHandlers.put(TableName.valueOf(snapshot.getTable()), handler);
   } catch (Exception e) {
     // cleanup the working directory by trying to delete it from the fs.
     Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
     try {
       if (!this.master.getMasterFileSystem().getFileSystem().delete(workingDir, true)) {
         LOG.error(
             "Couldn't delete working directory ("
                 + workingDir
                 + " for snapshot:"
                 + ClientSnapshotDescriptionUtils.toString(snapshot));
       }
     } catch (IOException e1) {
       LOG.error(
           "Couldn't delete working directory ("
               + workingDir
               + " for snapshot:"
               + ClientSnapshotDescriptionUtils.toString(snapshot));
     }
     // fail the snapshot
     throw new SnapshotCreationException(
         "Could not build snapshot handler", e, ProtobufUtil.createSnapshotDesc(snapshot));
   }
 }
Example #17
  /**
   * Delete the specified snapshot
   *
    * @param snapshot the snapshot to delete
   * @throws SnapshotDoesNotExistException If the specified snapshot does not exist.
   * @throws IOException For filesystem IOExceptions
   */
  public void deleteSnapshot(SnapshotDescription snapshot)
      throws SnapshotDoesNotExistException, IOException {
    // check to see if it is completed
    if (!isSnapshotCompleted(snapshot)) {
      throw new SnapshotDoesNotExistException(ProtobufUtil.createSnapshotDesc(snapshot));
    }

    String snapshotName = snapshot.getName();
    // first create the snapshot description and check to see if it exists
    FileSystem fs = master.getMasterFileSystem().getFileSystem();
    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
    // Get snapshot info from file system. The one passed as parameter is a "fake" snapshotInfo with
    // just the "name" and it does not contains the "real" snapshot information
    snapshot = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);

    // call coproc pre hook
    MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
    if (cpHost != null) {
      cpHost.preDeleteSnapshot(snapshot);
    }

    LOG.debug("Deleting snapshot: " + snapshotName);
    // delete the existing snapshot
    if (!fs.delete(snapshotDir, true)) {
      throw new HBaseSnapshotException("Failed to delete snapshot directory: " + snapshotDir);
    }

    // call coproc post hook
    if (cpHost != null) {
      cpHost.postDeleteSnapshot(snapshot);
    }
  }
Example #18
  @Override
  public RSGroupInfo getRSGroupInfoOfTable(TableName tableName) throws IOException {
    RSGroupAdminProtos.GetRSGroupInfoOfTableRequest request =
        RSGroupAdminProtos.GetRSGroupInfoOfTableRequest.newBuilder()
            .setTableName(ProtobufUtil.toProtoTableName(tableName))
            .build();

    try {
      GetRSGroupInfoOfTableResponse resp = proxy.getRSGroupInfoOfTable(null, request);
      if (resp.hasRSGroupInfo()) {
        return RSGroupSerDe.toGroupInfo(resp.getRSGroupInfo());
      }
      return null;
    } catch (ServiceException e) {
      throw ProtobufUtil.handleRemoteException(e);
    }
  }
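A hypothetical lookup against this method, treating the null return as "not assigned to any group":

   RSGroupInfo info = rsGroupAdmin.getRSGroupInfoOfTable(TableName.valueOf("t1"));
   String group = (info != null) ? info.getName() : "(none)";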
Example #19
 /**
  * Publish the scan metrics. For now, we use scan.setAttribute to pass the metrics back to the
  * application or TableInputFormat. Later, we could push it to other systems. We don't use the
  * metrics framework because it doesn't support multiple instances of the same metrics on the
  * same machine; for scan/map reduce scenarios, we will have multiple scans running at the same
  * time.
  *
  * <p>By default, scan metrics are disabled; if the application wants to collect them, this
  * behavior can be turned on by calling:
  *
  * <p>scan.setAttribute(SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE))
  */
 protected void writeScanMetrics() {
   if (this.scanMetrics == null || scanMetricsPublished) {
     return;
   }
   MapReduceProtos.ScanMetrics pScanMetrics = ProtobufUtil.toScanMetrics(scanMetrics);
   scan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA, pScanMetrics.toByteArray());
   scanMetricsPublished = true;
 }
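On the consuming side, the application reads the attribute back and decodes it; a sketch assuming ProtobufUtil's byte[] overload of toScanMetrics and the public AtomicLong counters on ScanMetrics:

  byte[] serialized = scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA);
  if (serialized != null) {
    ScanMetrics metrics = ProtobufUtil.toScanMetrics(serialized); // assumed overload
    long regions = metrics.countOfRegions.get();                  // assumed public counter
  }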
 /**
  * @param clusterKey the cluster key of the peer
  * @return Serialized protobuf of <code>clusterKey</code> with pb magic prefix prepended,
  *     suitable for use as the content of this.peersZNode; i.e. the content of the PEER_ID
  *     znode under /hbase/replication/peers/PEER_ID
  */
 private static byte[] toByteArray(final String clusterKey) {
   byte[] bytes =
       ZooKeeperProtos.ReplicationPeer.newBuilder()
           .setClusterkey(clusterKey)
           .build()
           .toByteArray();
   return ProtobufUtil.prependPBMagic(bytes);
 }
  @Override
  public void deserializeStateData(final InputStream stream) throws IOException {
    super.deserializeStateData(stream);

    MasterProcedureProtos.CreateNamespaceStateData createNamespaceMsg =
        MasterProcedureProtos.CreateNamespaceStateData.parseDelimitedFrom(stream);
    nsDescriptor = ProtobufUtil.toNamespaceDescriptor(createNamespaceMsg.getNamespaceDescriptor());
  }
Example #22
 /** Extract the region's encoded name from the region manifest. */
 static String getRegionNameFromManifest(final SnapshotRegionManifest manifest) {
   byte[] regionName =
       HRegionInfo.createRegionName(
           ProtobufUtil.toTableName(manifest.getRegionInfo().getTableName()),
           manifest.getRegionInfo().getStartKey().toByteArray(),
           manifest.getRegionInfo().getRegionId(),
           true);
   return HRegionInfo.encodeRegionName(regionName);
 }
  @Override
  public void serializeStateData(final OutputStream stream) throws IOException {
    super.serializeStateData(stream);

    MasterProcedureProtos.CreateNamespaceStateData.Builder createNamespaceMsg =
        MasterProcedureProtos.CreateNamespaceStateData.newBuilder()
            .setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(this.nsDescriptor));
    createNamespaceMsg.build().writeDelimitedTo(stream);
  }
  public Result get(
      final TransactionState transactionState, final Get get, final boolean bool_addLocation)
      throws IOException {
    if (LOG.isTraceEnabled()) LOG.trace("Enter TransactionalTable.get");

    if (bool_addLocation) addLocation(transactionState, super.getRegionLocation(get.getRow()));
    final String regionName =
        super.getRegionLocation(get.getRow()).getRegionInfo().getRegionNameAsString();
    Batch.Call<TrxRegionService, GetTransactionalResponse> callable =
        new Batch.Call<TrxRegionService, GetTransactionalResponse>() {
          ServerRpcController controller = new ServerRpcController();
          BlockingRpcCallback<GetTransactionalResponse> rpcCallback =
              new BlockingRpcCallback<GetTransactionalResponse>();

          @Override
          public GetTransactionalResponse call(TrxRegionService instance) throws IOException {
            org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos
                    .GetTransactionalRequest.Builder
                builder = GetTransactionalRequest.newBuilder();
            builder.setGet(ProtobufUtil.toGet(get));
            builder.setTransactionId(transactionState.getTransactionId());
            builder.setRegionName(ByteString.copyFromUtf8(regionName));

            instance.get(controller, builder.build(), rpcCallback);
            return rpcCallback.get();
          }
        };

    GetTransactionalResponse result = null;
    try {
      int retryCount = 0;
      boolean retry = false;
      do {
        Iterator<Map.Entry<byte[], TrxRegionProtos.GetTransactionalResponse>> it =
            super.coprocessorService(TrxRegionService.class, get.getRow(), get.getRow(), callable)
                .entrySet()
                .iterator();
        if (it.hasNext()) {
          result = it.next().getValue();
          retry = false;
        }

        if (result == null || result.getException().contains("closing region")) {
          Thread.sleep(TransactionalTable.delay);
          retry = true;
          transactionState.setRetried(true);
          retryCount++;
        }
      } while (retryCount < TransactionalTable.retries && retry);
    } catch (Throwable e) {
      if (LOG.isErrorEnabled()) LOG.error("ERROR while calling getTransactional ", e);
      throw new IOException("ERROR while calling getTransactional ", e);
    }
    if (result == null) throw new IOException(retryErrMsg);
    else if (result.hasException()) throw new IOException(result.getException());
    return ProtobufUtil.toResult(result.getResult());
  }
  /**
   * Pass along the found abort notification to the listener
   *
   * @param abortZNode full znode path to the failed procedure information
   */
  protected void abort(String abortZNode) {
    LOG.debug("Aborting procedure member for znode " + abortZNode);
    String opName = ZKUtil.getNodeName(abortZNode);
    try {
      byte[] data = ZKUtil.getData(zkController.getWatcher(), abortZNode);

      // figure out the data we need to pass
      ForeignException ee;
      try {
        if (data == null || data.length == 0) {
          // ignore
          return;
        } else if (!ProtobufUtil.isPBMagicPrefix(data)) {
          String msg =
              "Illegally formatted data in abort node for proc "
                  + opName
                  + ".  Killing the procedure.";
          LOG.error(msg);
          // we got a remote exception, but we can't describe it so just return exn from here
          ee = new ForeignException(getMemberName(), new IllegalArgumentException(msg));
        } else {
          data = Arrays.copyOfRange(data, ProtobufUtil.lengthOfPBMagic(), data.length);
          ee = ForeignException.deserialize(data);
        }
      } catch (InvalidProtocolBufferException e) {
        LOG.warn(
            "Got an error notification for op:"
                + opName
                + " but we can't read the information. Killing the procedure.");
        // we got a remote exception, but we can't describe it so just return exn from here
        ee = new ForeignException(getMemberName(), e);
      }

      this.member.receiveAbortProcedure(opName, ee);
    } catch (KeeperException e) {
      member.controllerConnectionFailure(
          "Failed to get data for abort znode:" + abortZNode + zkController.getAbortZnode(),
          e,
          opName);
    } catch (InterruptedException e) {
      LOG.warn("abort already in progress", e);
      Thread.currentThread().interrupt();
    }
  }
 /**
  * @param bytes Content of a peer znode.
  * @return ClusterKey parsed from the passed bytes.
  * @throws DeserializationException if the bytes cannot be parsed as a ReplicationPeer
  */
 private static String parsePeerFrom(final byte[] bytes) throws DeserializationException {
   if (ProtobufUtil.isPBMagicPrefix(bytes)) {
     int pblen = ProtobufUtil.lengthOfPBMagic();
     ZooKeeperProtos.ReplicationPeer.Builder builder =
         ZooKeeperProtos.ReplicationPeer.newBuilder();
     ZooKeeperProtos.ReplicationPeer peer;
     try {
       peer = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
     } catch (InvalidProtocolBufferException e) {
       throw new DeserializationException(e);
     }
     return peer.getClusterkey();
   } else {
     if (bytes.length > 0) {
       return Bytes.toString(bytes);
     }
     return "";
   }
 }
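This parser is the inverse of the toByteArray(clusterKey) helper shown earlier; a round-trip sketch (both helpers are private to their own classes, so this only illustrates the framing):

  byte[] znodeContent = toByteArray("zk1.example.com:2181:/hbase"); // pb magic + ReplicationPeer
  String clusterKey = parsePeerFrom(znodeContent);                  // back to the original key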
Example #27
 @Override
 public void removeRSGroup(String name) throws IOException {
   RSGroupAdminProtos.RemoveRSGroupRequest request =
       RSGroupAdminProtos.RemoveRSGroupRequest.newBuilder().setRSGroupName(name).build();
   try {
     proxy.removeRSGroup(null, request);
   } catch (ServiceException e) {
     throw ProtobufUtil.handleRemoteException(e);
   }
 }
Example #28
 static ThrottleSettings fromTimedQuota(
     final String userName,
     final TableName tableName,
     final String namespace,
     ThrottleType type,
     QuotaProtos.TimedQuota timedQuota) {
   QuotaProtos.ThrottleRequest.Builder builder = QuotaProtos.ThrottleRequest.newBuilder();
   builder.setType(ProtobufUtil.toProtoThrottleType(type));
   builder.setTimedQuota(timedQuota);
   return new ThrottleSettings(userName, tableName, namespace, builder.build());
 }
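A hypothetical construction of the TimedQuota argument, built directly with the protobuf builder (field names per HBase's Quota.proto; note the factory above is package-private):

  QuotaProtos.TimedQuota tq = QuotaProtos.TimedQuota.newBuilder()
      .setTimeUnit(HBaseProtos.TimeUnit.SECONDS) // the limit reads as soft_limit per time_unit
      .setSoftLimit(100)                         // e.g. 100 requests per second
      .build();
  ThrottleSettings settings = ThrottleSettings.fromTimedQuota(
      "bob", null, null, ThrottleType.REQUEST_NUMBER, tq); // user-level: table/namespace null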
Example #29
 @Override
 public void write(final DataOutput out) throws IOException {
   ProtobufUtil.toMutationNoData(MutationType.PUT, put)
       .writeDelimitedTo(DataOutputOutputStream.from(out));
   out.writeInt(put.size());
   CellScanner scanner = put.cellScanner();
   while (scanner.advance()) {
     KeyValue kv = KeyValueUtil.ensureKeyValue(scanner.current());
     KeyValue.write(kv, out);
   }
 }
Example #30
  @Override
  public boolean balanceRSGroup(String name) throws IOException {
    RSGroupAdminProtos.BalanceRSGroupRequest request =
        RSGroupAdminProtos.BalanceRSGroupRequest.newBuilder().setRSGroupName(name).build();

    try {
      return proxy.balanceRSGroup(null, request).getBalanceRan();
    } catch (ServiceException e) {
      throw ProtobufUtil.handleRemoteException(e);
    }
  }