Example #1
 /**
  * Check that a request to change this node's HA state is valid. In particular, if automatic
  * failover is enabled, non-forced requests from the HAAdmin CLI are rejected, and vice versa.
  *
  * @param req the request to check
  * @throws AccessControlException if the request is disallowed
  */
 private void checkHaStateChange(StateChangeRequestInfo req) throws AccessControlException {
   switch (req.getSource()) {
     case REQUEST_BY_USER:
       if (autoFailoverEnabled) {
         throw new AccessControlException(
             "Manual failover for this ResourceManager is disallowed, "
                 + "because automatic failover is enabled.");
       }
       break;
     case REQUEST_BY_USER_FORCED:
       if (autoFailoverEnabled) {
         LOG.warn(
             "Allowing manual failover from "
                 + org.apache.hadoop.ipc.Server.getRemoteAddress()
                 + " even though automatic failover is enabled, because the user "
                 + "specified the force flag");
       }
       break;
     case REQUEST_BY_ZKFC:
       if (!autoFailoverEnabled) {
         throw new AccessControlException(
             "Request from ZK failover controller at "
                 + org.apache.hadoop.ipc.Server.getRemoteAddress()
                 + " denied "
                 + "since automatic failover is not enabled");
       }
       break;
   }
 }
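A check like this is typically invoked at the top of each HA transition RPC, so a disallowed request is rejected before any state is touched. A minimal caller sketch, using hypothetical names (becomeActive stands in for the actual transition logic and is not part of the example above):

  // Caller sketch (hypothetical names): reject an invalid request up front, then transition.
  public synchronized void transitionToActive(StateChangeRequestInfo reqInfo)
      throws AccessControlException {
    checkHaStateChange(reqInfo); // throws AccessControlException if the request is disallowed
    becomeActive();              // hypothetical placeholder for the actual transition logic
  }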
Example #2
  /**
   * Mark the block belonging to datanode as corrupt.
   *
   * @param blk Block to be added to CorruptReplicasMap
   * @param dn DatanodeDescriptor which holds the corrupt replica
   * @param reason a textual reason (for logging purposes)
   */
  public void addToCorruptReplicasMap(BlockInfo blk, DatanodeDescriptor dn, String reason)
      throws StorageException, TransactionContextException {
    Collection<DatanodeDescriptor> nodes = getNodes(blk);

    String reasonText;
    if (reason != null) {
      reasonText = " because " + reason;
    } else {
      reasonText = "";
    }

    if (!nodes.contains(dn)) {
      addCorruptReplicaToDB(new CorruptReplica(blk.getBlockId(), dn.getSId(), blk.getInodeId()));
      NameNode.blockStateChangeLog.info(
          "BLOCK NameSystem.addToCorruptReplicasMap: "
              + blk.getBlockName()
              + " added as corrupt on "
              + dn
              + " by "
              + Server.getRemoteIp()
              + reasonText);
    } else {
      NameNode.blockStateChangeLog.info(
          "BLOCK NameSystem.addToCorruptReplicasMap: "
              + "duplicate requested for "
              + blk.getBlockName()
              + " to add as corrupt "
              + "on "
              + dn
              + " by "
              + Server.getRemoteIp()
              + reasonText);
    }
  }
Example #3
 /** Push the metrics to the monitoring subsystem on a doUpdates() call. */
  public void doUpdates(MetricsContext context) {

    synchronized (this) {
      // ToFix - fix server to use the following two metrics directly so
      // the metrics do not have to be copied here.
      numOpenConnections.set(myServer.getNumOpenConnections());
      callQueueLen.set(myServer.getCallQueueLen());
      for (MetricsBase m : registry.getMetricsList()) {
        m.pushMetric(metricsRecord);
      }
    }
    metricsRecord.update();
  }
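doUpdates() is a callback: a metrics context invokes it on its own timer and the method pushes the current values into metricsRecord. For that to happen the object has to be registered with the context first; a minimal wiring sketch, assuming the legacy org.apache.hadoop.metrics API and that the enclosing class implements Updater:

    // Hypothetical constructor wiring (sketch): register this object so the context
    // periodically calls doUpdates() and ships the record to the monitoring subsystem.
    MetricsContext context = MetricsUtil.getContext("rpc");
    metricsRecord = MetricsUtil.createRecord(context, "metrics");
    context.registerUpdater(this); // assumes the enclosing class implements Updater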
Example #4
 /** A helper API to add the remote IP address. */
 static void addRemoteIP(StringBuilder b) {
   InetAddress ip = Server.getRemoteIp();
   // ip address can be null for testcases
   if (ip != null) {
     add(Keys.IP, ip.getHostAddress(), b);
   }
 }
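The add(...) helper called above is not shown. A plausible minimal version, assuming a tab-separated key=value audit format (the separator and the Keys enum layout are assumptions, not taken from the example):

  // Hypothetical companion helper: appends one audit field as "<TAB>KEY=value" so that
  // several fields can be chained onto the same StringBuilder.
  static void add(Keys key, String value, StringBuilder b) {
    b.append('\t').append(key.name()).append('=').append(value);
  }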
Example #5
 @Override
 protected void serviceStop() throws Exception {
   if (server != null) {
     server.stop();
   }
   super.serviceStop();
   amRunning = false;
 }
Example #6
 private static String getClientMachine() {
   String clientMachine = NamenodeWebHdfsMethods.getRemoteAddress();
   if (clientMachine == null) { // not a web client
     clientMachine = Server.getRemoteAddress();
   }
   if (clientMachine == null) { // not an RPC client
     clientMachine = "";
   }
   return clientMachine;
 }
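Because getClientMachine() falls back to an empty string rather than null, callers can embed the result directly in log or audit messages. A small usage sketch with hypothetical names (auditCreate, src, and LOG are illustrative, not part of the example):

  // Hypothetical usage sketch: record which machine issued the request, whether it arrived
  // via WebHDFS, via RPC, or from an in-process caller (empty string in that last case).
  void auditCreate(String src) {
    String clientMachine = getClientMachine();
    LOG.info("create " + src + " requested from " + clientMachine);
  }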
Example #7
  /**
   * Test to verify that InterDatanode RPC times out as expected when the server DN does not
   * respond.
   */
  @Test(expected = SocketTimeoutException.class)
  public void testInterDNProtocolTimeout() throws Throwable {
    final Server server = new TestServer(1, true);
    server.start();

    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    DatanodeID fakeDnId =
        new DatanodeID("localhost", "localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
    DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
    InterDatanodeProtocol proxy = null;

    try {
      proxy = DataNode.createInterDataNodeProtocolProxy(dInfo, conf, 500);
      proxy.initReplicaRecovery(new RecoveringBlock(new ExtendedBlock("bpid", 1), null, 100));
      fail("Expected SocketTimeoutException exception, but did not get.");
    } finally {
      if (proxy != null) {
        RPC.stopProxy(proxy);
      }
      server.stop();
    }
  }
Example #8
  public void initializeServer() throws IOException {

    String serverAddr = conf.get(CLUSTER_BALANCER_ADDR, "localhost:9143");
    InetSocketAddress addr = NetUtils.createSocketAddr(serverAddr);
    clusterDaemonServer = RPC.getServer(this, addr.getHostName(), addr.getPort(), conf);
    clusterDaemonServer.start();

    // Http server
    String infoServerAddr = conf.get(CLUSTER_HTTP_BALANCER_ADDR, "localhost:50143");
    InetSocketAddress infoAddr = NetUtils.createSocketAddr(infoServerAddr);
    infoServer =
        new HttpServer(
            "cb", infoAddr.getHostName(), infoAddr.getPort(), infoAddr.getPort() == 0, conf);
    infoServer.setAttribute("cluster.balancer", this);
    infoServer.start();
  }
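A shutdown counterpart is not part of the example; a minimal sketch, assuming the same two fields, stops the services in the reverse of their start order:

  // Hypothetical shutdown counterpart to initializeServer() (sketch, same fields assumed).
  public void stopServer() throws Exception {
    if (infoServer != null) {
      infoServer.stop();          // HttpServer.stop() is declared to throw Exception
    }
    if (clusterDaemonServer != null) {
      clusterDaemonServer.stop(); // stop the RPC server and its handler threads
    }
  }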
Example #9
    public void start(Configuration conf) {
      YarnRPC rpc = YarnRPC.create(conf);
      // TODO : use fixed port ??
      InetSocketAddress address = NetUtils.createSocketAddr(hostAddress);
      InetAddress hostNameResolved = null;
      try {
        address.getAddress();
        hostNameResolved = InetAddress.getLocalHost();
      } catch (UnknownHostException e) {
        throw new YarnRuntimeException(e);
      }

      server = rpc.getServer(protocol, this, address, conf, null, 1);
      server.start();
      this.bindAddress = NetUtils.getConnectAddress(server);
      super.start();
      amRunning = true;
    }
Example #10
 public void stop() {
   if (server != null) {
     server.stop();
   }
 }
Example #11
 public void start() {
   Configuration conf = new Configuration();
   YarnRPC rpc = YarnRPC.create(conf);
   server = rpc.getServer(LocalizationProtocol.class, this, locAddr, conf, null, 1);
   server.start();
 }
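The client side of the same protocol is symmetric: YarnRPC can also manufacture the proxy. A minimal sketch, assuming locAddr is the InetSocketAddress the server above listens on:

  // Hypothetical client-side counterpart (sketch): YarnRPC.getProxy returns Object,
  // so the result is cast to the protocol interface.
  public LocalizationProtocol createClient() {
    Configuration conf = new Configuration();
    YarnRPC rpc = YarnRPC.create(conf);
    return (LocalizationProtocol) rpc.getProxy(LocalizationProtocol.class, locAddr, conf);
  }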
Example #12
 /** Register the rpcRequest deserializer for WritableRpcEngine */
 private static synchronized void initialize() {
   org.apache.hadoop.ipc.Server.registerProtocolEngine(
       RPC.RpcKind.RPC_WRITABLE, Invocation.class, new Server.WritableRpcInvoker());
   isInitialized = true;
 }
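The isInitialized flag suggests initialize() is meant to run lazily, at most once. A guard along these lines (a sketch, not taken from the example) is the usual companion:

  // Sketch of a lazy-initialization guard for the flag set above; callers invoke this
  // instead of initialize() directly, so the protocol engine is registered exactly once.
  static synchronized void ensureInitialized() {
    if (!isInitialized) {
      initialize();
    }
  }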