@Test(expected = FileNotFoundException.class)
  public final void testNoSuchFile() throws Exception {
    QueryIdFactory.reset();
    SubQueryId schid = QueryIdFactory.newSubQueryId(QueryIdFactory.newQueryId());
    QueryUnitId qid1 = QueryIdFactory.newQueryUnitId(schid);
    QueryUnitId qid2 = QueryIdFactory.newQueryUnitId(schid);

    File qid1Dir = new File(TEST_DATA + "/" + qid1.toString() + "/out");
    qid1Dir.mkdirs();
    File qid2Dir = new File(TEST_DATA + "/" + qid2.toString() + "/out");
    qid2Dir.mkdirs();

    Random rnd = new Random();
    FileWriter writer = new FileWriter(qid1Dir + "/" + "testHttp");
    String watermark1 = "test_" + rnd.nextInt();
    writer.write(watermark1);
    writer.flush();
    writer.close();

    writer = new FileWriter(qid2Dir + "/" + "testHttp");
    String watermark2 = "test_" + rnd.nextInt();
    writer.write(watermark2);
    writer.flush();
    writer.close();

    InterDataRetriever ret = new InterDataRetriever();
    HttpDataServer server = new HttpDataServer(NetUtils.createSocketAddr("127.0.0.1:0"), ret);
    server.start();

    ret.register(qid1, qid1Dir.getPath());
    InetSocketAddress addr = server.getBindAddress();
    assertDataRetrival(qid1, addr.getPort(), watermark1);
    ret.unregister(qid1);
    // once unregistered, the same retrieval must fail with FileNotFoundException
    assertDataRetrival(qid1, addr.getPort(), watermark1);
  }
 public void checkNameServiceId(Configuration conf, String addr, String expectedNameServiceId) {
   InetSocketAddress s = NetUtils.createSocketAddr(addr);
   String nameserviceId =
       DFSUtil.getNameServiceIdFromAddress(
           conf, s, DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
   assertEquals(expectedNameServiceId, nameserviceId);
 }
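
For context, a hedged usage sketch of the helper above; the nameservice ids, host names, and suffixed key strings below are illustrative assumptions, not taken from this listing:

  @Test
  public void testNameServiceIdLookup() {
    // Hypothetical federated setup: two nameservices, each with its own
    // RPC address; the helper should map an address back to its id.
    Configuration conf = new Configuration();
    conf.set("dfs.nameservices", "ns1,ns2");
    conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY + ".ns1", "machine1:9820");
    conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY + ".ns2", "machine2:9820");
    checkNameServiceId(conf, "machine1:9820", "ns1");
    checkNameServiceId(conf, "machine2:9820", "ns2");
  }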
  @Test
  public final void testHttpDataServer() throws Exception {
    Random rnd = new Random();
    FileWriter writer = new FileWriter(TEST_DATA + "/" + "testHttp");
    String watermark = "test_" + rnd.nextInt();
    writer.write(watermark + "\n");
    writer.flush();
    writer.close();

    DataRetriever ret = new DirectoryRetriever(TEST_DATA);
    HttpDataServer server = new HttpDataServer(NetUtils.createSocketAddr("127.0.0.1:0"), ret);
    server.start();

    InetSocketAddress addr = server.getBindAddress();
    URL url = new URL("http://127.0.0.1:" + addr.getPort() + "/testHttp");
    BufferedReader in = new BufferedReader(new InputStreamReader(url.openStream()));
    String line = null;
    boolean found = false;
    while ((line = in.readLine()) != null) {
      System.out.println(line);
      if (line.equals(watermark)) found = true;
    }
    assertTrue(found);
    in.close();
    server.stop();
  }
Example #4
  /** Initialize SecondaryNameNode. */
  private void initialize(Configuration conf) throws IOException {
    // initiate Java VM metrics
    JvmMetrics.init("SecondaryNameNode", conf.get("session.id"));

    // Create connection to the namenode.
    shouldRun = true;
    nameNodeAddr = NameNode.getAddress(conf);

    this.conf = conf;
    this.namenode =
        (NamenodeProtocol)
            RPC.waitForProxy(
                NamenodeProtocol.class, NamenodeProtocol.versionID, nameNodeAddr, conf);

    // initialize checkpoint directories
    fsName = getInfoServer();
    checkpointDirs = FSImage.getCheckpointDirs(conf, "/tmp/hadoop/dfs/namesecondary");
    checkpointEditsDirs = FSImage.getCheckpointEditsDirs(conf, "/tmp/hadoop/dfs/namesecondary");
    checkpointImage = new CheckpointStorage(conf);
    checkpointImage.recoverCreate(checkpointDirs, checkpointEditsDirs);

    // Initialize other scheduling parameters from the configuration
    checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
    checkpointSize = conf.getLong("fs.checkpoint.size", 4194304);

    // initialize the webserver for uploading files.
    String infoAddr =
        NetUtils.getServerAddress(
            conf,
            "dfs.secondary.info.bindAddress",
            "dfs.secondary.info.port",
            "dfs.secondary.http.address");
    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
    infoBindAddress = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort, tmpInfoPort == 0, conf);
    infoServer.setAttribute("name.system.image", checkpointImage);
    this.infoServer.setAttribute("name.conf", conf);
    infoServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
    infoServer.start();

    // The web-server port can be ephemeral... ensure we have the correct info
    infoPort = infoServer.getPort();
    conf.set("dfs.secondary.http.address", infoBindAddress + ":" + infoPort);
    LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" + infoPort);
    LOG.warn(
        "Checkpoint Period   :"
            + checkpointPeriod
            + " secs "
            + "("
            + checkpointPeriod / 60
            + " min)");
    LOG.warn(
        "Log Size Trigger    :"
            + checkpointSize
            + " bytes "
            + "("
            + checkpointSize / 1024
            + " KB)");
  }
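
A hedged sketch of the configuration keys initialize(conf) reads; the values are made-up examples, not recommended settings:

    Configuration conf = new Configuration();
    conf.set("fs.checkpoint.period", "1800");                // checkpoint every 30 minutes
    conf.set("fs.checkpoint.size", String.valueOf(8 << 20)); // or once edits reach 8 MB
    // assumed to be the key FSImage.getCheckpointDirs() consults
    conf.set("fs.checkpoint.dir", "/var/hadoop/dfs/namesecondary");
    conf.set("dfs.secondary.http.address", "0.0.0.0:50090"); // port 0 picks an ephemeral port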
  @Override
  public void start() {

    // NodeManager is the last service to start, so NodeId is available.
    this.nodeId = this.context.getNodeId();

    String httpBindAddressStr =
        getConfig()
            .get(YarnConfiguration.NM_WEBAPP_ADDRESS, YarnConfiguration.DEFAULT_NM_WEBAPP_ADDRESS);
    InetSocketAddress httpBindAddress =
        NetUtils.createSocketAddr(
            httpBindAddressStr,
            YarnConfiguration.DEFAULT_NM_WEBAPP_PORT,
            YarnConfiguration.NM_WEBAPP_ADDRESS);
    try {
      //      this.hostName = InetAddress.getLocalHost().getCanonicalHostName();
      this.httpPort = httpBindAddress.getPort();
      // Registration has to be in start so that ContainerManager can get the
      // perNM tokens needed to authenticate ContainerTokens.
      registerWithRM();
      super.start();
      startStatusUpdater();
    } catch (Exception e) {
      throw new AvroRuntimeException(e);
    }
  }
  public void initializeServer() throws IOException {

    String serverAddr = conf.get(CLUSTER_BALANCER_ADDR, "localhost:9143");
    InetSocketAddress addr = NetUtils.createSocketAddr(serverAddr);
    clusterDaemonServer = RPC.getServer(this, addr.getHostName(), addr.getPort(), conf);
    clusterDaemonServer.start();

    // Http server
    String infoServerAddr = conf.get(CLUSTER_HTTP_BALANCER_ADDR, "localhost:50143");
    InetSocketAddress infoAddr = NetUtils.createSocketAddr(infoServerAddr);
    infoServer =
        new HttpServer(
            "cb", infoAddr.getHostName(), infoAddr.getPort(), infoAddr.getPort() == 0, conf);
    infoServer.setAttribute("cluster.balancer", this);
    infoServer.start();
  }
  static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy(
      DatanodeID datanodeid,
      Configuration conf,
      int socketTimeout,
      boolean connectToDnViaHostname,
      LocatedBlock locatedBlock)
      throws IOException {
    final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
    InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
    }

    // Since we're creating a new UserGroupInformation here, we know that no
    // future RPC proxies will be able to re-use the same connection. And
    // usages of this proxy tend to be one-off calls.
    //
    // This is a temporary fix: callers should really achieve this by using
    // RPC.stopProxy() on the resulting object, but this is currently not
    // working in trunk. See the discussion on HDFS-1965.
    Configuration confWithNoIpcIdle = new Configuration(conf);
    confWithNoIpcIdle.setInt(
        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);

    UserGroupInformation ticket =
        UserGroupInformation.createRemoteUser(locatedBlock.getBlock().getLocalBlock().toString());
    ticket.addToken(locatedBlock.getBlockToken());
    return createClientDatanodeProtocolProxy(
        addr, ticket, confWithNoIpcIdle, NetUtils.getDefaultSocketFactory(conf), socketTimeout);
  }
  public void launchSupervisorOnContainer(Container container) throws IOException {
    LOG.info("Connecting to ContainerManager for containerid=" + container.getId());
    String cmIpPortStr = container.getNodeId().getHost() + ":" + container.getNodeId().getPort();
    InetSocketAddress cmAddress = NetUtils.createSocketAddr(cmIpPortStr);
    LOG.info("Connecting to ContainerManager at " + cmIpPortStr);
    ContainerManager proxy =
        ((ContainerManager) rpc.getProxy(ContainerManager.class, cmAddress, hadoopConf));

    LOG.info("launchSupervisorOnContainer( id:" + container.getId() + " )");
    ContainerLaunchContext launchContext = Records.newRecord(ContainerLaunchContext.class);

    launchContext.setContainerId(container.getId());
    launchContext.setResource(container.getResource());

    try {
      launchContext.setUser(UserGroupInformation.getCurrentUser().getShortUserName());
    } catch (IOException e) {
      LOG.info(
          "Getting current user info failed when trying to launch the container: "
              + e.getMessage());
    }

    Map<String, String> env = new HashMap<String, String>();
    launchContext.setEnvironment(env);

    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    String stormVersion = Util.getStormVersion(this.storm_conf);
    Path zip = new Path("/lib/storm/" + stormVersion + "/storm.zip");
    FileSystem fs = FileSystem.get(this.hadoopConf);
    localResources.put(
        "storm",
        Util.newYarnAppResource(
            fs, zip, LocalResourceType.ARCHIVE, LocalResourceVisibility.PUBLIC));

    String appHome = Util.getApplicationHomeForId(this.appAttemptId.toString());

    Path dirDst = Util.createConfigurationFileInFs(fs, appHome, this.storm_conf, this.hadoopConf);

    localResources.put("conf", Util.newYarnAppResource(fs, dirDst));

    launchContext.setLocalResources(localResources);

    List<String> supervisorArgs = Util.buildSupervisorCommands(this.storm_conf);

    launchContext.setCommands(supervisorArgs);

    StartContainerRequest startRequest = Records.newRecord(StartContainerRequest.class);
    startRequest.setContainerLaunchContext(launchContext);

    LOG.info(
        "launchSupervisorOnContainer: startRequest prepared, calling startContainer. "
            + startRequest);
    try {
      StartContainerResponse response = proxy.startContainer(startRequest);
      LOG.info("Got a start container response " + response);
    } catch (Exception e) {
      LOG.error("Caught an exception while trying to start a container", e);
      System.exit(-1);
    }
  }
 public static String jobTrackUrl(String requestUrl, Configuration conf, RunningJob job) {
   // Create a link to the status of the running job
   String trackerAddress = conf.get("mapred.job.tracker.http.address");
   InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(trackerAddress);
   int cutoff = requestUrl.indexOf('/', requestUrl.lastIndexOf(':'));
   requestUrl = requestUrl.substring(0, cutoff);
   InetSocketAddress requestSocAddr = NetUtils.createSocketAddr(requestUrl);
   String address =
       "http://"
           + requestSocAddr.getHostName()
           + ":"
           + infoSocAddr.getPort()
           + "/jobdetails.jsp?jobid="
           + job.getID()
           + "&amp;refresh=30";
   return address;
 }
  public void testSetSocketAddress() throws IOException {
    Configuration conf = new Configuration();
    NetUtils.addStaticResolution("host", "127.0.0.1");
    final String defaultAddr = "host:1";

    InetSocketAddress addr = NetUtils.createSocketAddr(defaultAddr);
    conf.setSocketAddr("myAddress", addr);
    assertEquals(defaultAddr, NetUtils.getHostPortString(addr));
  }
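
A brief, hedged sketch of the NetUtils.createSocketAddr overloads the snippets in this listing rely on; the host names are hypothetical:

    // host:port parsed directly from the string
    InetSocketAddress a = NetUtils.createSocketAddr("node1.example.com:8020");
    assertEquals(8020, a.getPort());

    // overload with a default port, for strings that may omit one
    InetSocketAddress b = NetUtils.createSocketAddr("node2.example.com", 8020);
    assertEquals("node2.example.com", b.getHostName());
    assertEquals(8020, b.getPort());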
  private DNAddrPair chooseDataNode(LocatedBlock block) throws IOException {
    while (true) {
      DatanodeInfo[] nodes = block.getLocations();
      try {
        DatanodeInfo chosenNode = bestNode(nodes, deadNodes);
        InetSocketAddress targetAddr = NetUtils.createSocketAddr(chosenNode.getXferAddr());
        return new DNAddrPair(chosenNode, targetAddr);
      } catch (IOException ie) {
        String blockInfo = block.getBlock() + " file=" + src;
        if (failures >= dfsClient.getMaxBlockAcquireFailures()) {
          throw new BlockMissingException(
              src, "Could not obtain block: " + blockInfo, block.getStartOffset());
        }

        if (nodes == null || nodes.length == 0) {
          DFSClient.LOG.info("No node available for block: " + blockInfo);
        }
        DFSClient.LOG.info(
            "Could not obtain block "
                + block.getBlock()
                + " from any node: "
                + ie
                + ". Will get new block locations from namenode and retry...");
        try {
          // Introducing a random factor to the wait time before another retry.
          // The wait time is dependent on # of failures and a random factor.
          // At the first time of getting a BlockMissingException, the wait time
          // is a random number between 0..3000 ms. If the first retry
          // still fails, we will wait 3000 ms grace period before the 2nd retry.
          // Also at the second retry, the waiting window is expanded to 6000 ms
          // alleviating the request rate from the server. Similarly the 3rd retry
          // will wait 6000ms grace period before retry and the waiting window is
          // expanded to 9000ms.
          // grace period for the last round of attempts, plus an expanding,
          // randomized window for each failure
          double waitTime =
              timeWindow * failures
                  + timeWindow * (failures + 1) * DFSUtil.getRandom().nextDouble();
          DFSClient.LOG.warn(
              "DFS chooseDataNode: got # "
                  + (failures + 1)
                  + " IOException, will wait for "
                  + waitTime
                  + " msec.");
          Thread.sleep((long) waitTime);
        } catch (InterruptedException iex) {
          // ignored; fall through and retry immediately
        }
        deadNodes.clear(); // 2nd option is to remove only nodes[blockId]
        openInfo();
        block = getBlockAt(block.getStartOffset(), false);
        failures++;
        continue;
      }
    }
  }
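
The backoff comment above can be made concrete; a small worked sketch assuming timeWindow = 3000 ms, the value the comment itself uses:

    // failures = 0: wait in [0, 3000) ms
    // failures = 1: wait in [3000, 9000) ms
    // failures = 2: wait in [6000, 15000) ms
    int timeWindow = 3000; // assumed default, matching the comment above
    for (int failures = 0; failures < 3; failures++) {
      double min = timeWindow * failures;
      double max = min + timeWindow * (failures + 1);
      System.out.printf("failures=%d: wait in [%.0f, %.0f) ms%n", failures, min, max);
    }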
 protected ResourceTracker getRMClient() {
   Configuration conf = getConfig();
   YarnRPC rpc = YarnRPC.create(conf);
   InetSocketAddress rmAddress =
       NetUtils.createSocketAddr(
           this.rmAddress,
           YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT,
           YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS);
   return (ResourceTracker) rpc.getProxy(ResourceTracker.class, rmAddress, conf);
 }
 @Override
 protected void serviceInit(Configuration conf) throws Exception {
   clientServiceBindAddress = RMADDRESS;
   /*
   clientServiceBindAddress = conf.get(
       YarnConfiguration.APPSMANAGER_ADDRESS,
       YarnConfiguration.DEFAULT_APPSMANAGER_BIND_ADDRESS);
       */
   clientBindAddress = NetUtils.createSocketAddr(clientServiceBindAddress);
   super.serviceInit(conf);
 }
  public static Map<String, InetSocketAddress> getHaJtRpcAddresses(Configuration conf) {

    // For JT HA there can only be one logical name (unlike HDFS)
    String logicalName = getLogicalName(conf);
    Map<String, InetSocketAddress> map = Maps.newHashMap();
    for (String jtId : getJtServiceIds(conf, logicalName)) {
      String address = conf.get(addKeySuffixes(MR_JOBTRACKER_RPC_ADDRESS_KEY, logicalName, jtId));
      InetSocketAddress isa = NetUtils.createSocketAddr(address);
      map.put(jtId, isa);
    }
    return map;
  }
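
To make the key layout concrete, a hedged example of the configuration this method walks; the logical name, jobtracker ids, and literal key strings are assumptions standing in for the MR_JOBTRACKER_RPC_ADDRESS_KEY constants:

    // Hypothetical HA JobTracker configuration: one logical name, two jt ids,
    // each with a suffixed RPC address key.
    Configuration conf = new Configuration();
    conf.set("mapred.jobtrackers.logicaljt", "jt1,jt2");
    conf.set("mapred.jobtracker.rpc-address.logicaljt.jt1", "host1:8021");
    conf.set("mapred.jobtracker.rpc-address.logicaljt.jt2", "host2:8021");
    Map<String, InetSocketAddress> addrs = getHaJtRpcAddresses(conf);
    // addrs now maps "jt1" -> host1:8021 and "jt2" -> host2:8021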
Example #15
 /** Returns the Jetty server that the Namenode is listening on. */
 private String getInfoServer() throws IOException {
   URI fsName = FileSystem.getDefaultUri(conf);
   if (!FSConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) {
     throw new IOException("This is not a DFS");
   }
   String configuredAddress = conf.get("dfs.http.address", "0.0.0.0:50070");
   InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress);
   if (sockAddr.getAddress().isAnyLocalAddress()) {
     return fsName.getHost() + ":" + sockAddr.getPort();
   } else {
     return configuredAddress;
   }
 }
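
The wildcard branch above can be checked in isolation; a hedged sketch with hypothetical addresses:

   InetSocketAddress wildcard = NetUtils.createSocketAddr("0.0.0.0:50070");
   assertTrue(wildcard.getAddress().isAnyLocalAddress());
   // here getInfoServer() would return fsName.getHost() + ":" + 50070

   InetSocketAddress explicit = NetUtils.createSocketAddr("127.0.0.1:50070");
   assertFalse(explicit.getAddress().isAnyLocalAddress());
   // here the configured "127.0.0.1:50070" string is returned unchanged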
 /**
  * Delegate responsible for communicating with the Resource Manager's {@link ClientRMProtocol}.
  *
  * @param conf the configuration object.
  */
 public ResourceMgrDelegate(YarnConfiguration conf) {
   this.conf = conf;
   YarnRPC rpc = YarnRPC.create(this.conf);
   InetSocketAddress rmAddress =
       NetUtils.createSocketAddr(
           this.conf.get(YarnConfiguration.RM_ADDRESS, YarnConfiguration.DEFAULT_RM_ADDRESS),
           YarnConfiguration.DEFAULT_RM_PORT,
           YarnConfiguration.RM_ADDRESS);
   this.rmAddress = rmAddress.toString();
   LOG.debug("Connecting to ResourceManager at " + rmAddress);
   applicationsManager =
       (ClientRMProtocol) rpc.getProxy(ClientRMProtocol.class, rmAddress, this.conf);
   LOG.debug("Connected to ResourceManager at " + rmAddress);
 }
  /**
   * Returns logicalName and jobtracker Id when the local host matches the configuration parameter
   * {@code addressKey}.<logical>.<jobtracker Id>
   *
   * @param conf Configuration
   * @param addressKey configuration key corresponding to the address.
   * @param logicalName the logical name of the JobTracker
   * @param matcher matching criteria for matching the address
   * @return Array with logical name and jobtracker Id on success. First element in the array is
   *     logical name and the second element is the jobtracker Id. A null value indicates that
   *     the configuration does not have the Id.
   * @throws HadoopIllegalArgumentException on error
   */
  static String[] getSuffixIDs(
      final Configuration conf,
      final String addressKey,
      String logicalName,
      final AddressMatcher matcher) {
    String jobTrackerId = null;
    int found = 0;

    Collection<String> jtIds = getJtServiceIds(conf, logicalName);
    for (String jtId : emptyAsSingletonNull(jtIds)) {
      if (LOG.isTraceEnabled()) {
        LOG.trace(
            String.format(
                "addressKey: %s logicalName: %s jtId: %s", addressKey, logicalName, jtId));
      }
      String key = DFSUtil.addKeySuffixes(addressKey, logicalName, jtId);
      String addr = conf.get(key);
      if (addr == null) {
        continue;
      }
      InetSocketAddress s = null;
      try {
        s = NetUtils.createSocketAddr(addr);
      } catch (Exception e) {
        LOG.warn("Exception in creating socket address " + addr, e);
        continue;
      }
      if (!s.isUnresolved() && matcher.match(s)) {
        jobTrackerId = jtId;
        found++;
      }
    }

    // Only one address must match the local address
    if (found == 0) {
      String msg =
          "Configuration has no addresses that match "
              + "local node's address. Please configure the system with "
              + MR_HA_JOBTRACKER_ID_KEY;
      throw new HadoopIllegalArgumentException(msg);
    } else if (found > 1) {
      String msg =
          "Configuration has multiple addresses that match "
              + "local node's address. Please configure the system with "
              + MR_HA_JOBTRACKER_ID_KEY;
      throw new HadoopIllegalArgumentException(msg);
    }
    return new String[] {logicalName, jobTrackerId};
  }
 public YarnClientRMConnection(YarnConfiguration config) {
   this.config = config;
   InetSocketAddress remoteAddress =
       NetUtils.createSocketAddr(
           config.get(YarnConfiguration.RM_ADDRESS, YarnConfiguration.DEFAULT_RM_ADDRESS));
   Configuration appsManagerServerConf = new Configuration(config);
   appsManagerServerConf.setClass(
       YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_CLIENT_RESOURCEMANAGER,
       ClientRMSecurityInfo.class,
       SecurityInfo.class);
   YarnRPC rpc = YarnRPC.create(appsManagerServerConf);
   crmp =
       ((ClientRMProtocol)
           rpc.getProxy(ClientRMProtocol.class, remoteAddress, appsManagerServerConf));
 }
 /**
  * Constructor.
  *
  * @param datanodeid Datanode to connect to.
  * @param conf Configuration.
  * @param socketTimeout Socket timeout to use.
  * @param connectToDnViaHostname connect to the Datanode using its hostname
  * @throws IOException
  */
 public ClientDatanodeProtocolTranslatorPB(
     DatanodeID datanodeid, Configuration conf, int socketTimeout, boolean connectToDnViaHostname)
     throws IOException {
   final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
   InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
   if (LOG.isDebugEnabled()) {
     LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
   }
   rpcProxy =
       createClientDatanodeProtocolProxy(
           addr,
           UserGroupInformation.getCurrentUser(),
           conf,
           NetUtils.getDefaultSocketFactory(conf),
           socketTimeout);
 }
  public ClientRMProtocol getClientResourceManager() {
    if (clientResourceManager != null) return clientResourceManager;

    YarnConfiguration yarnConf = new YarnConfiguration(conf);
    YarnRPC rpc = YarnRPC.create(yarnConf);
    InetSocketAddress rmAddress =
        NetUtils.createSocketAddr(
            yarnConf.get(YarnConfiguration.RM_ADDRESS, YarnConfiguration.DEFAULT_RM_ADDRESS));

    LOG.info("Connecting to the resource manager (client) at " + rmAddress);

    clientResourceManager =
        (ClientRMProtocol) rpc.getProxy(ClientRMProtocol.class, rmAddress, conf);

    return clientResourceManager;
  }
  /**
   * Try to access a block on a datanode. If the access fails, an exception is thrown.
   *
   * @param datanode the datanode hosting the block
   * @param lblock the located block to read
   * @throws IOException if the block cannot be read
   */
  private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock) throws IOException {
    InetSocketAddress targetAddr = null;
    ExtendedBlock block = lblock.getBlock();

    targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());

    BlockReader blockReader =
        new BlockReaderFactory(new DfsClientConf(conf))
            .setInetSocketAddress(targetAddr)
            .setBlock(block)
            .setFileName(
                BlockReaderFactory.getFileName(targetAddr, "test-blockpoolid", block.getBlockId()))
            .setBlockToken(lblock.getBlockToken())
            .setStartOffset(0)
            .setLength(-1)
            .setVerifyChecksum(true)
            .setClientName("TestDataNodeVolumeFailure")
            .setDatanodeInfo(datanode)
            .setCachingStrategy(CachingStrategy.newDefaultStrategy())
            .setClientCacheContext(ClientContext.getFromConf(conf))
            .setConfiguration(conf)
            .setTracer(FsTracer.get(conf))
            .setRemotePeerFactory(
                new RemotePeerFactory() {
                  @Override
                  public Peer newConnectedPeer(
                      InetSocketAddress addr,
                      Token<BlockTokenIdentifier> blockToken,
                      DatanodeID datanodeId)
                      throws IOException {
                    Peer peer = null;
                    Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
                    try {
                      sock.connect(addr, HdfsConstants.READ_TIMEOUT);
                      sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
                      peer = DFSUtilClient.peerFromSocket(sock);
                    } finally {
                      if (peer == null) {
                        IOUtils.closeSocket(sock);
                      }
                    }
                    return peer;
                  }
                })
            .build();
    blockReader.close();
  }
Example #22
 private WebHdfsFileSystem getWebHdfsFileSystem(UserGroupInformation ugi, Configuration conf)
     throws IOException {
   if (UserGroupInformation.isSecurityEnabled()) {
     DelegationTokenIdentifier dtId =
         new DelegationTokenIdentifier(new Text(ugi.getUserName()), null, null);
     FSNamesystem namesystem = mock(FSNamesystem.class);
     DelegationTokenSecretManager dtSecretManager =
         new DelegationTokenSecretManager(86400000, 86400000, 86400000, 86400000, namesystem);
     dtSecretManager.startThreads();
     Token<DelegationTokenIdentifier> token =
         new Token<DelegationTokenIdentifier>(dtId, dtSecretManager);
     SecurityUtil.setTokenService(token, NetUtils.createSocketAddr(uri.getAuthority()));
     token.setKind(WebHdfsConstants.WEBHDFS_TOKEN_KIND);
     ugi.addToken(token);
   }
   return (WebHdfsFileSystem) FileSystem.get(uri, conf);
 }
    public void start(Configuration conf) {
      YarnRPC rpc = YarnRPC.create(conf);
      // TODO : use fixed port ??
      InetSocketAddress address = NetUtils.createSocketAddr(hostAddress);
      InetAddress hostNameResolved = null;
      try {
        address.getAddress();
        hostNameResolved = InetAddress.getLocalHost();
      } catch (UnknownHostException e) {
        throw new YarnRuntimeException(e);
      }

      server = rpc.getServer(protocol, this, address, conf, null, 1);
      server.start();
      this.bindAddress = NetUtils.getConnectAddress(server);
      super.start();
      amRunning = true;
    }
  public AMRMProtocol getAMResourceManager() {
    if (amResourceManager != null) return amResourceManager;

    LOG.debug("Using configuration: " + conf);

    YarnConfiguration yarnConf = new YarnConfiguration(conf);
    YarnRPC rpc = YarnRPC.create(yarnConf);
    InetSocketAddress rmAddress =
        NetUtils.createSocketAddr(
            yarnConf.get(
                YarnConfiguration.RM_SCHEDULER_ADDRESS,
                YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS));

    LOG.info("Connecting to the resource manager (scheduling) at " + rmAddress);
    amResourceManager = (AMRMProtocol) rpc.getProxy(AMRMProtocol.class, rmAddress, conf);

    return amResourceManager;
  }
Example #25
  public static DatanodeInfo bestNode(LocatedBlock blk) throws IOException {
    TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
    DatanodeInfo chosenNode = null;
    int failures = 0;
    Socket s = null;
    DatanodeInfo[] nodes = blk.getLocations();
    if (nodes == null || nodes.length == 0) {
      throw new IOException("No nodes contain this block");
    }
    while (s == null) {
      if (chosenNode == null) {
        // pick a random node, skipping any we have already failed to reach
        do {
          chosenNode = nodes[rand.nextInt(nodes.length)];
        } while (deadNodes.contains(chosenNode));
      }

      // just ping to check whether the node is alive
      InetSocketAddress targetAddr =
          NetUtils.createSocketAddr(chosenNode.getHost() + ":" + chosenNode.getInfoPort());

      try {
        s = new Socket();
        s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
        s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
      } catch (IOException e) {
        deadNodes.add(chosenNode);
        s.close();
        s = null;
        chosenNode = null; // force a fresh pick on the next iteration
        failures++;
      }
      if (failures == nodes.length)
        throw new IOException("Could not reach the block containing the data. Please try again");
    }
    s.close();
    return chosenNode;
  }
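
A hedged usage sketch of bestNode, mirroring the tail-viewer snippet later in this listing; the file path and client setup are hypothetical:

    // Locate the blocks of a file, then pick a reachable datanode for the last one.
    DFSClient dfs = new DFSClient(jspHelper.nameNodeAddr, jspHelper.conf);
    List<LocatedBlock> blocks =
        dfs.namenode.getBlockLocations("/path/to/file", 0, Long.MAX_VALUE).getLocatedBlocks();
    DatanodeInfo node = bestNode(blocks.get(blocks.size() - 1));
    dfs.close();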
  /** Get a BlockReader for the given block. */
  public BlockReader getBlockReader(LocatedBlock testBlock, int offset, int lenToRead)
      throws IOException {
    InetSocketAddress targetAddr = null;
    Socket sock = null;
    ExtendedBlock block = testBlock.getBlock();
    DatanodeInfo[] nodes = testBlock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getName());
    sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
    sock.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
    sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);

    return BlockReaderFactory.newBlockReader(
        new DFSClient.Conf(conf),
        sock,
        targetAddr.toString() + ":" + block.getBlockId(),
        block,
        testBlock.getBlockToken(),
        offset,
        lenToRead,
        conf.getInt("io.file.buffer.size", 4096),
        true,
        "");
  }
Example #27
  // TODO: HADOOP UPGRADE - replace with YarnConfiguration constants
  private Token<RMDelegationTokenIdentifier> getRMHAToken(
      org.apache.hadoop.yarn.api.records.Token rmDelegationToken) {
    // Build a list of service addresses to form the service name
    ArrayList<String> services = new ArrayList<String>();
    for (String rmId : conf.getStringCollection(RM_HA_IDS)) {
      LOG.info("Yarn Resource Manager id: {}", rmId);
      // Set RM_ID to get the corresponding RM_ADDRESS
      services.add(
          SecurityUtil.buildTokenService(
                  NetUtils.createSocketAddr(
                      conf.get(RM_HOSTNAME_PREFIX + rmId),
                      YarnConfiguration.DEFAULT_RM_PORT,
                      RM_HOSTNAME_PREFIX + rmId))
              .toString());
    }
    Text rmTokenService = new Text(Joiner.on(',').join(services));

    return new Token<RMDelegationTokenIdentifier>(
        rmDelegationToken.getIdentifier().array(),
        rmDelegationToken.getPassword().array(),
        new Text(rmDelegationToken.getKind()),
        rmTokenService);
  }
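
A hedged sketch of the HA configuration feeding getRMHAToken above; the literal key strings stand in for the RM_HA_IDS and RM_HOSTNAME_PREFIX constants and are assumptions:

   // Hypothetical two-node RM HA setup.
   Configuration conf = new Configuration();
   conf.set("yarn.resourcemanager.ha.rm-ids", "rm1,rm2");
   conf.set("yarn.resourcemanager.hostname.rm1", "rm1.example.com");
   conf.set("yarn.resourcemanager.hostname.rm2", "rm2.example.com");
   // With DEFAULT_RM_PORT, the joined token service would look like
   // "rm1.example.com:8032,rm2.example.com:8032".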
Example #28
  public void generateFileChunks(JspWriter out, HttpServletRequest req) throws IOException {
    long startOffset = 0;

    int chunkSizeToView = 0;

    String referrer = req.getParameter("referrer");
    boolean noLink = false;
    if (referrer == null) {
      noLink = true;
    }

    String filename = req.getParameter("filename");
    if (filename == null) {
      out.print("Invalid input (file name absent)");
      return;
    }

    String namenodeInfoPortStr = req.getParameter("namenodeInfoPort");
    int namenodeInfoPort = -1;
    if (namenodeInfoPortStr != null) namenodeInfoPort = Integer.parseInt(namenodeInfoPortStr);

    String chunkSizeToViewStr = req.getParameter("chunkSizeToView");
    if (chunkSizeToViewStr != null && Integer.parseInt(chunkSizeToViewStr) > 0)
      chunkSizeToView = Integer.parseInt(chunkSizeToViewStr);
    else chunkSizeToView = jspHelper.defaultChunkSizeToView;

    if (!noLink) {
      out.print("<h3>Tail of File: ");
      JspHelper.printPathWithLinks(filename, out, namenodeInfoPort);
      out.print("</h3><hr>");
      out.print("<a href=\"" + referrer + "\">Go Back to File View</a><hr>");
    } else {
      out.print("<h3>" + filename + "</h3>");
    }
    out.print("<b>Chunk size to view (in bytes, up to file's DFS block size): </b>");
    out.print(
        "<input type=\"text\" name=\"chunkSizeToView\" value="
            + chunkSizeToView
            + " size=10 maxlength=10>");
    out.print("&nbsp;&nbsp;<input type=\"submit\" name=\"submit\" value=\"Refresh\"><hr>");
    out.print("<input type=\"hidden\" name=\"filename\" value=\"" + filename + "\">");
    out.print(
        "<input type=\"hidden\" name=\"namenodeInfoPort\" value=\"" + namenodeInfoPort + "\">");
    if (!noLink) out.print("<input type=\"hidden\" name=\"referrer\" value=\"" + referrer + "\">");

    // fetch the block from the datanode that has the last block for this file
    DFSClient dfs = new DFSClient(jspHelper.nameNodeAddr, jspHelper.conf);
    List<LocatedBlock> blocks =
        dfs.namenode.getBlockLocations(filename, 0, Long.MAX_VALUE).getLocatedBlocks();
    if (blocks == null || blocks.size() == 0) {
      out.print("No datanodes contain blocks of file " + filename);
      dfs.close();
      return;
    }
    LocatedBlock lastBlk = blocks.get(blocks.size() - 1);
    long blockSize = lastBlk.getBlock().getNumBytes();
    long blockId = lastBlk.getBlock().getBlockId();
    long genStamp = lastBlk.getBlock().getGenerationStamp();
    DatanodeInfo chosenNode;
    try {
      chosenNode = jspHelper.bestNode(lastBlk);
    } catch (IOException e) {
      out.print(e.toString());
      dfs.close();
      return;
    }
    InetSocketAddress addr = NetUtils.createSocketAddr(chosenNode.getName());
    // view the last chunkSizeToView bytes while Tailing
    if (blockSize >= chunkSizeToView) startOffset = blockSize - chunkSizeToView;
    else startOffset = 0;

    out.print("<textarea cols=\"100\" rows=\"25\" wrap=\"virtual\" style=\"width:100%\" READONLY>");
    jspHelper.streamBlockInAscii(
        addr, blockId, genStamp, blockSize, startOffset, chunkSizeToView, out);
    out.print("</textarea>");
    dfs.close();
  }
 private static InetSocketAddress getAddress(String address) {
   return NetUtils.createSocketAddr(address);
 }
  // try reading a block using a BlockReader directly
  protected void tryRead(final Configuration conf, LocatedBlock lblock, boolean shouldSucceed) {
    InetSocketAddress targetAddr = null;
    IOException ioe = null;
    BlockReader blockReader = null;
    ExtendedBlock block = lblock.getBlock();
    try {
      DatanodeInfo[] nodes = lblock.getLocations();
      targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());

      blockReader =
          new BlockReaderFactory(new DfsClientConf(conf))
              .setFileName(
                  BlockReaderFactory.getFileName(
                      targetAddr, "test-blockpoolid", block.getBlockId()))
              .setBlock(block)
              .setBlockToken(lblock.getBlockToken())
              .setInetSocketAddress(targetAddr)
              .setStartOffset(0)
              .setLength(-1)
              .setVerifyChecksum(true)
              .setClientName("TestBlockTokenWithDFS")
              .setDatanodeInfo(nodes[0])
              .setCachingStrategy(CachingStrategy.newDefaultStrategy())
              .setClientCacheContext(ClientContext.getFromConf(conf))
              .setConfiguration(conf)
              .setTracer(FsTracer.get(conf))
              .setRemotePeerFactory(
                  new RemotePeerFactory() {
                    @Override
                    public Peer newConnectedPeer(
                        InetSocketAddress addr,
                        Token<BlockTokenIdentifier> blockToken,
                        DatanodeID datanodeId)
                        throws IOException {
                      Peer peer = null;
                      Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
                      try {
                        sock.connect(addr, HdfsConstants.READ_TIMEOUT);
                        sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
                        peer = DFSUtilClient.peerFromSocket(sock);
                      } finally {
                        if (peer == null) {
                          IOUtils.closeSocket(sock);
                        }
                      }
                      return peer;
                    }
                  })
              .build();
    } catch (IOException ex) {
      ioe = ex;
    } finally {
      if (blockReader != null) {
        try {
          blockReader.close();
        } catch (IOException e) {
          throw new RuntimeException(e);
        }
      }
    }
    if (shouldSucceed) {
      Assert.assertNotNull(
          "OP_READ_BLOCK: access token is invalid, " + "when it is expected to be valid",
          blockReader);
    } else {
      Assert.assertNotNull(
          "OP_READ_BLOCK: access token is valid, " + "when it is expected to be invalid", ioe);
      Assert.assertTrue(
          "OP_READ_BLOCK failed due to reasons other than access token: " + ioe,
          ioe instanceof InvalidBlockTokenException);
    }
  }