protected boolean next(Text key, Text value) throws IOException {
   if (fsin.getPos() < end) {
     try {
       if (readUntilMatch(START_TITLE_MARKER, false)) {
         if (readUntilMatch(END_TITLE_MARKER, true)) {
           int stop = buffer.getLength() - END_TITLE_MARKER.length;
           key.set(buffer.getData(), 0, stop);
           buffer.reset();
           if (readUntilMatch(START_TEXT_MARKER, false)) {
             if (readUntilMatch(END_TEXT_MARKER, true)) {
               // un-escape the XML entities encoding and
               // re-encode the result as raw UTF8 bytes
                stop = buffer.getLength() - END_TEXT_MARKER.length;
                String xmlEscapedContent = new String(buffer.getData(), 0, stop, UTF8);
               value.set(StringEscapeUtils.unescapeXml(xmlEscapedContent).getBytes(UTF8));
               return true;
             }
           }
         }
       }
     } finally {
       buffer.reset();
     }
   }
   return false;
 }
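The reader above leans on a readUntilMatch helper that the snippet does not show. A minimal sketch of what it presumably looks like, assuming fsin is the split's input stream, buffer is the scratch DataOutputBuffer, and end is the split's end offset (the same shape as the well-known XmlInputFormat helper):

  private boolean readUntilMatch(byte[] match, boolean withinBlock) throws IOException {
    int i = 0;
    while (true) {
      int b = fsin.read();
      // end of file
      if (b == -1) return false;
      // while inside a block, save the bytes read
      if (withinBlock) buffer.write(b);
      // check whether we are still matching the marker
      if (b == match[i]) {
        i++;
        if (i >= match.length) return true;
      } else {
        i = 0;
      }
      // outside a block, stop once we pass the end of the split
      if (!withinBlock && i == 0 && fsin.getPos() >= end) return false;
    }
  }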
Example #2
  private int compareInBinary1(SortComparator comp, ITuple tuple1, ITuple tuple2)
      throws IOException {
    DataOutputBuffer buffer1 = new DataOutputBuffer();
    ser.ser(new DatumWrapper(tuple1), buffer1);

    DataOutputBuffer buffer2 = new DataOutputBuffer();
    ser.ser(new DatumWrapper(tuple2), buffer2);

    return comp.compare(
        buffer1.getData(), 0, buffer1.getLength(), buffer2.getData(), 0, buffer2.getLength());
  }
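The round trip above exists so that comp is exercised on raw serialized bytes: both tuples are pushed through the serializer, and the comparator has to order them from the byte ranges alone, letting byte-level ordering be checked against object-level expectations.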
Example #3
  /**
   * Creates a ByteBuffer with serialized {@link Credentials}.
   *
   * @param creds The credentials.
   * @return The ByteBuffer with the credentials.
   * @throws IOException
   */
  public static ByteBuffer createTokenBuffer(Credentials creds) throws IOException {
    DataOutputBuffer dob = new DataOutputBuffer();

    creds.writeTokenStorageToStream(dob);

    return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  }
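A hedged usage sketch of the round trip (creds and the local names are illustrative): the returned buffer can be read straight back into a Credentials object with DataInputByteBuffer, the same idiom the next example applies to a ContainerLaunchContext:

  ByteBuffer tokens = createTokenBuffer(creds);
  DataInputByteBuffer dibb = new DataInputByteBuffer();
  dibb.reset(tokens);
  Credentials restored = new Credentials();
  restored.readTokenStorageStream(dibb);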
Example #4
 private void addTimelineDelegationToken(ContainerLaunchContext clc)
     throws YarnException, IOException {
   Credentials credentials = new Credentials();
   DataInputByteBuffer dibb = new DataInputByteBuffer();
   ByteBuffer tokens = clc.getTokens();
   if (tokens != null) {
     dibb.reset(tokens);
     credentials.readTokenStorageStream(dibb);
     tokens.rewind();
   }
   // If the timeline delegation token is already in the CLC, no need to add
   // one more
   for (org.apache.hadoop.security.token.Token<? extends TokenIdentifier> token :
       credentials.getAllTokens()) {
     if (token.getKind().equals(TimelineDelegationTokenIdentifier.KIND_NAME)) {
       return;
     }
   }
   org.apache.hadoop.security.token.Token<TimelineDelegationTokenIdentifier>
       timelineDelegationToken = getTimelineDelegationToken();
   if (timelineDelegationToken == null) {
     return;
   }
   credentials.addToken(timelineService, timelineDelegationToken);
   if (LOG.isDebugEnabled()) {
     LOG.debug("Add timline delegation token into credentials: " + timelineDelegationToken);
   }
   DataOutputBuffer dob = new DataOutputBuffer();
   credentials.writeTokenStorageToStream(dob);
   tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
   clc.setTokens(tokens);
 }
Example #5
  private void requestNewHdfsDelegationToken(
      ApplicationId applicationId, String user, boolean shouldCancelAtEnd)
      throws IOException, InterruptedException {
    // Get new hdfs tokens for this user
    Credentials credentials = new Credentials();
    Token<?>[] newTokens = obtainSystemTokensForUser(user, credentials);

    // Add new tokens to the toRenew list.
    LOG.info(
        "Received new tokens for " + applicationId + ". Received " + newTokens.length + " tokens.");
    if (newTokens.length > 0) {
      for (Token<?> token : newTokens) {
        if (token.isManaged()) {
          DelegationTokenToRenew tokenToRenew =
              new DelegationTokenToRenew(
                  applicationId, token, getConfig(), Time.now(), shouldCancelAtEnd, user);
          // renew the token to get the next expiration date.
          renewToken(tokenToRenew);
          setTimerForTokenRenewal(tokenToRenew);
          appTokens.get(applicationId).add(tokenToRenew);
          LOG.info("Received new token " + token);
        }
      }
    }
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    ByteBuffer byteBuffer = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
    rmContext.getSystemCredentialsForApps().put(applicationId, byteBuffer);
  }
Example #6
    /* Initiates a call by sending the parameter to the remote server.
     * Note: this is not called from the Connection thread, but by other
     * threads.
     */
    protected void sendParam(Call call) {
      if (shouldCloseConnection.get()) {
        return;
      }

      // For serializing the data to be written.

      final DataOutputBuffer d = new DataOutputBuffer();
      try {
        if (LOG.isDebugEnabled()) LOG.debug(getName() + " sending #" + call.id);

        d.writeInt(0xdeadbeef); // placeholder for data length
        d.writeInt(call.id);
        call.param.write(d);
        byte[] data = d.getData();
        int dataLength = d.getLength();
        // fill in the placeholder
        Bytes.putInt(data, 0, dataLength - 4);
        //noinspection SynchronizeOnNonFinalField
        synchronized (this.out) { // FindBugs IS2_INCONSISTENT_SYNC
          out.write(data, 0, dataLength);
          out.flush();
        }
      } catch (IOException e) {
        markClosed(e);
      } finally {
        // the buffer is just an in-memory buffer, but it is still polite to
        // close early
        IOUtils.closeStream(d);
      }
    }
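The length-placeholder idiom above is worth isolating. A sketch under the same assumptions (Bytes.putInt is HBase's array helper; plain java.nio performs the same backfill):

  static byte[] frame(byte[] payload) throws IOException {
    DataOutputBuffer d = new DataOutputBuffer();
    d.writeInt(0xdeadbeef); // reserve 4 bytes; patched below
    d.write(payload);
    // backfill the real length: everything after the length field itself
    ByteBuffer.wrap(d.getData(), 0, 4).putInt(d.getLength() - 4);
    return Arrays.copyOf(d.getData(), d.getLength());
  }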
Example #7
  @Override
  protected void flushAndSync(boolean durable) throws IOException {
    int numReadyBytes = buf.countReadyBytes();
    if (numReadyBytes > 0) {
      int numReadyTxns = buf.countReadyTxns();
      long firstTxToFlush = buf.getFirstReadyTxId();

      assert numReadyTxns > 0;

      // Copy from our double-buffer into a new byte array. This is for
      // two reasons:
      // 1) The IPC code has no way of specifying to send only a slice of
      //    a larger array.
      // 2) because the calls to the underlying nodes are asynchronous, we
      //    need a defensive copy to avoid accidentally mutating the buffer
      //    before it is sent.
      DataOutputBuffer bufToSend = new DataOutputBuffer(numReadyBytes);
      buf.flushTo(bufToSend);
      assert bufToSend.getLength() == numReadyBytes;
      byte[] data = bufToSend.getData();
      assert data.length == bufToSend.getLength();

      QuorumCall<AsyncLogger, Void> qcall =
          loggers.sendEdits(
              segmentTxId, firstTxToFlush,
              numReadyTxns, data);
      loggers.waitForWriteQuorum(qcall, writeTimeoutMs, "sendEdits");

      // Since we successfully wrote this batch, let the loggers know. Any future
      // RPCs will thus let the loggers know of the most recent transaction, even
      // if a logger has fallen behind.
      loggers.setCommittedTxId(firstTxToFlush + numReadyTxns - 1);
    }
  }
Example #8
 public static ByteBuffer serializeServiceData(Token<JobTokenIdentifier> jobToken)
     throws IOException {
   // TODO these bytes should be versioned
   DataOutputBuffer jobToken_dob = new DataOutputBuffer();
   jobToken.write(jobToken_dob);
   return ByteBuffer.wrap(jobToken_dob.getData(), 0, jobToken_dob.getLength());
 }
Example #9
    public boolean next(LongWritable key, Text value) throws IOException {
      if (pos < end) {
        if (readUntilMatch(startTag, false)) {
          recordStartPos = pos - startTag.length;

          try {
            buffer.write(startTag);
            if (readUntilMatch(endTag, true)) {
              key.set(recordStartPos);
              value.set(buffer.getData(), 0, buffer.getLength());
              return true;
            }
          } finally {
            // Because input streams of gzipped files are not
            // seekable (specifically, do not support getPos), we
            // need to keep track of bytes consumed ourselves.

            // This is a sanity check to make sure our internal
            // computation of bytes consumed is accurate. This
            // should be removed later for efficiency once we
            // confirm that this code works correctly.

            if (fsin instanceof Seekable) {
              if (pos != ((Seekable) fsin).getPos()) {
                throw new RuntimeException("bytes consumed error!");
              }
            }

            buffer.reset();
          }
        }
      }
      return false;
    }
Example #10
 private static void rawValueToTextBytes(
     DataOutputBuffer dataBuffer, DataInputBuffer inputBuffer, TextBytes textOut)
     throws IOException {
   inputBuffer.reset(dataBuffer.getData(), dataBuffer.getLength());
   int newLength = WritableUtils.readVInt(inputBuffer);
   textOut.set(inputBuffer.getData(), inputBuffer.getPosition(), newLength);
 }
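For reference, a hypothetical writer side of the layout this method expects, namely a vint byte count followed by the raw bytes (WritableUtils.writeVInt is the matching encoder):

  DataOutputBuffer dataBuffer = new DataOutputBuffer();
  byte[] raw = "some value".getBytes(StandardCharsets.UTF_8);
  WritableUtils.writeVInt(dataBuffer, raw.length);
  dataBuffer.write(raw);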
Example #11
  public static void setTokensFor(
      ContainerLaunchContext amContainer, List<Path> paths, Configuration conf) throws IOException {
    Credentials credentials = new Credentials();
    // for HDFS
    TokenCache.obtainTokensForNamenodes(credentials, paths.toArray(new Path[0]), conf);
    // for HBase
    obtainTokenForHBase(credentials, conf);
    // for user
    UserGroupInformation currUsr = UserGroupInformation.getCurrentUser();

    Collection<Token<? extends TokenIdentifier>> usrTok = currUsr.getTokens();
    for (Token<? extends TokenIdentifier> token : usrTok) {
      final Text id = new Text(token.getIdentifier());
      LOG.info("Adding user token " + id + " with " + token);
      credentials.addToken(id, token);
    }
    try (DataOutputBuffer dob = new DataOutputBuffer()) {
      credentials.writeTokenStorageToStream(dob);

      if (LOG.isDebugEnabled()) {
        LOG.debug("Wrote tokens. Credentials buffer length: " + dob.getLength());
      }

      ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
      amContainer.setTokens(securityTokens);
    }
  }
Example #12
  /**
   * Input/output of simple records.
   *
   * @throws Exception if the test fails
   */
  @SuppressWarnings("unchecked")
  @Test
  public void simple_record() throws Exception {
    ModelLoader loader = generate();

    Class<?> type = loader.modelType("Simple");
    assertThat(type.isAnnotationPresent(ModelInputLocation.class), is(true));
    assertThat(type.isAnnotationPresent(ModelOutputLocation.class), is(true));

    ModelWrapper object = loader.newModel("Simple");
    DataOutputBuffer output = new DataOutputBuffer();
    ModelOutput<Object> modelOut =
        (ModelOutput<Object>)
            type.getAnnotation(ModelOutputLocation.class)
                .value()
                .getDeclaredConstructor(RecordEmitter.class)
                .newInstance(new TsvEmitter(new OutputStreamWriter(output, "UTF-8")));

    object.set("sid", 1L);
    object.set("value", new Text("hello"));
    modelOut.write(object.unwrap());

    object.set("sid", 2L);
    object.set("value", new Text("world"));
    modelOut.write(object.unwrap());

    object.set("sid", 3L);
    object.set("value", null);
    modelOut.write(object.unwrap());
    modelOut.close();

    DataInputBuffer input = new DataInputBuffer();
    input.reset(output.getData(), output.getLength());
    ModelInput<Object> modelIn =
        (ModelInput<Object>)
            type.getAnnotation(ModelInputLocation.class)
                .value()
                .getDeclaredConstructor(RecordParser.class)
                .newInstance(new TsvParser(new InputStreamReader(input, "UTF-8")));
    ModelWrapper copy = loader.newModel("Simple");

    modelIn.readTo(copy.unwrap());
    assertThat(copy.get("sid"), is((Object) 1L));
    assertThat(copy.get("value"), is((Object) new Text("hello")));

    modelIn.readTo(copy.unwrap());
    assertThat(copy.get("sid"), is((Object) 2L));
    assertThat(copy.get("value"), is((Object) new Text("world")));

    modelIn.readTo(copy.unwrap());
    assertThat(copy.get("sid"), is((Object) 3L));
    assertThat(copy.getOption("value").isNull(), is(true));

    assertThat(input.read(), is(-1));
    modelIn.close();
  }
Example #13
 static byte[] write(Writable writable) {
   DataOutputBuffer buffer = new DataOutputBuffer();
   buffer.reset();
   try {
     writable.write(buffer);
   } catch (IOException e) {
     throw new AssertionError(e);
   }
   return Arrays.copyOf(buffer.getData(), buffer.getLength());
 }
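A hedged sketch of the opposite direction, which the original does not include (Writable has no factory, so the caller supplies the instance to fill):

  static <T extends Writable> T read(byte[] bytes, T into) {
    DataInputBuffer in = new DataInputBuffer();
    in.reset(bytes, bytes.length);
    try {
      into.readFields(in);
    } catch (IOException e) {
      throw new AssertionError(e);
    }
    return into;
  }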
Example #14
    /* Write the header for each connection
     * Out is not synchronized because only the first thread does this.
     */
    private void writeHeader() throws IOException {
      out.write(HBaseServer.HEADER.array());
      out.write(HBaseServer.CURRENT_VERSION);
      // When there are more fields we can have ConnectionHeader Writable.
      DataOutputBuffer buf = new DataOutputBuffer();
      header.write(buf);

      int bufLen = buf.getLength();
      out.writeInt(bufLen);
      out.write(buf.getData(), 0, bufLen);
    }
Example #15
 @Deprecated
 private byte[] getBytes(HRegionInfo hri) throws IOException {
   DataOutputBuffer out = new DataOutputBuffer();
   try {
     hri.write(out);
     // getData() exposes the whole backing array, which is usually longer than
     // the valid data; trim to getLength() so no garbage bytes trail the record
     return Arrays.copyOf(out.getData(), out.getLength());
   } finally {
     out.close();
   }
 }
Example #16
  /**
   * All primitive types.
   *
   * @throws Exception if the test fails
   */
  @SuppressWarnings("unchecked")
  @Test
  public void primitives() throws Exception {
    ModelLoader loader = generate();

    Class<?> type = loader.modelType("Primitives");
    assertThat(type.isAnnotationPresent(ModelInputLocation.class), is(true));
    assertThat(type.isAnnotationPresent(ModelOutputLocation.class), is(true));

    ModelWrapper object = loader.newModel("Primitives");

    object.set("type_boolean", true);
    object.set("type_byte", (byte) 64);
    object.set("type_short", (short) 256);
    object.set("type_int", 100);
    object.set("type_long", 200L);
    object.set("type_float", 300.f);
    object.set("type_double", 400.d);
    object.set("type_decimal", new BigDecimal("1234.567"));
    object.set("type_text", new Text("Hello, world!"));
    object.set("type_date", new Date(2011, 3, 31));
    object.set("type_datetime", new DateTime(2011, 3, 31, 23, 30, 1));

    DataOutputBuffer output = new DataOutputBuffer();
    ModelOutput<Object> modelOut =
        (ModelOutput<Object>)
            type.getAnnotation(ModelOutputLocation.class)
                .value()
                .getDeclaredConstructor(RecordEmitter.class)
                .newInstance(new TsvEmitter(new OutputStreamWriter(output, "UTF-8")));
    modelOut.write(object.unwrap());
    modelOut.write(object.unwrap());
    modelOut.write(object.unwrap());
    modelOut.close();

    DataInputBuffer input = new DataInputBuffer();
    input.reset(output.getData(), output.getLength());
    ModelInput<Object> modelIn =
        (ModelInput<Object>)
            type.getAnnotation(ModelInputLocation.class)
                .value()
                .getDeclaredConstructor(RecordParser.class)
                .newInstance(new TsvParser(new InputStreamReader(input, "UTF-8")));
    ModelWrapper copy = loader.newModel("Primitives");
    modelIn.readTo(copy.unwrap());
    assertThat(object.unwrap(), equalTo(copy.unwrap()));
    assertThat(input.read(), is(-1));
    modelIn.close();
  }
Example #17
  protected static void assertSerializable(HadoopSerialization ser, ITuple tuple, boolean debug)
      throws IOException {
    DataInputBuffer input = new DataInputBuffer();
    DataOutputBuffer output = new DataOutputBuffer();
    DatumWrapper<ITuple> wrapper = new DatumWrapper<ITuple>(tuple);
    ser.ser(wrapper, output);

    input.reset(output.getData(), 0, output.getLength());
    DatumWrapper<ITuple> wrapper2 = new DatumWrapper<ITuple>();

    wrapper2 = ser.deser(wrapper2, input);
    if (debug) {
      System.out.println("D:" + wrapper2.datum());
    }
    assertEquals(tuple, wrapper2.datum());
  }
Example #18
  /** Used by child copy constructors. */
  protected synchronized void copy(Writable other) {
    if (other != null) {
      try {
        DataOutputBuffer out = new DataOutputBuffer();
        other.write(out);
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        readFields(in);

      } catch (IOException e) {
        throw new IllegalArgumentException("map cannot be copied: " + e.getMessage());
      }

    } else {
      throw new IllegalArgumentException("source map cannot be null");
    }
  }
Example #19
 @Test
 public void testReadWriteReplicaState() {
   try {
     DataOutputBuffer out = new DataOutputBuffer();
     DataInputBuffer in = new DataInputBuffer();
     for (HdfsServerConstants.ReplicaState repState : HdfsServerConstants.ReplicaState.values()) {
       repState.write(out);
       in.reset(out.getData(), out.getLength());
       HdfsServerConstants.ReplicaState result = HdfsServerConstants.ReplicaState.read(in);
       assertTrue("testReadWrite error !!!", repState == result);
       out.reset();
       in.reset();
     }
   } catch (Exception ex) {
     fail("testReadWrite ex error ReplicaState");
   }
 }
Example #20
 @Override
 public boolean nextKeyValue() throws IOException, InterruptedException {
   if (fsin.getPos() < end) {
     if (readUntilMatch(startTag, false)) {
       try {
         buffer.write(startTag);
         if (readUntilMatch(endTag, true)) {
           key.set(fsin.getPos());
           value.set(buffer.getData(), 0, buffer.getLength());
           return true;
         }
       } finally {
         buffer.reset();
       }
     }
   }
   return false;
 }
Example #21
 public int read() throws IOException {
   int ret;
   if (null == inbuf || -1 == (ret = inbuf.read())) {
     if (!r.next(key, val)) {
       return -1;
     }
     byte[] tmp = key.toString().getBytes();
     outbuf.write(tmp, 0, tmp.length);
     outbuf.write('\t');
     tmp = val.toString().getBytes();
     outbuf.write(tmp, 0, tmp.length);
     outbuf.write('\n');
     inbuf.reset(outbuf.getData(), outbuf.getLength());
     outbuf.reset();
     ret = inbuf.read();
   }
   return ret;
 }
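Because read() exposes the records byte by byte, standard line-oriented readers can sit directly on top. A hedged usage sketch, where recordStream stands in for an InputStream built around the read() above:

  BufferedReader lines =
      new BufferedReader(new InputStreamReader(recordStream, StandardCharsets.UTF_8));
  String line; // one "key<TAB>value" line per record
  while ((line = lines.readLine()) != null) {
    String[] kv = line.split("\t", 2);
    System.out.println(kv[0] + " -> " + kv[1]);
  }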
Example #22
  protected static void assertSerializable(
      TupleSerializer ser, TupleDeserializer deser, DatumWrapper<ITuple> tuple, boolean debug)
      throws IOException {
    DataOutputBuffer output = new DataOutputBuffer();
    ser.open(output);
    ser.serialize(tuple);
    ser.close();

    DataInputBuffer input = new DataInputBuffer();
    input.reset(output.getData(), 0, output.getLength());
    DatumWrapper<ITuple> deserializedTuple = new DatumWrapper<ITuple>();

    deser.open(input);
    deserializedTuple = deser.deserialize(deserializedTuple);
    deser.close();

    if (debug) {
      System.out.println("D:" + deserializedTuple.datum());
    }
    assertEquals(tuple.datum(), deserializedTuple.datum());
  }
Example #23
  private ByteBuffer getSecurityTokens() throws IOException {
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    Closer closer = Closer.create();
    try {
      // Remove the AM->RM token so that containers cannot access it; this must
      // happen before the credentials are serialized, or the token would still
      // be present in the written bytes
      Iterator<Token<?>> tokenIterator = credentials.getAllTokens().iterator();
      while (tokenIterator.hasNext()) {
        Token<?> token = tokenIterator.next();
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
          tokenIterator.remove();
        }
      }

      DataOutputBuffer dataOutputBuffer = closer.register(new DataOutputBuffer());
      credentials.writeTokenStorageToStream(dataOutputBuffer);

      return ByteBuffer.wrap(dataOutputBuffer.getData(), 0, dataOutputBuffer.getLength());
    } catch (Throwable t) {
      throw closer.rethrow(t);
    } finally {
      closer.close();
    }
  }
Example #24
  @Test(timeout = 5000)
  public void testGetBytePayload() throws IOException {
    int numBuckets = 10;
    VertexManagerPluginContext context = mock(VertexManagerPluginContext.class);
    CustomVertexConfiguration vertexConf =
        new CustomVertexConfiguration(numBuckets, TezWork.VertexType.INITIALIZED_EDGES);
    DataOutputBuffer dob = new DataOutputBuffer();
    vertexConf.write(dob);
    UserPayload payload = UserPayload.create(ByteBuffer.wrap(dob.getData()));
    when(context.getUserPayload()).thenReturn(payload);

    CustomPartitionVertex vm = new CustomPartitionVertex(context);
    vm.initialize();

    // prepare empty routing table
    Multimap<Integer, Integer> routingTable = HashMultimap.<Integer, Integer>create();
    payload = vm.getBytePayload(routingTable);
    // get conf from user payload
    CustomEdgeConfiguration edgeConf = new CustomEdgeConfiguration();
    DataInputByteBuffer dibb = new DataInputByteBuffer();
    dibb.reset(payload.getPayload());
    edgeConf.readFields(dibb);
    assertEquals(numBuckets, edgeConf.getNumBuckets());
  }
Example #25
  /*
   * Helper function to create an edge property from an edge type.
   */
  private EdgeProperty createEdgeProperty(TezEdgeProperty edgeProp) throws IOException {
    DataMovementType dataMovementType;
    Class logicalInputClass;
    Class logicalOutputClass;

    EdgeProperty edgeProperty = null;
    EdgeType edgeType = edgeProp.getEdgeType();
    switch (edgeType) {
      case BROADCAST_EDGE:
        dataMovementType = DataMovementType.BROADCAST;
        logicalOutputClass = OnFileUnorderedKVOutput.class;
        logicalInputClass = ShuffledUnorderedKVInput.class;
        break;

      case CUSTOM_EDGE:
        dataMovementType = DataMovementType.CUSTOM;
        logicalOutputClass = OnFileUnorderedPartitionedKVOutput.class;
        logicalInputClass = ShuffledUnorderedKVInput.class;
        EdgeManagerDescriptor edgeDesc =
            new EdgeManagerDescriptor(CustomPartitionEdge.class.getName());
        CustomEdgeConfiguration edgeConf =
            new CustomEdgeConfiguration(edgeProp.getNumBuckets(), null);
        DataOutputBuffer dob = new DataOutputBuffer();
        edgeConf.write(dob);
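        // note: getData() hands back the entire backing array, which may run past
        // getLength(); the extra slack simply travels along inside the payload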
        byte[] userPayload = dob.getData();
        edgeDesc.setUserPayload(userPayload);
        edgeProperty =
            new EdgeProperty(
                edgeDesc,
                DataSourceType.PERSISTED,
                SchedulingType.SEQUENTIAL,
                new OutputDescriptor(logicalOutputClass.getName()),
                new InputDescriptor(logicalInputClass.getName()));
        break;

      case CUSTOM_SIMPLE_EDGE:
        dataMovementType = DataMovementType.SCATTER_GATHER;
        logicalOutputClass = OnFileUnorderedPartitionedKVOutput.class;
        logicalInputClass = ShuffledUnorderedKVInput.class;
        break;

      case SIMPLE_EDGE:
      default:
        dataMovementType = DataMovementType.SCATTER_GATHER;
        logicalOutputClass = OnFileSortedOutput.class;
        logicalInputClass = ShuffledMergedInputLegacy.class;
        break;
    }

    if (edgeProperty == null) {
      edgeProperty =
          new EdgeProperty(
              dataMovementType,
              DataSourceType.PERSISTED,
              SchedulingType.SEQUENTIAL,
              new OutputDescriptor(logicalOutputClass.getName()),
              new InputDescriptor(logicalInputClass.getName()));
    }

    return edgeProperty;
  }
Example #26
  @Override
  // Read the page header
  // -1: EOF
  // 1 - outside the <page> tag
  // 2 - just passed the <page> tag but outside the <title>
  // 3 - just passed the <title> tag
  // 4 - just passed the </title> tag but outside the <namespace>
  // 5 - just passed the <namespace>
  // 6 - just passed the </namespace> but outside the <id>
  // 7 - just passed the (page's) <id>
  // 8 - just passed the </id> tag but outside the <revision>
  // 9 - (optionally) just passed the <redirect>
  // 10 - just passed the (next) <revision>
  protected Ack readToPageHeader(RevisionHeader meta) throws IOException {
    int i = 0;
    int flag = 2;
    boolean skipped = false;
    int revOrRedirect = -1;
    try (DataOutputBuffer pageTitle = new DataOutputBuffer();
        DataOutputBuffer nsBuf = new DataOutputBuffer();
        DataOutputBuffer keyBuf = new DataOutputBuffer()) {

      while (true) {
        if (!fetchMore()) return Ack.EOF;
        while (hasData()) {
          byte b = nextByte();

          // when passing the namespace and we realize that
          // this is not an article, and that the option of skipping
          // non-article pages is on, we simply skip everything until
          // the closing </page>
          if (skipped) {
            if (flag >= 6) {
              Log.warn("Peculiar read after skipping namespace");
              /*
              if (b == END_PAGE[i]) {
              	i++;
              } else i = 0;
              if (i >= END_PAGE.length) {
              	return Ack.SKIPPED;
              } */
              return Ack.FAILED;
            } else return Ack.SKIPPED;
          }

          if (flag == 2) {
            if (b == START_TITLE[i]) {
              i++;
            } else i = 0;
            if (i >= START_TITLE.length) {
              flag = 3;
              i = 0;
            }
          }

          // put everything between <title></title> block into title
          else if (flag == 3) {
            if (b == END_TITLE[i]) {
              i++;
            } else i = 0;
            pageTitle.write(b);
            if (i >= END_TITLE.length) {
              flag = 4;
              String title =
                  new String(pageTitle.getData(), 0, pageTitle.getLength() - END_TITLE.length);
              meta.setPageTitle(title);
              pageTitle.reset();
              i = 0;
            }
          } else if (flag == 4) {
            if (b == START_NAMESPACE[i]) {
              i++;
            } else i = 0;
            if (i >= START_NAMESPACE.length) {
              flag = 5;
              i = 0;
            }
          } else if (flag == 5) {
            if (b == END_NAMESPACE[i]) {
              i++;
            } else i = 0;
            nsBuf.write(b);
            if (i >= END_NAMESPACE.length) {
              flag = 6;
              String nsStr =
                  new String(nsBuf.getData(), 0, nsBuf.getLength() - END_NAMESPACE.length);
              int ns = Integer.parseInt(nsStr);
              nsBuf.reset();
              if (ns != 0) {
                if (skipNonArticles) {
                  skipped = true;
                  meta.clear();
                  return Ack.SKIPPED;
                }
              }
              meta.setNamespace(ns);
              i = 0;
            }
          } else if (flag == 6) {
            if (b == START_ID[i]) {
              i++;
            } else i = 0;
            if (i >= START_ID.length) {
              flag = 7;
              i = 0;
            }
          }

          // put everything in outer <id></id> block into keyBuf
          else if (flag == 7) {
            if (b == END_ID[i]) {
              i++;
            } else i = 0;
            keyBuf.write(b);
            if (i >= END_ID.length) {
              flag = 8;
              String idStr = new String(keyBuf.getData(), 0, keyBuf.getLength() - END_ID.length);
              long pageId = Long.parseLong(idStr);
              meta.setPageId(pageId);
              i = 0;
            }
          } else if (flag == 8) {
            int curMatch = 0;
            if ((i < START_REVISION.length && b == START_REVISION[i])
                && (i < START_REDIRECT.length && b == START_REDIRECT[i])

                // subtle bug here: some tag names can overlap
                // multiple times
                && (revOrRedirect == 3 || revOrRedirect == -1)) {
              curMatch = 3;
            } else if (i < START_REVISION.length && b == START_REVISION[i] && revOrRedirect != 2) {
              curMatch = 1;
            } else if (i < START_REDIRECT.length && b == START_REDIRECT[i] && revOrRedirect != 1) {
              curMatch = 2;
            } else {
              curMatch = 0;
            }
            if (curMatch > 0 && (i == 0 || revOrRedirect == 3 || curMatch == revOrRedirect)) {
              i++;
              revOrRedirect = curMatch;
            } else i = 0;
            if ((revOrRedirect == 2 || revOrRedirect == 3) && i >= START_REDIRECT.length) {
              if (skipRedirect) {
                skipped = true;
                meta.clear();
                return Ack.SKIPPED;
              }
              revOrRedirect = -1;
              flag = 9;
              i = 0;
            } else if ((revOrRedirect == 1 || revOrRedirect == 3) && i >= START_REVISION.length) {
              flag = 10;
              revOrRedirect = -1;
              return Ack.PASSED_TO_NEXT_TAG;
            }
          } else if (flag == 9 && !skipRedirect) {
            if (b == START_REVISION[i]) {
              i++;
            } else i = 0;
            if (i >= START_REVISION.length) {
              flag = 10;
              return Ack.PASSED_TO_NEXT_TAG;
            }
          }
        }
      }
    }
  }
Example #27
  /**
   * Creates in-memory segments.
   *
   * @return the list of in-memory segments
   * @throws IOException
   */
  public List<TezMerger.Segment> createInMemStreams() throws IOException {
    int numberOfStreams = Math.max(2, rnd.nextInt(10));
    LOG.info("No of streams : " + numberOfStreams);

    SerializationFactory serializationFactory = new SerializationFactory(conf);
    Serializer keySerializer = serializationFactory.getSerializer(keyClass);
    Serializer valueSerializer = serializationFactory.getSerializer(valClass);

    LocalDirAllocator localDirAllocator =
        new LocalDirAllocator(TezRuntimeFrameworkConfigs.LOCAL_DIRS);
    InputContext context = createTezInputContext();
    MergeManager mergeManager =
        new MergeManager(
            conf,
            fs,
            localDirAllocator,
            context,
            null,
            null,
            null,
            null,
            null,
            1024 * 1024 * 10,
            null,
            false,
            -1);

    DataOutputBuffer keyBuf = new DataOutputBuffer();
    DataOutputBuffer valBuf = new DataOutputBuffer();
    DataInputBuffer keyIn = new DataInputBuffer();
    DataInputBuffer valIn = new DataInputBuffer();
    keySerializer.open(keyBuf);
    valueSerializer.open(valBuf);

    List<TezMerger.Segment> segments = new LinkedList<TezMerger.Segment>();
    for (int i = 0; i < numberOfStreams; i++) {
      BoundedByteArrayOutputStream bout = new BoundedByteArrayOutputStream(1024 * 1024);
      InMemoryWriter writer = new InMemoryWriter(bout);
      Map<Writable, Writable> data = createData();
      // write data
      for (Map.Entry<Writable, Writable> entry : data.entrySet()) {
        keySerializer.serialize(entry.getKey());
        valueSerializer.serialize(entry.getValue());
        keyIn.reset(keyBuf.getData(), 0, keyBuf.getLength());
        valIn.reset(valBuf.getData(), 0, valBuf.getLength());
        writer.append(keyIn, valIn);
        originalData.put(entry.getKey(), entry.getValue());
        keyBuf.reset();
        valBuf.reset();
        keyIn.reset();
        valIn.reset();
      }
      IFile.Reader reader =
          new InMemoryReader(mergeManager, null, bout.getBuffer(), 0, bout.getBuffer().length);
      segments.add(new TezMerger.Segment(reader, true));

      data.clear();
      writer.close();
    }
    return segments;
  }
Example #28
  public void testBinary() throws IOException, InterruptedException {
    Configuration conf = new Configuration();
    Job job = new Job(conf);

    Path outdir = new Path(System.getProperty("test.build.data", "/tmp"), "outseq");
    Random r = new Random();
    long seed = r.nextLong();
    r.setSeed(seed);

    FileOutputFormat.setOutputPath(job, outdir);

    SequenceFileAsBinaryOutputFormat.setSequenceFileOutputKeyClass(job, IntWritable.class);
    SequenceFileAsBinaryOutputFormat.setSequenceFileOutputValueClass(job, DoubleWritable.class);

    SequenceFileAsBinaryOutputFormat.setCompressOutput(job, true);
    SequenceFileAsBinaryOutputFormat.setOutputCompressionType(job, CompressionType.BLOCK);

    BytesWritable bkey = new BytesWritable();
    BytesWritable bval = new BytesWritable();

    TaskAttemptContext context =
        MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
    OutputFormat<BytesWritable, BytesWritable> outputFormat =
        new SequenceFileAsBinaryOutputFormat();
    OutputCommitter committer = outputFormat.getOutputCommitter(context);
    committer.setupJob(job);
    RecordWriter<BytesWritable, BytesWritable> writer = outputFormat.getRecordWriter(context);

    IntWritable iwritable = new IntWritable();
    DoubleWritable dwritable = new DoubleWritable();
    DataOutputBuffer outbuf = new DataOutputBuffer();
    LOG.info("Creating data by SequenceFileAsBinaryOutputFormat");
    try {
      for (int i = 0; i < RECORDS; ++i) {
        iwritable = new IntWritable(r.nextInt());
        iwritable.write(outbuf);
        bkey.set(outbuf.getData(), 0, outbuf.getLength());
        outbuf.reset();
        dwritable = new DoubleWritable(r.nextDouble());
        dwritable.write(outbuf);
        bval.set(outbuf.getData(), 0, outbuf.getLength());
        outbuf.reset();
        writer.write(bkey, bval);
      }
    } finally {
      writer.close(context);
    }
    committer.commitTask(context);
    committer.commitJob(job);

    InputFormat<IntWritable, DoubleWritable> iformat =
        new SequenceFileInputFormat<IntWritable, DoubleWritable>();
    int count = 0;
    r.setSeed(seed);
    SequenceFileInputFormat.setInputPaths(job, outdir);
    LOG.info("Reading data by SequenceFileInputFormat");
    for (InputSplit split : iformat.getSplits(job)) {
      RecordReader<IntWritable, DoubleWritable> reader = iformat.createRecordReader(split, context);
      MapContext<IntWritable, DoubleWritable, BytesWritable, BytesWritable> mcontext =
          new MapContextImpl<IntWritable, DoubleWritable, BytesWritable, BytesWritable>(
              job.getConfiguration(),
              context.getTaskAttemptID(),
              reader,
              null,
              null,
              MapReduceTestUtil.createDummyReporter(),
              split);
      reader.initialize(split, mcontext);
      try {
        int sourceInt;
        double sourceDouble;
        while (reader.nextKeyValue()) {
          sourceInt = r.nextInt();
          sourceDouble = r.nextDouble();
          iwritable = reader.getCurrentKey();
          dwritable = reader.getCurrentValue();
          assertEquals(
              "Keys don't match: " + "*" + iwritable.get() + ":" + sourceInt + "*",
              sourceInt,
              iwritable.get());
          assertTrue(
              "Vals don't match: " + "*" + dwritable.get() + ":" + sourceDouble + "*",
              Double.compare(dwritable.get(), sourceDouble) == 0);
          ++count;
        }
      } finally {
        reader.close();
      }
    }
    assertEquals("Some records not found", RECORDS, count);
  }
Example #29
  /**
   * Launch application for the dag represented by this client.
   *
   * @throws YarnException
   * @throws IOException
   */
  public void startApplication() throws YarnException, IOException {
    Class<?>[] defaultClasses;

    if (applicationType.equals(YARN_APPLICATION_TYPE)) {
      // TODO restrict the security check to only check if security is enabled for webservices.
      if (UserGroupInformation.isSecurityEnabled()) {
        defaultClasses = DATATORRENT_SECURITY_CLASSES;
      } else {
        defaultClasses = DATATORRENT_CLASSES;
      }
    } else {
      throw new IllegalStateException(applicationType + " is not a valid application type.");
    }

    LinkedHashSet<String> localJarFiles = findJars(dag, defaultClasses);

    if (resources != null) {
      localJarFiles.addAll(resources);
    }

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info(
        "Got Cluster metric info from ASM"
            + ", numNodeManagers="
            + clusterMetrics.getNumNodeManagers());

    // GetClusterNodesRequest clusterNodesReq = Records.newRecord(GetClusterNodesRequest.class);
    // GetClusterNodesResponse clusterNodesResp =
    // rmClient.clientRM.getClusterNodes(clusterNodesReq);
    // LOG.info("Got Cluster node info from ASM");
    // for (NodeReport node : clusterNodesResp.getNodeReports()) {
    //  LOG.info("Got node report from ASM for"
    //           + ", nodeId=" + node.getNodeId()
    //           + ", nodeAddress" + node.getHttpAddress()
    //           + ", nodeRackName" + node.getRackName()
    //           + ", nodeNumContainers" + node.getNumContainers()
    //           + ", nodeHealthStatus" + node.getHealthReport());
    // }
    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
      for (QueueACL userAcl : aclInfo.getUserAcls()) {
        LOG.info(
            "User ACL Info for Queue"
                + ", queueName="
                + aclInfo.getQueueName()
                + ", userAcl="
                + userAcl.name());
      }
    }

    // Get a new application id
    YarnClientApplication newApp = yarnClient.createApplication();
    appId = newApp.getNewApplicationResponse().getApplicationId();

    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = newApp.getNewApplicationResponse().getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);
    int amMemory = dag.getMasterMemoryMB();
    if (amMemory > maxMem) {
      LOG.info(
          "AM memory specified above max threshold of cluster. Using max value."
              + ", specified="
              + amMemory
              + ", max="
              + maxMem);
      amMemory = maxMem;
    }

    if (dag.getAttributes().get(LogicalPlan.APPLICATION_ID) == null) {
      dag.setAttribute(LogicalPlan.APPLICATION_ID, appId.toString());
    }

    // Create launch context for app master
    LOG.info("Setting up application submission context for ASM");
    ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class);

    // set the application id
    appContext.setApplicationId(appId);
    // set the application name
    appContext.setApplicationName(dag.getValue(LogicalPlan.APPLICATION_NAME));
    appContext.setApplicationType(this.applicationType);
    if (YARN_APPLICATION_TYPE.equals(this.applicationType)) {
      // appContext.setMaxAppAttempts(1); // no retries until Stram is HA
    }

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // Setup security tokens
    // If security is enabled get ResourceManager and NameNode delegation tokens.
    // Set these tokens on the container so that they are sent as part of application submission.
    // This also sets them up for renewal by the ResourceManager. The NameNode
    // delegation token is also used by the ResourceManager to fetch the jars
    // from HDFS and set them up for the application master launch.
    if (UserGroupInformation.isSecurityEnabled()) {
      Credentials credentials = new Credentials();
      String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
      if (tokenRenewer == null || tokenRenewer.length() == 0) {
        throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
      }

      // For now, only getting tokens for the default file-system.
      FileSystem fs = StramClientUtils.newFileSystemInstance(conf);
      try {
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
          for (Token<?> token : tokens) {
            LOG.info("Got dt for " + fs.getUri() + "; " + token);
          }
        }
      } finally {
        fs.close();
      }

      addRMDelegationToken(tokenRenewer, credentials);

      DataOutputBuffer dob = new DataOutputBuffer();
      credentials.writeTokenStorageToStream(dob);
      ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
      amContainer.setTokens(fsTokens);
    }

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    // copy required jar files to dfs, to be localized for containers
    FileSystem fs = StramClientUtils.newFileSystemInstance(conf);
    try {
      Path appsBasePath =
          new Path(StramClientUtils.getDTDFSRootDir(fs, conf), StramClientUtils.SUBDIR_APPS);
      Path appPath = new Path(appsBasePath, appId.toString());

      String libJarsCsv = copyFromLocal(fs, appPath, localJarFiles.toArray(new String[] {}));

      LOG.info("libjars: {}", libJarsCsv);
      dag.getAttributes().put(LogicalPlan.LIBRARY_JARS, libJarsCsv);
      LaunchContainerRunnable.addFilesToLocalResources(
          LocalResourceType.FILE, libJarsCsv, localResources, fs);

      if (archives != null) {
        String[] localFiles = archives.split(",");
        String archivesCsv = copyFromLocal(fs, appPath, localFiles);
        LOG.info("archives: {}", archivesCsv);
        dag.getAttributes().put(LogicalPlan.ARCHIVES, archivesCsv);
        LaunchContainerRunnable.addFilesToLocalResources(
            LocalResourceType.ARCHIVE, archivesCsv, localResources, fs);
      }

      if (files != null) {
        String[] localFiles = files.split(",");
        String filesCsv = copyFromLocal(fs, appPath, localFiles);
        LOG.info("files: {}", filesCsv);
        dag.getAttributes().put(LogicalPlan.FILES, filesCsv);
        LaunchContainerRunnable.addFilesToLocalResources(
            LocalResourceType.FILE, filesCsv, localResources, fs);
      }

      dag.getAttributes().put(LogicalPlan.APPLICATION_PATH, appPath.toString());
      if (dag.getAttributes().get(OperatorContext.STORAGE_AGENT) == null) {
          /* which would be the most likely case */
        Path checkpointPath = new Path(appPath, LogicalPlan.SUBDIR_CHECKPOINTS);
        // use conf client side to pickup any proxy settings from dt-site.xml
        dag.setAttribute(
            OperatorContext.STORAGE_AGENT, new FSStorageAgent(checkpointPath.toString(), conf));
      }
      if (dag.getAttributes().get(LogicalPlan.CONTAINER_OPTS_CONFIGURATOR) == null) {
        dag.setAttribute(
            LogicalPlan.CONTAINER_OPTS_CONFIGURATOR, new BasicContainerOptConfigurator());
      }

      // Set the log4j properties if needed
      if (!log4jPropFile.isEmpty()) {
        Path log4jSrc = new Path(log4jPropFile);
        Path log4jDst = new Path(appPath, "log4j.props");
        fs.copyFromLocalFile(false, true, log4jSrc, log4jDst);
        FileStatus log4jFileStatus = fs.getFileStatus(log4jDst);
        LocalResource log4jRsrc = Records.newRecord(LocalResource.class);
        log4jRsrc.setType(LocalResourceType.FILE);
        log4jRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
        log4jRsrc.setResource(ConverterUtils.getYarnUrlFromURI(log4jDst.toUri()));
        log4jRsrc.setTimestamp(log4jFileStatus.getModificationTime());
        log4jRsrc.setSize(log4jFileStatus.getLen());
        localResources.put("log4j.properties", log4jRsrc);
      }

      if (originalAppId != null) {
        Path origAppPath = new Path(appsBasePath, this.originalAppId);
        LOG.info("Restart from {}", origAppPath);
        copyInitialState(origAppPath);
      }

      // push logical plan to DFS location
      Path cfgDst = new Path(appPath, LogicalPlan.SER_FILE_NAME);
      FSDataOutputStream outStream = fs.create(cfgDst, true);
      LogicalPlan.write(this.dag, outStream);
      outStream.close();

      Path launchConfigDst = new Path(appPath, LogicalPlan.LAUNCH_CONFIG_FILE_NAME);
      outStream = fs.create(launchConfigDst, true);
      conf.writeXml(outStream);
      outStream.close();

      FileStatus topologyFileStatus = fs.getFileStatus(cfgDst);
      LocalResource topologyRsrc = Records.newRecord(LocalResource.class);
      topologyRsrc.setType(LocalResourceType.FILE);
      topologyRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
      topologyRsrc.setResource(ConverterUtils.getYarnUrlFromURI(cfgDst.toUri()));
      topologyRsrc.setTimestamp(topologyFileStatus.getModificationTime());
      topologyRsrc.setSize(topologyFileStatus.getLen());
      localResources.put(LogicalPlan.SER_FILE_NAME, topologyRsrc);

      // Set local resource info into app master container launch context
      amContainer.setLocalResources(localResources);

      // Set the necessary security tokens as needed
      // amContainer.setContainerTokens(containerToken);
      // Set the env variables to be setup in the env where the application master will be run
      LOG.info("Set the environment for the application master");
      Map<String, String> env = new HashMap<String, String>();

      // Add application jar(s) location to classpath
      // At some point we should not be required to add
      // the hadoop specific classpaths to the env.
      // It should be provided out of the box.
      // For now setting all required classpaths including
      // the classpath to "." for the application jar(s)
      // including ${CLASSPATH} will duplicate the class path in app master, removing it for now
      // StringBuilder classPathEnv = new StringBuilder("${CLASSPATH}:./*");
      StringBuilder classPathEnv = new StringBuilder("./*");
      String classpath = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH);
      for (String c :
          StringUtils.isBlank(classpath)
              ? YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH
              : classpath.split(",")) {
        if (c.equals("$HADOOP_CLIENT_CONF_DIR")) {
          // SPOI-2501
          continue;
        }
        classPathEnv.append(':');
        classPathEnv.append(c.trim());
      }
      env.put("CLASSPATH", classPathEnv.toString());
      // propagate to replace node managers user name (effective in non-secure mode)
      env.put("HADOOP_USER_NAME", UserGroupInformation.getLoginUser().getUserName());

      amContainer.setEnvironment(env);

      // Set the necessary command to execute the application master
      ArrayList<CharSequence> vargs = new ArrayList<CharSequence>(30);

      // Set java executable command
      LOG.info("Setting up app master command");
      vargs.add(javaCmd);
      if (dag.isDebug()) {
        vargs.add("-agentlib:jdwp=transport=dt_socket,server=y,suspend=n");
      }
      // Set Xmx based on am memory size
      // default heap size 75% of total memory
      vargs.add("-Xmx" + (amMemory * 3 / 4) + "m");
      vargs.add("-XX:+HeapDumpOnOutOfMemoryError");
      vargs.add("-XX:HeapDumpPath=/tmp/dt-heap-" + appId.getId() + ".bin");
      vargs.add("-Dhadoop.root.logger=" + (dag.isDebug() ? "DEBUG" : "INFO") + ",RFA");
      vargs.add("-Dhadoop.log.dir=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR);
      vargs.add(String.format("-D%s=%s", StreamingContainer.PROP_APP_PATH, dag.assertAppPath()));
      if (dag.isDebug()) {
        vargs.add("-Dlog4j.debug=true");
      }

      String loggersLevel = conf.get(DTLoggerFactory.DT_LOGGERS_LEVEL);
      if (loggersLevel != null) {
        vargs.add(String.format("-D%s=%s", DTLoggerFactory.DT_LOGGERS_LEVEL, loggersLevel));
      }
      vargs.add(StreamingAppMaster.class.getName());
      vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
      vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

      // Get final command
      StringBuilder command = new StringBuilder(9 * vargs.size());
      for (CharSequence str : vargs) {
        command.append(str).append(" ");
      }

      LOG.info("Completed setting up app master command " + command.toString());
      List<String> commands = new ArrayList<String>();
      commands.add(command.toString());
      amContainer.setCommands(commands);

      // Set up resource type requirements
      // For now, only memory is supported so we set memory requirements
      Resource capability = Records.newRecord(Resource.class);
      capability.setMemory(amMemory);
      appContext.setResource(capability);

      // Service data is a binary blob that can be passed to the application
      // Not needed in this scenario
      // amContainer.setServiceData(serviceData);
      appContext.setAMContainerSpec(amContainer);

      // Set the priority for the application master
      Priority pri = Records.newRecord(Priority.class);
      pri.setPriority(amPriority);
      appContext.setPriority(pri);
      // Set the queue to which this application is to be submitted in the RM
      appContext.setQueue(queueName);

      // Submit the application to the applications manager
      // SubmitApplicationResponse submitResp = rmClient.submitApplication(appRequest);
      // Ignore the response as either a valid response object is returned on success
      // or an exception thrown to denote some form of a failure
      String specStr =
          Objects.toStringHelper("Submitting application: ")
              .add("name", appContext.getApplicationName())
              .add("queue", appContext.getQueue())
              .add("user", UserGroupInformation.getLoginUser())
              .add("resource", appContext.getResource())
              .toString();
      LOG.info(specStr);
      if (dag.isDebug()) {
        // LOG.info("Full submission context: " + appContext);
      }
      yarnClient.submitApplication(appContext);
    } finally {
      fs.close();
    }
  }
Example #30
 public void writeRequest(OutputStream out) throws IOException {
   DataOutputStream dos = new DataOutputStream(out);
   dos.writeInt(buffers.size());
   for (DataOutputBuffer b : buffers) dos.write(b.getData(), 0, b.getLength());
 }
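Note that only the buffer count precedes the concatenated bytes: no per-buffer lengths are written, so the receiving side must be able to recover the buffer boundaries from the payload contents themselves.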