Example #1
 // TCP large RECEIVE of results.  Note that 'this' is NOT the RPC object
 // that is hoping to get the received object, nor is the current thread the
 // RPC thread blocking for the object.  The current thread is the TCP
 // reader thread.
 static void tcp_ack(final AutoBuffer ab) throws IOException {
   // Get the RPC we're waiting on
   int task = ab.getTask();
   RPC rpc = ab._h2o.taskGet(task);
   // Race with canceling a large RPC fetch: Task is already dead.  Do not
   // bother reading from the TCP socket, just bail out & close socket.
   if (rpc == null || rpc._done) {
     ab.drainClose();
   } else {
     assert rpc._tasknum == task;
     assert !rpc._done;
     // Here we have the result, and we're on the correct Node but wrong
     // Thread.  If we just return, the TCP reader thread will close the
     // remote, the remote will UDP ACK the RPC back, and back on the current
     // Node but in the correct Thread, we'd wake up and realize we received a
     // large result.
     try {
       rpc.response(ab);
     } catch (AutoBuffer.AutoBufferException e) {
       // If TCP fails, we will have done a short-read crushing the original
       // _dt object, and be unable to resend.  This is fatal right now.
       // Really: an unimplemented feature; fix is to notice that a partial
       // TCP read means that the server (1) got our remote_exec request, (2)
       // has computed an answer and was trying to send it to us, (3) failed
       // sending via TCP hence the server knows it failed and will send again
       // without any further work from us.  We need to disable all the resend
       // & retry logic, and wait for the server to re-send our result.
       // Meanwhile the _dt object is crushed with half-read crap, and cannot
       // be trusted except in the base fields.
       throw Log.throwErr(e._ioe);
     }
   }
   // ACKACK the remote, telling him "we got the answer"
   new AutoBuffer(ab._h2o, H2O.ACK_ACK_PRIORITY).putTask(UDP.udp.ackack.ordinal(), task).close();
 }
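Note that the ACKACK goes out on both paths above: even when the task is already dead and the payload is merely drained, the remote still gets the acknowledgement, presumably because (per the resend logic described in the comment) the sender would otherwise keep re-sending the answer.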
Example #2
 // Start an RPC to fetch a Value, handling short-cutting dup-fetches
 static RPC<TaskGetKey> start(H2ONode target, Key key) {
   // Do we have an old TaskGetKey in-progress?
   RPC<TaskGetKey> old = TGKS.get(key);
   if (old != null) return old;
   // Make a new TGK.
   RPC<TaskGetKey> rpc = new RPC<TaskGetKey>(target, new TaskGetKey(key), 1.0f);
   if ((old = TGKS.putIfMatchUnlocked(key, rpc, null)) != null)
     return old; // Failed because an old exists
   rpc.setTaskNum().call(); // Start the op
   return rpc; // Successful install of a fresh RPC
 }
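A minimal caller sketch for the snippet above (hypothetical fragment: 'target' and 'key' are assumed to be a live H2ONode and a valid Key, and the blocking read mirrors Example #7):

 // Start (or join) a fetch for 'key' on node 'target', then block the
 // calling thread until the remote Value arrives.
 RPC<TaskGetKey> rpc = TaskGetKey.start(target, key);
 Value v = rpc.get()._val; // Dup-fetches for the same Key share this RPC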
Example #3
 // TCP large RECEIVE of results.  Note that 'this' is NOT the RPC object
 // that is hoping to get the received object, nor is the current thread the
 // RPC thread blocking for the object.  The current thread is the TCP
 // reader thread.
 static void tcp_ack(final AutoBuffer ab) {
   // Get the RPC we're waiting on
   int task = ab.getTask();
   RPC rpc = TASKS.get(task);
   // Race with canceling a large RPC fetch: Task is already dead.  Do not
   // bother reading from the TCP socket, just bail out & close socket.
   if (rpc == null) {
     ab.drainClose();
   } else {
     assert rpc._tasknum == task;
     assert !rpc._done;
     // Here we have the result, and we're on the correct Node but wrong
     // Thread.  If we just return, the TCP reader thread will close the
     // remote, the remote will UDP ACK the RPC back, and back on the current
     // Node but in the correct Thread, we'd wake up and realize we received a
     // large result.
     rpc.response(ab);
   }
   // ACKACK the remote, telling him "we got the answer"
   new AutoBuffer(ab._h2o).putTask(UDP.udp.ackack.ordinal(), task).close(true);
 }
Example #4
  public static String store2Hdfs(Key srcKey) {
    assert srcKey._kb[0] != Key.ARRAYLET_CHUNK;
    assert PersistHdfs.getPathForKey(srcKey) != null; // Validate key name
    Value v = DKV.get(srcKey);
    if (v == null) return "Key " + srcKey + " not found";
    if (v._isArray == 0) { // Simple chunk?
      v.setHdfs(); // Set to HDFS and be done
      return null; // Success
    }

    // For ValueArrays, make the .hex header
    ValueArray ary = ValueArray.value(v);
    String err = PersistHdfs.freeze(srcKey, ary);
    if (err != null) return err;

    // The task managing which chunks to write next,
    // store in a known key
    TaskStore2HDFS ts = new TaskStore2HDFS(srcKey);
    Key selfKey = ts.selfKey();
    UKV.put(selfKey, ts);

    // Then start writing chunks in-order, beginning with chunk zero:
    // chunkHome() resolves the home node of the next chunk to write,
    // and the write cursor starts at 0.
    RPC.call(ts.chunkHome(), ts);

    // Watch the progress key until it gets removed or an error appears
    long idx = 0;
    while (UKV.get(selfKey, ts) != null) {
      if (ts._indexFrom != idx) {
        System.out.print(" " + idx + "/" + ary.chunks());
        idx = ts._indexFrom;
      }
      if (ts._err != null) { // Found an error?
        UKV.remove(selfKey); // Cleanup & report
        return ts._err;
      }
      try {
        Thread.sleep(100); // Poll progress every 100ms
      } catch (InterruptedException e) {
        // Ignore interrupts; keep polling until the task completes or errors
      }
    }
    System.out.println(" " + ary.chunks() + "/" + ary.chunks());

    // PersistHdfs.refreshHDFSKeys();
    return null;
  }
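A hedged usage sketch (the null-means-success convention is taken directly from the return statements above; the caller shown is hypothetical):

  // Blocks until the export finishes or fails.
  String err = store2Hdfs(srcKey);
  if (err != null)
    System.err.println("HDFS export failed: " + err);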
Example #5
  @Override
  public void compute() {
    ValueArray ary = ValueArray.value(_arykey);
    Key self = selfKey();

    while (_indexFrom < ary.chunks()) {
      Key ckey = ary.getChunkKey(_indexFrom++);
      if (!ckey.home()) { // Next chunk not At Home?
        RPC.call(chunkHome(), this); // Hand the baton off to the next node/chunk
        return;
      }
      Value val = DKV.get(ckey); // It IS home, so get the data
      _err = PersistHdfs.appendChunk(_arykey, val);
      if (_err != null) return;
      UKV.put(self, this); // Update the progress/self key
    }
    // We did the last chunk.  Removing the selfKey is the signal to the web
    // thread that All Done.
    UKV.remove(self);
  }
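The loop above is a baton pass: compute() writes every chunk homed on the current node, then re-sends itself via RPC.call to the node homing the next chunk, so chunks append to HDFS strictly in order and no node ever pulls remote chunk data.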
Example #6
 static void put(H2ONode h2o, Key key, Value val, Futures fs, boolean dontCache) {
   fs.add(RPC.call(h2o, new TaskPutKey(key, val, dontCache)));
 }
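A short caller sketch, assuming the usual queue-then-block Futures pattern (blockForPending is assumed here to be the wait call):

 // Queue a remote put, then wait for the remote ACK.
 Futures fs = new Futures();
 put(h2o, key, val, fs, false /*dontCache*/);
 fs.blockForPending(); // Block until the TaskPutKey RPC completes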
Example #7
 static Value get(RPC<TaskGetKey> rpc) {
   return rpc.get()._val; // Block for it
 }
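Because get() blocks the calling thread, this pairs naturally with Example #2's dup-fetch cache: concurrent requests for the same Key all park on the single in-flight RPC instead of issuing duplicate network fetches.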