Example #1
  // If running on self, just submit to queues & do locally
  private RPC<V> handleLocal() {
    assert _dt.getCompleter() == null;
    _dt.setCompleter(
        new H2O.H2OCallback<DTask>() {
          @Override
          public void callback(DTask dt) {
            synchronized (RPC.this) {
              _done = true;
              RPC.this.notifyAll();
            }
            doAllCompletions();
          }

          @Override
          public boolean onExceptionalCompletion(Throwable ex, CountedCompleter dt) {
            synchronized (RPC.this) { // Might be called several times
              if (_done) return true; // Filter down to 1st exceptional completion
              _dt.setException(ex);
              // _done must be the last field set before the notify call, because the
              // waiting thread can wake up at any moment, independently of the notify
              _done = true;
              RPC.this.notifyAll();
            }
            doAllCompletions();
            return true;
          }
        });
    H2O.submitTask(_dt);
    return this;
  }
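The callback above marks the RPC as done under the RPC monitor and wakes any waiters with notifyAll(). For context, here is a minimal sketch of the matching wait side, assuming the same _done flag and the RPC instance as the monitor; the method name blockForResult is illustrative, not part of H2O's API:

  // Sketch: block until the callback in handleLocal() sets _done and calls notifyAll().
  // The wait sits in a loop so spurious wakeups simply re-check the condition.
  private V blockForResult() throws InterruptedException {
    synchronized (this) {
      while (!_done)
        this.wait();   // paired with RPC.this.notifyAll() in the callbacks above
    }
    return _dt;        // safe to read once _done was observed under the monitor
  }
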
Example #2
 private void doAllCompletions() {
   final Exception e = _dt.getDException();
   // Also notify any and all pending completion-style tasks
   if (_fjtasks != null)
     for (final H2OCountedCompleter task : _fjtasks) {
       H2O.submitTask(
           new H2OCountedCompleter(task.priority()) {
             @Override
             public void compute2() {
                if (e != null) { // re-throw exception on this side as if it happened locally
                  task.completeExceptionally(e);
                } else {
                  try {
                    task.__tryComplete(_dt);
                  } catch (Throwable t) { // 't', so the captured 'e' above is not shadowed
                    task.completeExceptionally(t);
                  }
                }
             }
           });
     }
 }
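doAllCompletions() drains the _fjtasks list, wrapping each pending completer so the result (or exception) is delivered on an F/J thread. The example does not show how tasks get onto that list; a hypothetical registration helper might look like the sketch below, assuming _fjtasks is an ArrayList guarded by the RPC monitor (java.util.ArrayList import assumed; this is not H2O's actual code):

  // Sketch (hypothetical helper, not H2O's actual API): register a completion-style
  // task with this RPC so doAllCompletions() will notify it later.
  private void addPendingCompleter(final H2OCountedCompleter task) {
    synchronized (this) {
      if (!_done) {                                  // result not here yet: queue it
        if (_fjtasks == null) _fjtasks = new ArrayList<>();
        _fjtasks.add(task);
        return;
      }
    }
    // The result already arrived: complete this one task immediately,
    // mirroring the per-task wrapper submitted by doAllCompletions().
    Exception e = _dt.getDException();
    if (e != null) task.completeExceptionally(e);
    else           task.__tryComplete(_dt);
  }
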
Example #3
 public final V get(long timeout, TimeUnit unit) {
   if (_done) return _dt; // Fast-path shortcut
   throw H2O.unimpl();
 }
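Only the fast path of the timed get is implemented in the example; the slow path throws H2O.unimpl(). One way the missing slow path could be written, reusing the _done / notifyAll() protocol from Example #1, is sketched below; the added checked exceptions and the java.util.concurrent.TimeUnit / TimeoutException imports are assumptions, and the original signature declares neither:

  // Sketch: a timed wait consistent with the _done/notifyAll() protocol above.
  // Exception handling and the exact signature are assumptions, not H2O's code.
  public final V getWithTimeout(long timeout, TimeUnit unit)
      throws InterruptedException, TimeoutException {
    if (_done) return _dt;                                // fast-path shortcut
    final long deadline = System.nanoTime() + unit.toNanos(timeout);
    synchronized (this) {
      while (!_done) {
        long remaining = deadline - System.nanoTime();
        if (remaining <= 0) throw new TimeoutException();
        TimeUnit.NANOSECONDS.timedWait(this, remaining);  // wait at most the time left
      }
    }
    return _dt;
  }
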
Example #4
  // Handle traffic, from a client to this server asking for work to be done.
  // Called from either a F/J thread (generally with a UDP packet) or from the
  // TCPReceiver thread.
  static void remote_exec(AutoBuffer ab) {
    long lo = ab.get8(0), hi = ab._size >= 16 ? ab.get8(8) : 0;
    final int task = ab.getTask();
    final int flag = ab.getFlag();
    assert flag == CLIENT_UDP_SEND || flag == CLIENT_TCP_SEND; // Client-side send
    // Atomically record an instance of this task, one-time-only replacing a
    // null with an RPCCall, a placeholder while we work on a proper response -
    // and it serves to let us discard dup UDP requests.
    RPCCall old = ab._h2o.has_task(task);
    // This is a UDP packet requesting an answer back for a request sent via
    // TCP but the UDP packet has arrived ahead of the TCP.  Just drop the UDP
    // and wait for the TCP to appear.
    if (old == null && flag == CLIENT_TCP_SEND) {
      Log.warn(
          "got tcp with existing task #, FROM "
              + ab._h2o.toString()
              + " AB: " /* +  UDP.printx16(lo,hi)*/);
      assert !ab.hasTCP()
          : "ERROR: got tcp with existing task #, FROM "
              + ab._h2o.toString()
              + " AB: " /* + UDP.printx16(lo,hi)*/; // All the resends should be UDP only
      // DROP PACKET
    } else if (old == null) { // New task?
      RPCCall rpc;
      try {
        // Read the DTask Right Now.  If we are the TCPReceiver thread, then we
        // are reading in that thread... and thus TCP reads are single-threaded.
        rpc = new RPCCall(ab.get(water.DTask.class), ab._h2o, task);
      } catch (AutoBuffer.AutoBufferException e) {
        // Here we assume it's a TCP fail on read - and ignore the remote_exec
        // request.  The caller will send it again.  NOTE: this case is
        // indistinguishable from a broken short-writer/long-reader bug, except
        // that we'll re-send endlessly and fail endlessly.
        Log.info(
            "Network congestion OR short-writer/long-reader: TCP "
                + e._ioe.getMessage()
                + ",  AB="
                + ab
                + ", ignoring partial send");
        ab.drainClose();
        return;
      }
      RPCCall rpc2 = ab._h2o.record_task(rpc);
      if (rpc2 == null) { // Atomically insert (to avoid double-work)
        if (rpc._dt instanceof MRTask && rpc._dt.logVerbose())
          Log.debug("Start remote task#" + task + " " + rpc._dt.getClass() + " from " + ab._h2o);
        H2O.submitTask(rpc); // And execute!
      } else { // Else lost the task-insertion race
        if (ab.hasTCP()) ab.drainClose();
        // DROP PACKET
      }

    } else if (!old._computedAndReplied) {
      // This packet has not been fully computed.  Hence it's still a work-in-
      // progress locally.  We have no answer to reply but we do not want to
      // re-offer the packet for repeated work.  Send back a NACK, letting the
      // client know we're Working On It
      assert !ab.hasTCP()
          : "got tcp with existing task #, FROM "
              + ab._h2o.toString()
              + " AB: "
              + UDP.printx16(lo, hi)
              + ", position = "
              + ab._bb.position();
      ab.clearForWriting(udp.nack._prior).putTask(UDP.udp.nack.ordinal(), task);
      // DROP PACKET
    } else {
      // This is an old re-send of the same thing we've answered to before.
      // Send back the same old answer ACK.  If we sent via TCP before, then
      // we know the answer got there so just send a control-ACK back.  If we
      // sent via UDP, resend the whole answer.
      if (ab.hasTCP()) {
        Log.warn(
            "got tcp with existing task #, FROM "
                + ab._h2o.toString()
                + " AB: "
                + UDP.printx16(lo, hi)); // All the resends should be UDP only
        ab.drainClose();
      }
      if (old._dt != null) { // already ackacked
        ++old._ackResendCnt;
        if (old._ackResendCnt % 10 == 0)
          Log.err(
              "Possibly broken network, can not send ack through, got "
                  + old._ackResendCnt
                  + " for task # "
                  + old._tsknum
                  + ", dt == null?"
                  + (old._dt == null));
        old.resend_ack();
      }
    }
    ab.close();
  }
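remote_exec() relies on has_task() / record_task() on the sending node for its one-time-only dedup: record_task() must atomically install the RPCCall and report whether someone else got there first. The H2ONode side is not shown in the example; a minimal illustration of that idiom, using a ConcurrentHashMap keyed by task number (java.util.concurrent.ConcurrentHashMap import assumed; the real bookkeeping in H2ONode may differ):

  // Sketch (illustrative only): the dedup remote_exec() depends on reduces to an
  // atomic put-if-absent keyed by the task number.
  private final ConcurrentHashMap<Integer, RPCCall> _tasks = new ConcurrentHashMap<>();

  RPCCall has_task(int tsknum) {
    return _tasks.get(tsknum);             // null means this task was never seen
  }

  RPCCall record_task(RPCCall rpc) {
    // null return: we won the insertion race and the caller should execute the task;
    // non-null: somebody else recorded it first (the "lost the task-insertion race" branch).
    return _tasks.putIfAbsent(rpc._tsknum, rpc);
  }
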
Example #5
 @Override
 AutoBuffer call(AutoBuffer ab) {
   // Never expected to be invoked for this handler; fail hard if it ever is.
   throw H2O.fail();
 }