Example #1
 // TCP large RECEIVE of results.  Note that 'this' is NOT the RPC object
 // that is hoping to get the received object, nor is the current thread the
 // RPC thread blocking for the object.  The current thread is the TCP
 // reader thread.
 static void tcp_ack(final AutoBuffer ab) throws IOException {
   // Get the RPC we're waiting on
   int task = ab.getTask();
   RPC rpc = ab._h2o.taskGet(task);
   // Race with canceling a large RPC fetch: Task is already dead.  Do not
   // bother reading from the TCP socket, just bail out & close socket.
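    // (A minimal sketch of this drain-then-ACKACK pattern follows the example.)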
   if (rpc == null || rpc._done) {
     ab.drainClose();
   } else {
     assert rpc._tasknum == task;
     assert !rpc._done;
     // Here we have the result, and we're on the correct Node but wrong
     // Thread.  If we just return, the TCP reader thread will close the
     // remote, the remote will UDP ACK the RPC back, and back on the current
     // Node but in the correct Thread, we'd wake up and realize we received a
     // large result.
     try {
       rpc.response(ab);
     } catch (AutoBuffer.AutoBufferException e) {
       // If TCP fails, we will have done a short-read crushing the original
       // _dt object, and be unable to resend.  This is fatal right now.
       // Really: an unimplemented feature; fix is to notice that a partial
       // TCP read means that the server (1) got our remote_exec request, (2)
       // has computed an answer and was trying to send it to us, (3) failed
       // sending via TCP hence the server knows it failed and will send again
       // without any further work from us.  We need to disable all the resend
       // & retry logic, and wait for the server to re-send our result.
       // Meanwhile the _dt object is crushed with half-read crap, and cannot
       // be trusted except in the base fields.
       throw Log.throwErr(e._ioe);
     }
   }
   // ACKACK the remote, telling him "we got the answer"
   new AutoBuffer(ab._h2o, H2O.ACK_ACK_PRIORITY).putTask(UDP.udp.ackack.ordinal(), task).close();
 }
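
A minimal sketch of the drain-then-ACKACK pattern above. The TaskTable class, onLargeResult, and sendAckAck are hypothetical stand-ins for H2O's per-node task map, taskGet, drainClose, and AutoBuffer machinery, not the actual API:

import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.ConcurrentHashMap;

final class TaskTable {
  private final ConcurrentHashMap<Integer, Object> live = new ConcurrentHashMap<>();

  // Reader-thread side of a large receive: the task may have been cancelled
  // while its bytes were in flight, so the payload must be consumed either
  // way to keep the shared stream correctly framed.
  void onLargeResult(int task, InputStream in, int len) throws IOException {
    Object rpc = live.get(task);
    if (rpc == null) {
      // Dead/cancelled task: drain the bytes, then drop them.
      long skipped = 0;
      while (skipped < len) {
        long n = in.skip(len - skipped);
        if (n <= 0) throw new IOException("short read draining dead task#" + task);
        skipped += n;
      }
    } else {
      // Live task: deserialize the result into the waiting RPC (elided).
    }
    // ACKACK in either case, so the sender stops resending the answer.
    sendAckAck(task);
  }

  private void sendAckAck(int task) { /* fire-and-forget UDP ACKACK (elided) */ }
}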
Example #2
  public synchronized RPC<V> call() {
    // Any Completer will not be carried over to remote; add it to the RPC call
    // so completion is signaled after the remote comes back.
    CountedCompleter cc = _dt.getCompleter();
    if (cc != null) handleCompleter(cc);

    // If running on self, just submit to queues & do locally
    if (_target == H2O.SELF) return handleLocal();

    // Keep a global record, for a while
    if (_target != null) _target.taskPut(_tasknum, this);
    try {
      if (_nack) return this; // Racing Nack rechecked under lock; no need to send retry
      // We could be racing timeouts-vs-replies.  Blow off timeout if we have an answer.
      if (isDone()) {
        if (_target != null) _target.taskRemove(_tasknum);
        return this;
      }
      // Default strategy: (re)fire the packet and (re)start the timeout.  We
      // "count" exactly 1 failure: just whether or not we shipped via TCP ever
      // once.  After that we fearlessly (re)send UDP-sized packets until the
      // server replies.

      // Pack classloader/class & the instance data into the outgoing
      // AutoBuffer.  If it fits in a single UDP packet, ship it.  If not,
      // finish off the current AutoBuffer (which is now going TCP style), and
      // make a new UDP-sized packet.  On a re-send of a TCP-sized hunk, just
      // send the basic UDP control packet.
      if (!_sentTcp) {
        while (true) { // Retry loop for broken TCP sends
          AutoBuffer ab = new AutoBuffer(_target, _dt.priority());
          try {
            final boolean t;
            int offset = ab.position();
            ab.putTask(UDP.udp.exec, _tasknum).put1(CLIENT_UDP_SEND);
            ab.put(_dt);
            t = ab.hasTCP();
            assert sz_check(ab) : "Resend of " + _dt.getClass() + " changes size from "
                + _size + " to " + ab.size() + " for task#" + _tasknum;
            ab.close(); // Then close; send final byte
            _sentTcp = t; // Set after close (and any other possible fail)
            break; // Break out of retry loop
          } catch (AutoBuffer.AutoBufferException e) {
            Log.info("IOException during RPC call: " + e._ioe.getMessage() + ",  AB=" + ab
                + ", for task#" + _tasknum + ", waiting and retrying...");
            ab.drainClose();
            try {
              Thread.sleep(500);
            } catch (InterruptedException ignore) {
            }
          }
        } // end of while(true)
      } else {
        // Else it was sent via TCP in a prior attempt, and we've timed out.
        // This means the remote (probably) got our request, but his ACK/answer
        // got dropped and we need him to resend it (or else he is still
        // processing our request).  Send a UDP reminder - but with the
        // CLIENT_TCP_SEND flag instead of the UDP send, and no DTask (since
        // it previously went via TCP, no need to resend it).
        AutoBuffer ab = new AutoBuffer(_target, _dt.priority()).putTask(UDP.udp.exec, _tasknum);
        ab.put1(CLIENT_TCP_SEND).close();
      }
      // Double retry until we exceed existing age.  This is the time to delay
      // until we try again.  Note that we come here immediately on creation,
      // so the first doubling happens before anybody does any waiting.  Also
      // note the generous 5sec cap: ping at least every 5 sec.
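      // (A standalone sketch of this backoff schedule follows the example.)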
      _retry += (_retry < MAX_TIMEOUT) ? _retry : MAX_TIMEOUT;
      // Put self on the "TBD" list of tasks awaiting Timeout.
      // So: don't really 'forget' but remember me in a little bit.
      //      UDPTimeOutThread.PENDING.put(_tasknum, this);
      return this;
    } catch (Throwable t) {
      t.printStackTrace();
      throw Log.throwErr(t);
    }
  }
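
The _retry update near the end is the whole backoff policy: the delay doubles while below MAX_TIMEOUT, and after that grows by at most MAX_TIMEOUT per round, so reminder pings keep flowing at a bounded rate. A standalone sketch of that schedule; MAX_TIMEOUT's value is not shown above, so the 5000 ms here is assumed from the "5sec" comment, and the starting delay is invented for illustration:

public class RetrySchedule {
  static final int MAX_TIMEOUT = 5000; // ms; assumed from the "5sec cap" comment

  public static void main(String[] args) {
    int retry = 200; // hypothetical initial delay, ms
    for (int i = 0; i < 8; i++) {
      System.out.println("attempt " + i + ": wait " + retry + " ms");
      // Same update as RPC.call(): double below the cap, else add the cap.
      retry += (retry < MAX_TIMEOUT) ? retry : MAX_TIMEOUT;
    }
    // Prints 200, 400, 800, 1600, 3200, 6400, 11400, 16400: doubling until
    // the value crosses the cap, then growing by exactly MAX_TIMEOUT per round.
  }
}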