public HostConnectionPool(Host host, HostDistance hostDistance, Session.Manager manager)
      throws ConnectionException {
    assert hostDistance != HostDistance.IGNORED;
    this.host = host;
    this.hostDistance = hostDistance;
    this.manager = manager;

    this.newConnectionTask =
        new Runnable() {
          @Override
          public void run() {
            addConnectionIfUnderMaximum();
            scheduledForCreation.decrementAndGet();
          }
        };

    // Create initial core connections
    List<Connection> l =
        new ArrayList<Connection>(options().getCoreConnectionsPerHost(hostDistance));
    try {
      for (int i = 0; i < options().getCoreConnectionsPerHost(hostDistance); i++)
        l.add(manager.connectionFactory().open(host));
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      // If asked to interrupt, we can skip opening core connections; the pool will still work.
      // Otherwise we ignore it because I'm not sure we can do much better currently.
    }
    this.connections = new CopyOnWriteArrayList<Connection>(l);
    this.open = new AtomicInteger(connections.size());

    logger.trace("Created connection pool to host {}", host);
  }
Example 2
 /** {@inheritDoc} */
 public List<T> readAllNow() {
   lockWrite();
   try {
     // Each queued element corresponds to one permit, so draining the permits
     // claims everything that is currently available.
     int available = gate.drainPermits();
     if (available == 0) {
       return Collections.emptyList();
     } else {
       List<T> drained = new ArrayList<T>(available);
       for (int i = 0; i < available; i++) drained.add(queue.poll());
       return drained;
     }
   } finally {
     unlockWrite();
     checkForFinale();
   }
 }
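The readAllNow() method above relies on a drain-permits pattern: a semaphore's permit count mirrors the number of queued elements, so draining the permits claims everything currently available in one step. A minimal self-contained sketch of the same idea, using standard java.util.concurrent types (the class name and the writer side are assumptions, not part of the original code):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Semaphore;

final class DrainableQueue<T> {
  private final ConcurrentLinkedQueue<T> queue = new ConcurrentLinkedQueue<T>();
  private final Semaphore gate = new Semaphore(0);

  void put(T item) {
    queue.add(item);
    gate.release(); // one permit per queued element
  }

  List<T> readAllNow() {
    int available = gate.drainPermits(); // claim everything currently queued
    if (available == 0) return Collections.emptyList();
    List<T> drained = new ArrayList<T>(available);
    for (int i = 0; i < available; i++) drained.add(queue.poll());
    return drained;
  }
}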
  public Connection borrowConnection(long timeout, TimeUnit unit)
      throws ConnectionException, TimeoutException {
    if (isShutdown.get())
      // Note: throwing a ConnectionException is probably fine in practice as it will trigger the
      // creation of a new host.
      // That being said, maybe having a specific exception could be cleaner.
      throw new ConnectionException(host.getAddress(), "Pool is shutdown");

    if (connections.isEmpty()) {
      for (int i = 0; i < options().getCoreConnectionsPerHost(hostDistance); i++) {
        // We don't respect MAX_SIMULTANEOUS_CREATION here because it's only there to
        // protect against creating connections in excess of core too quickly
        scheduledForCreation.incrementAndGet();
        manager.executor().submit(newConnectionTask);
      }
      Connection c = waitForConnection(timeout, unit);
      c.setKeyspace(manager.poolsState.keyspace);
      return c;
    }

    int minInFlight = Integer.MAX_VALUE;
    Connection leastBusy = null;
    for (Connection connection : connections) {
      int inFlight = connection.inFlight.get();
      if (inFlight < minInFlight) {
        minInFlight = inFlight;
        leastBusy = connection;
      }
    }

    if (minInFlight >= options().getMaxSimultaneousRequestsPerConnectionThreshold(hostDistance)
        && connections.size() < options().getMaxConnectionsPerHost(hostDistance))
      maybeSpawnNewConnection();

    while (true) {
      int inFlight = leastBusy.inFlight.get();

      if (inFlight >= Connection.MAX_STREAM_PER_CONNECTION) {
        leastBusy = waitForConnection(timeout, unit);
        break;
      }

      if (leastBusy.inFlight.compareAndSet(inFlight, inFlight + 1)) break;
    }
    leastBusy.setKeyspace(manager.poolsState.keyspace);
    return leastBusy;
  }
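borrowConnection() pairs with returnConnection() further below: every borrowed connection must be returned so its in-flight counter is decremented and idle connections can be trashed. A hedged usage sketch (the pool reference and the request-writing step are placeholders, not part of the driver code shown here):

// Sketch only: assumes a HostConnectionPool `pool` is available.
Connection connection = pool.borrowConnection(5, TimeUnit.SECONDS);
try {
  // ... write the request on the borrowed connection ...
} finally {
  // Always return the connection so its in-flight count is decremented.
  pool.returnConnection(connection);
}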
  /**
   * @param p Partition.
   * @param topVer Topology version ({@code -1} for all nodes).
   * @param state Partition state.
   * @param states Additional partition states.
   * @return List of nodes for the partition.
   */
  private List<ClusterNode> nodes(
      int p,
      AffinityTopologyVersion topVer,
      GridDhtPartitionState state,
      GridDhtPartitionState... states) {
    Collection<UUID> allIds =
        topVer.topologyVersion() > 0 ? F.nodeIds(CU.affinityNodes(cctx, topVer)) : null;

    lock.readLock().lock();

    try {
      assert node2part != null && node2part.valid()
          : "Invalid node-to-partitions map [topVer="
              + topVer
              + ", allIds="
              + allIds
              + ", node2part="
              + node2part
              + ", cache="
              + cctx.name()
              + ']';

      Collection<UUID> nodeIds = part2node.get(p);

      // Node IDs can be null if both primary and backup nodes disappear.
      int size = nodeIds == null ? 0 : nodeIds.size();

      if (size == 0) return Collections.emptyList();

      List<ClusterNode> nodes = new ArrayList<>(size);

      for (UUID id : nodeIds) {
        if (topVer.topologyVersion() > 0 && !allIds.contains(id)) continue;

        if (hasState(p, id, state, states)) {
          ClusterNode n = cctx.discovery().node(id);

          if (n != null && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion()))
            nodes.add(n);
        }
      }

      return nodes;
    } finally {
      lock.readLock().unlock();
    }
  }
Example 5
  public static void main(String[] args) throws InterruptedException, NoSuchMethodException {
    int nthreads = Integer.parseInt(args[0]);
    int ncounters = Integer.parseInt(args[1]);
    String type = args[2];
    int nexecutions = Integer.parseInt(args[3]);

    List<Counter> counters = new ArrayList<Counter>();
    for (int i = 0; i < ncounters; i++) {
      Lock lock;
      if (type.equals("s")) lock = new safe.ReentrantLock();
      else if (type.equals("d")) lock = new safe.ReentrantLockDirect();
      else lock = new java.util.concurrent.locks.ReentrantLock();
      counters.add(new Counter(lock));
    }

    runExecutionBoundedTest(nthreads, counters, nexecutions);
  }
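The benchmark above only shows how the counters are wired to the chosen lock implementation; the Counter class itself is not included. A minimal version consistent with how it is used here (a constructor taking a Lock and a getCount() read by the main thread) might look like this sketch, which is an assumption rather than the original class:

import java.util.concurrent.locks.Lock;

final class Counter {
  private final Lock lock;
  private long count; // guarded by lock

  Counter(Lock lock) {
    this.lock = lock;
  }

  void increment() {
    lock.lock();
    try {
      count++;
    } finally {
      lock.unlock();
    }
  }

  long getCount() {
    lock.lock();
    try {
      return count;
    } finally {
      lock.unlock();
    }
  }
}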
 /**
  * Adds a hook to the HookManager. More than one hook can be associated with a given message
  * type; hooks for a type are returned in registration order by getHooks().
  *
  * @param msgType the message type to match
  * @param hook the hook to be called for matching messages
  * @see EnginePlugin#handleMessageImpl
  */
 public void addHook(MessageType msgType, Hook hook) {
   lock.lock();
   try {
     List<Hook> hookList = hooks.get(msgType);
     if (hookList == null) {
       hookList = new LinkedList<Hook>();
       hookList.add(hook);
       hooks.put(msgType, hookList);
     } else {
       hookList = new LinkedList<Hook>(hookList);
       hookList.add(hook);
       hooks.put(msgType, hookList);
     }
   } finally {
     lock.unlock();
   }
 }
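Both branches of addHook() replace the stored list instead of mutating it, i.e. a copy-on-write update: a reader that already obtained a hook list never sees a concurrent modification, and only writers take the lock. A generic sketch of the same pattern (class and method names are illustrative, not from this codebase):

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.locks.ReentrantLock;

final class CopyOnWriteMultimap<K, V> {
  private final ReentrantLock lock = new ReentrantLock();
  private final Map<K, List<V>> map = new HashMap<K, List<V>>();

  void add(K key, V value) {
    lock.lock();
    try {
      List<V> current = map.get(key);
      // Never mutate a published list; copy, append, then republish.
      List<V> updated = (current == null) ? new ArrayList<V>() : new ArrayList<V>(current);
      updated.add(value);
      map.put(key, updated);
    } finally {
      lock.unlock();
    }
  }

  List<V> get(K key) {
    lock.lock();
    try {
      List<V> current = map.get(key);
      // Published lists are never mutated again, so callers may iterate without the lock.
      return (current == null) ? Collections.<V>emptyList() : current;
    } finally {
      lock.unlock();
    }
  }
}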
  public void handlePerception(PerceptionMessage perceptionMessage) {
    long targetOid = perceptionMessage.getTarget();
    List<PerceptionMessage.ObjectNote> gain = perceptionMessage.getGainObjects();
    List<PerceptionMessage.ObjectNote> lost = perceptionMessage.getLostObjects();

    if (Log.loggingDebug)
      Log.debug(
          "ProximityTracker.handlePerception: targetOid="
              + targetOid
              + ", instanceOid="
              + instanceOid
              + " "
              + ((gain == null) ? 0 : gain.size())
              + " gain and "
              + ((lost == null) ? 0 : lost.size())
              + " lost");

    if (gain != null) for (PerceptionMessage.ObjectNote note : gain) maybeAddPerceivedObject(note);

    if (lost != null)
      for (PerceptionMessage.ObjectNote note : lost)
        maybeRemovePerceivedObject(note.getSubject(), note, targetOid);
  }
  private void replace(final Connection connection) {
    connections.remove(connection);

    manager
        .executor()
        .submit(
            new Runnable() {
              @Override
              public void run() {
                connection.close();
                addConnectionIfUnderMaximum();
              }
            });
  }
  private boolean trashConnection(Connection connection) {
    // First, make sure we don't go below core connections
    for (; ; ) {
      int opened = open.get();
      if (opened <= options().getCoreConnectionsPerHost(hostDistance)) return false;

      if (open.compareAndSet(opened, opened - 1)) break;
    }
    trash.add(connection);
    connections.remove(connection);

    if (connection.inFlight.get() == 0 && trash.remove(connection)) close(connection);
    return true;
  }
  public void returnConnection(Connection connection) {
    int inFlight = connection.inFlight.decrementAndGet();

    if (connection.isDefunct()) {
      if (host.getMonitor().signalConnectionFailure(connection.lastException())) shutdown();
      else replace(connection);
    } else {

      if (trash.contains(connection) && inFlight == 0) {
        if (trash.remove(connection)) close(connection);
        return;
      }

      if (connections.size() > options().getCoreConnectionsPerHost(hostDistance)
          && inFlight <= options().getMinSimultaneousRequestsPerConnectionThreshold(hostDistance)) {
        trashConnection(connection);
      } else {
        signalAvailableConnection();
      }
    }
  }
  private boolean addConnectionIfUnderMaximum() {

    // First, make sure we don't cross the allowed limit of open connections
    for (; ; ) {
      int opened = open.get();
      if (opened >= options().getMaxConnectionsPerHost(hostDistance)) return false;

      if (open.compareAndSet(opened, opened + 1)) break;
    }

    if (isShutdown()) {
      open.decrementAndGet();
      return false;
    }

    // Now really open the connection
    try {
      connections.add(manager.connectionFactory().open(host));
      signalAvailableConnection();
      return true;
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      // Skip the open but ignore otherwise
      open.decrementAndGet();
      return false;
    } catch (ConnectionException e) {
      open.decrementAndGet();
      logger.debug("Connection error to {} while creating additional connection", host);
      if (host.getMonitor().signalConnectionFailure(e)) shutdown();
      return false;
    } catch (AuthenticationException e) {
      // This shouldn't really happen in theory
      open.decrementAndGet();
      logger.error(
          "Authentication error while creating additional connection (error is: {})",
          e.getMessage());
      shutdown();
      return false;
    }
  }
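trashConnection() and addConnectionIfUnderMaximum() follow the same reserve-then-rollback idiom: a compare-and-set loop reserves a slot in the open counter before the expensive work (opening or trashing a connection), and the counter is rolled back if that work fails. A condensed, hedged sketch of the idiom in isolation (the limit and the work are placeholders, not driver code):

import java.util.concurrent.atomic.AtomicInteger;

final class SlotReserver {
  private final AtomicInteger open = new AtomicInteger();
  private final int max;

  SlotReserver(int max) {
    this.max = max;
  }

  boolean tryAcquireAndRun(Runnable work) {
    // Optimistically reserve a slot; retry the CAS on contention.
    for (;;) {
      int current = open.get();
      if (current >= max) return false; // already at the limit
      if (open.compareAndSet(current, current + 1)) break;
    }
    try {
      work.run(); // e.g. actually open the connection
      return true;
    } catch (RuntimeException e) {
      open.decrementAndGet(); // roll the reservation back on failure
      return false;
    }
  }
}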
Example 12
  private static void runExecutionBoundedTest(int nthreads, List<Counter> counters, int nexecutions)
      throws InterruptedException {
    final List<CounterThread> threads = new ArrayList<CounterThread>();
    for (int i = 0; i < nthreads; i++) {
      CounterThread t;
      t = new CounterThread(counters.get(i % counters.size()), nexecutions);
      t.start();
      threads.add(t);
    }

    CounterThread.shoot(); // let all the threads go crazy at the same time

    for (int i = 0; i < nthreads; i++) {
      CounterThread t = threads.get(i);
      t.join(30000);
      if (t.isAlive()) {
        System.out.println("stuck thread name: " + t.toString());
        System.out.println("stuck thread state: " + t.getState());
        safe.ReentrantLock l = (safe.ReentrantLock) t.getLock();
        System.out.println("thread waiting lock: " + l);
        System.out.println("stuck thread increments: " + t.getExecutions());
        System.out.println("stuck thread counter value: " + t.getCounterCount());
        Thread other = l.getOwner();
        System.out.println("lock owner: " + other);
        if (other != null) {
          System.out.println("owner name: " + other.toString());
          System.out.println("state owner: " + other.getState());
        }
        // Keep program alive to dump thread stacks with: jstack -l $(pidof java)
        System.out.println(
            java.lang.management.ManagementFactory.getThreadMXBean().getPeakThreadCount());
        while (true) {
          Thread.sleep(1000); // park instead of busy-spinning while waiting for a stack dump
        }
      }
    }
    long sum = 0;
    for (int i = 0; i < counters.size(); ++i) {
      sum += counters.get(i).getCount();
    }
    System.out.println(sum);
    System.out.println(
        java.lang.management.ManagementFactory.getThreadMXBean().getPeakThreadCount());
  }
  /**
   * Starts activity.
   *
   * @throws IgniteInterruptedCheckedException If interrupted.
   */
  public void init() throws IgniteInterruptedCheckedException {
    if (isDone()) return;

    if (init.compareAndSet(false, true)) {
      if (isDone()) return;

      try {
        // Wait for event to occur to make sure that discovery
        // will return corresponding nodes.
        U.await(evtLatch);

        assert discoEvt != null : this;
        assert !dummy && !forcePreload : this;

        ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, exchId.topologyVersion());

        oldestNode.set(oldest);

        startCaches();

        // True if client node joined or failed.
        boolean clientNodeEvt;

        if (F.isEmpty(reqs)) {
          int type = discoEvt.type();

          assert type == EVT_NODE_JOINED || type == EVT_NODE_LEFT || type == EVT_NODE_FAILED
              : discoEvt;

          clientNodeEvt = CU.clientNode(discoEvt.eventNode());
        } else {
          assert discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT : discoEvt;

          boolean clientOnlyStart = true;

          for (DynamicCacheChangeRequest req : reqs) {
            if (!req.clientStartOnly()) {
              clientOnlyStart = false;

              break;
            }
          }

          clientNodeEvt = clientOnlyStart;
        }

        if (clientNodeEvt) {
          ClusterNode node = discoEvt.eventNode();

          // Client needs to initialize affinity for local join event or for started client caches.
          if (!node.isLocal()) {
            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
              if (cacheCtx.isLocal()) continue;

              GridDhtPartitionTopology top = cacheCtx.topology();

              top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));

              if (cacheCtx.affinity().affinityTopologyVersion() == AffinityTopologyVersion.NONE) {
                initTopology(cacheCtx);

                top.beforeExchange(this);
              } else
                cacheCtx.affinity().clientEventTopologyChange(discoEvt, exchId.topologyVersion());
            }

            if (exchId.isLeft())
              cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

            onDone(exchId.topologyVersion());

            skipPreload = cctx.kernalContext().clientNode();

            return;
          }
        }

        if (cctx.kernalContext().clientNode()) {
          skipPreload = true;

          for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            if (cacheCtx.isLocal()) continue;

            GridDhtPartitionTopology top = cacheCtx.topology();

            top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));
          }

          for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            if (cacheCtx.isLocal()) continue;

            initTopology(cacheCtx);
          }

          if (oldestNode.get() != null) {
            rmtNodes =
                new ConcurrentLinkedQueue<>(
                    CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

            rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

            ready.set(true);

            initFut.onDone(true);

            if (log.isDebugEnabled()) log.debug("Initialized future: " + this);

            sendPartitions();
          } else onDone(exchId.topologyVersion());

          return;
        }

        assert oldestNode.get() != null;

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (isCacheAdded(cacheCtx.cacheId(), exchId.topologyVersion())) {
            if (cacheCtx
                .discovery()
                .cacheAffinityNodes(cacheCtx.name(), topologyVersion())
                .isEmpty())
              U.quietAndWarn(log, "No server nodes found for cache client: " + cacheCtx.namex());
          }

          cacheCtx.preloader().onExchangeFutureAdded();
        }

        List<String> cachesWithoutNodes = null;

        if (exchId.isLeft()) {
          for (String name : cctx.cache().cacheNames()) {
            if (cctx.discovery().cacheAffinityNodes(name, topologyVersion()).isEmpty()) {
              if (cachesWithoutNodes == null) cachesWithoutNodes = new ArrayList<>();

              cachesWithoutNodes.add(name);

              // Fire event even if there is no client cache started.
              if (cctx.gridEvents().isRecordable(EventType.EVT_CACHE_NODES_LEFT)) {
                Event evt =
                    new CacheEvent(
                        name,
                        cctx.localNode(),
                        cctx.localNode(),
                        "All server nodes have left the cluster.",
                        EventType.EVT_CACHE_NODES_LEFT,
                        0,
                        false,
                        null,
                        null,
                        null,
                        null,
                        false,
                        null,
                        false,
                        null,
                        null,
                        null);

                cctx.gridEvents().record(evt);
              }
            }
          }
        }

        if (cachesWithoutNodes != null) {
          StringBuilder sb =
              new StringBuilder(
                  "All server nodes for the following caches have left the cluster: ");

          for (int i = 0; i < cachesWithoutNodes.size(); i++) {
            String cache = cachesWithoutNodes.get(i);

            sb.append('\'').append(cache).append('\'');

            if (i != cachesWithoutNodes.size() - 1) sb.append(", ");
          }

          U.quietAndWarn(log, sb.toString());

          U.quietAndWarn(log, "Must have server nodes for caches to operate.");
        }

        assert discoEvt != null;

        assert exchId.nodeId().equals(discoEvt.eventNode().id());

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          GridClientPartitionTopology clientTop =
              cctx.exchange().clearClientTopology(cacheCtx.cacheId());

          long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence();

          // Update before waiting for locks.
          if (!cacheCtx.isLocal())
            cacheCtx
                .topology()
                .updateTopologyVersion(exchId, this, updSeq, stopping(cacheCtx.cacheId()));
        }

        // Grab all alive remote nodes with order equal to or less than that of the last joined node.
        rmtNodes =
            new ConcurrentLinkedQueue<>(
                CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

        rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

        for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> m : singleMsgs.entrySet())
          // If received any messages, process them.
          onReceive(m.getKey(), m.getValue());

        for (Map.Entry<UUID, GridDhtPartitionsFullMessage> m : fullMsgs.entrySet())
          // If received any messages, process them.
          onReceive(m.getKey(), m.getValue());

        AffinityTopologyVersion topVer = exchId.topologyVersion();

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (cacheCtx.isLocal()) continue;

          // Must initialize topology after we get discovery event.
          initTopology(cacheCtx);

          cacheCtx.preloader().updateLastExchangeFuture(this);
        }

        IgniteInternalFuture<?> partReleaseFut = cctx.partitionReleaseFuture(topVer);

        // Assign to class variable so it will be included into toString() method.
        this.partReleaseFut = partReleaseFut;

        if (log.isDebugEnabled()) log.debug("Before waiting for partition release future: " + this);

        while (true) {
          try {
            partReleaseFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

            break;
          } catch (IgniteFutureTimeoutCheckedException ignored) {
            // Print pending transactions and locks that might have led to hang.
            dumpPendingObjects();
          }
        }

        if (log.isDebugEnabled()) log.debug("After waiting for partition release future: " + this);

        if (!F.isEmpty(reqs)) blockGateways();

        if (exchId.isLeft())
          cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

        IgniteInternalFuture<?> locksFut = cctx.mvcc().finishLocks(exchId.topologyVersion());

        while (true) {
          try {
            locksFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

            break;
          } catch (IgniteFutureTimeoutCheckedException ignored) {
            U.warn(
                log,
                "Failed to wait for locks release future. "
                    + "Dumping pending objects that might be the cause: "
                    + cctx.localNodeId());

            U.warn(log, "Locked entries:");

            Map<IgniteTxKey, Collection<GridCacheMvccCandidate>> locks =
                cctx.mvcc().unfinishedLocks(exchId.topologyVersion());

            for (Map.Entry<IgniteTxKey, Collection<GridCacheMvccCandidate>> e : locks.entrySet())
              U.warn(log, "Locked entry [key=" + e.getKey() + ", mvcc=" + e.getValue() + ']');
          }
        }

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (cacheCtx.isLocal()) continue;

          // Notify replication manager.
          GridCacheContext drCacheCtx =
              cacheCtx.isNear() ? cacheCtx.near().dht().context() : cacheCtx;

          if (drCacheCtx.isDrEnabled()) drCacheCtx.dr().beforeExchange(topVer, exchId.isLeft());

          // Partition release future is done so we can flush the write-behind store.
          cacheCtx.store().forceFlush();

          // Process queued undeploys prior to sending/spreading map.
          cacheCtx.preloader().unwindUndeploys();

          GridDhtPartitionTopology top = cacheCtx.topology();

          assert topVer.equals(top.topologyVersion())
              : "Topology version is updated only by this class's instances inside a single ExchangeWorker thread.";

          top.beforeExchange(this);
        }

        for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
          top.updateTopologyVersion(exchId, this, -1, stopping(top.cacheId()));

          top.beforeExchange(this);
        }
      } catch (IgniteInterruptedCheckedException e) {
        onDone(e);

        throw e;
      } catch (Throwable e) {
        U.error(
            log,
            "Failed to reinitialize local partitions (preloading will be stopped): " + exchId,
            e);

        onDone(e);

        if (e instanceof Error) throw (Error) e;

        return;
      }

      if (F.isEmpty(rmtIds)) {
        onDone(exchId.topologyVersion());

        return;
      }

      ready.set(true);

      initFut.onDone(true);

      if (log.isDebugEnabled()) log.debug("Initialized future: " + this);

      // If this node is not oldest.
      if (!oldestNode.get().id().equals(cctx.localNodeId())) sendPartitions();
      else {
        boolean allReceived = allReceived();

        if (allReceived && replied.compareAndSet(false, true)) {
          if (spreadPartitions()) onDone(exchId.topologyVersion());
        }
      }

      scheduleRecheck();
    } else assert false : "Skipped init future: " + this;
  }
Example 14
  /**
   * We have a packet that belongs to the passed-in connection; process the packet for that
   * connection. Returns true if the connection is open and the packet was a data packet.
   */
  boolean processExistingConnection(RDPConnection con, RDPPacket packet) {

    if (Log.loggingNet)
      Log.net("RDPServer.processExistingConnection: con state=" + con + ", packet=" + packet);
    packetCounter.add();

    int state = con.getState();
    if (state == RDPConnection.LISTEN) {
      // Something is wrong; we shouldn't be here. We get to this method after looking in
      // the connections map, but all LISTEN connections should be listed directly from
      // server sockets.
      Log.error("RDPServer.processExistingConnection: connection shouldn't be in LISTEN state");
      return false;
    }
    if (state == RDPConnection.SYN_SENT) {
      if (!packet.isAck()) {
        Log.warn("got a non-ack packet when we're in SYN_SENT");
        return false;
      }
      if (!packet.isSyn()) {
        Log.warn("got a non-syn packet when we're in SYN_SENT");
        return false;
      }
      if (Log.loggingNet) Log.net("good: got syn-ack packet in syn_sent");

      // make sure it's acking our initial segment #
      if (packet.getAckNum() != con.getInitialSendSeqNum()) {
        if (Log.loggingNet) Log.net("syn's ack number does not match initial seq #");
        return false;
      }

      con.setRcvCur(packet.getSeqNum());
      con.setRcvIrs(packet.getSeqNum());
      con.setMaxSendUnacks(packet.getSendUnacks());
      con.setMaxReceiveSegmentSize(packet.getMaxRcvSegmentSize());
      con.setSendUnackd(packet.getAckNum() + 1);

      // ack first before setting state to open
      // otherwise some other thread will get woken up and send data
      // before we send the ack
      if (Log.loggingNet) Log.net("new connection state: " + con);
      RDPPacket replyPacket = new RDPPacket(con);
      con.sendPacketImmediate(replyPacket, false);
      con.setState(RDPConnection.OPEN);
      return false;
    }
    if (state == RDPConnection.SYN_RCVD) {
      if (packet.getSeqNum() <= con.getRcvIrs()) {
        Log.error("seqnum is not above rcv initial seq num");
        return false;
      }
      if (packet.getSeqNum() > (con.getRcvCur() + (con.getRcvMax() * 2))) {
        Log.error("seqnum is too big");
        return false;
      }
      if (packet.isAck()) {
        if (packet.getAckNum() == con.getInitialSendSeqNum()) {
          if (Log.loggingNet) Log.net("got ack for our syn - setting state to open");
          con.setState(RDPConnection.OPEN); // this will notify()

          // call the accept callback
          // first find the serversocket
          DatagramChannel dc = con.getDatagramChannel();
          if (dc == null) {
            throw new MVRuntimeException(
                "RDPServer.processExistingConnection: no datagramchannel for connection that just turned OPEN");
          }
          RDPServerSocket rdpSocket = RDPServer.getRDPSocket(dc);
          if (rdpSocket == null) {
            throw new MVRuntimeException(
                "RDPServer.processExistingConnection: no socket for connection that just turned OPEN");
          }
          ClientConnection.AcceptCallback acceptCB = rdpSocket.getAcceptCallback();
          if (acceptCB != null) {
            acceptCB.acceptConnection(con);
          } else {
            Log.warn("serversocket has no accept callback");
          }
          if (Log.loggingNet)
            Log.net(
                "RDPServer.processExistingConnection: got ACK, removing from unack list: "
                    + packet.getSeqNum());
          con.removeUnackPacket(packet.getSeqNum());
        }
      }
    }
    if (state == RDPConnection.CLOSE_WAIT) {
      // reply with a reset on all packets
      if (!packet.isRst()) {
        RDPPacket rstPacket = RDPPacket.makeRstPacket();
        con.sendPacketImmediate(rstPacket, false);
      }
    }
    if (state == RDPConnection.OPEN) {
      if (packet.isRst()) {
        // the other side wants to close the connection
        // set the state,
        // don't call con.close() since that will send a reset packet
        if (Log.loggingDebug)
          Log.debug("RDPServer.processExistingConnection: got reset packet for con " + con);
        if (con.getState() != RDPConnection.CLOSE_WAIT) {
          con.setState(RDPConnection.CLOSE_WAIT);
          con.setCloseWaitTimer();
          // Only invoke callback when moving into CLOSE_WAIT
          // state.  This prevents two calls to connectionReset.
          Log.net("RDPServer.processExistingConnection: calling reset callback");
          ClientConnection.MessageCallback pcb = con.getCallback();
          pcb.connectionReset(con);
        }

        return false;
      }
      if (packet.isSyn()) {
        // this will close the connection (put into CLOSE_WAIT)
        // send a reset packet and call the connectionReset callback
        Log.error(
            "RDPServer.processExistingConnection: closing connection because we got a syn packet, con="
                + con);
        con.close();
        return false;
      }

      // TODO: shouldn't it be OK for it to have the same seq num
      // if it is a zero-data packet?
      long rcvCur = con.getRcvCur();
      if (packet.getSeqNum() <= rcvCur) {
        if (Log.loggingNet)
          Log.net("RDPServer.processExistingConnection: seqnum too small - acking/not process");
        if (packet.getData() != null) {
          if (Log.loggingNet)
            Log.net(
                "RDPServer.processExistingConnection: sending ack even though seqnum out of range");
          RDPPacket replyPacket = new RDPPacket(con);
          con.sendPacketImmediate(replyPacket, false);
        }
        return false;
      }
      if (packet.getSeqNum() > (rcvCur + (con.getRcvMax() * 2))) {
        Log.error("RDPServer.processExistingConnection: seqnum too big - discarding");
        return false;
      }
      if (packet.isAck()) {
        if (Log.loggingNet)
          Log.net("RDPServer.processExistingConnection: processing ack " + packet.getAckNum());
        // lock for race condition (read then set)
        con.getLock().lock();
        try {
          if (packet.getAckNum() >= con.getSendNextSeqNum()) {
            // acking something we didn't even send yet
            Log.error(
                "RDPServer.processExistingConnection: discarding -- got ack #"
                    + packet.getAckNum()
                    + ", but our next send seqnum is "
                    + con.getSendNextSeqNum()
                    + " -- "
                    + con);
            return false;
          }
          if (con.getSendUnackd() <= packet.getAckNum()) {
            con.setSendUnackd(packet.getAckNum() + 1);
            if (Log.loggingNet)
              Log.net(
                  "RDPServer.processExistingConnection: updated send_unackd num to "
                      + con.getSendUnackd()
                      + " (one greater than packet ack) - "
                      + con);
            con.removeUnackPacketUpTo(packet.getAckNum());
          }
          if (packet.isEak()) {
            List eackList = packet.getEackList();
            Iterator iter = eackList.iterator();
            while (iter.hasNext()) {
              Long seqNum = (Long) iter.next();
              if (Log.loggingNet)
                Log.net("RDPServer.processExistingConnection: got EACK: " + seqNum);
              con.removeUnackPacket(seqNum.longValue());
            }
          }
        } finally {
          con.getLock().unlock();
          if (Log.loggingNet)
            Log.net("RDPServer.processExistingConnection: processed ack " + packet.getAckNum());
        }
      }
      // process the data
      byte[] data = packet.getData();
      if ((data != null) || packet.isNul()) {
        dataCounter.add();

        // lock - since race condition: we read then set
        con.getLock().lock();
        try {
          rcvCur = con.getRcvCur(); // update rcvCur
          if (Log.loggingNet) Log.net("RDPServer.processExistingConnection: rcvcur is " + rcvCur);

          ClientConnection.MessageCallback pcb = con.getCallback();
          if (pcb == null) {
            Log.warn("RDPServer.processExistingConnection: no packet callback registered");
          }

          // call callback only if we haven't seen it already - eack'd
          if (!con.hasEack(packet.getSeqNum())) {
            if (con.isSequenced()) {
              // this is a sequenced connection; make sure this packet is the
              // 'next' sequential packet
              if (packet.getSeqNum() == (rcvCur + 1)) {
                // this is the next packet
                if (Log.loggingNet)
                  Log.net(
                      "RDPServer.processExistingConnection: conn is sequenced and received next packet, rcvCur="
                          + rcvCur
                          + ", packet="
                          + packet);
                if ((pcb != null) && (data != null)) {
                  queueForCallbackProcessing(pcb, con, packet);
                }
              } else {
                // not the next packet, place it in queue
                if (Log.loggingNet)
                  Log.net(
                      "RDPServer.processExistingConnection: conn is sequenced, BUT PACKET is OUT OF ORDER: rcvcur="
                          + rcvCur
                          + ", packet="
                          + packet);
                con.addSequencePacket(packet);
              }
            } else {
              if ((pcb != null) && (data != null)) {
                // make sure we haven't already processed the packet
                queueForCallbackProcessing(pcb, con, packet);
              }
            }
          } else {
            if (Log.loggingNet) Log.net(con.toString() + " already seen this packet");
          }

          // is this the next sequential packet
          if (packet.getSeqNum() == (rcvCur + 1)) {
            con.setRcvCur(rcvCur + 1);
            if (Log.loggingNet)
              Log.net(
                  "RDPServer.processExistingConnection RCVD: incremented last sequenced rcvd: "
                      + (rcvCur + 1));

            // packet in order - dont add to eack
            // Take any additional sequential packets off eack
            long seqNum = rcvCur + 2;
            while (con.removeEack(seqNum)) {
              if (Log.loggingNet)
                Log.net("RDPServer.processExistingConnection: removing/collapsing eack: " + seqNum);
              con.setRcvCur(seqNum++);
            }

            if (con.isSequenced()) {
              rcvCur++; // since we just processed the last one
              Log.net(
                  "RDPServer.processExistingConnection: connection is sequenced, processing collapsed packets.");
              // send any saved sequential packets also
              Iterator iter = con.getSequencePackets().iterator();
              while (iter.hasNext()) {
                RDPPacket p = (RDPPacket) iter.next();
                if (Log.loggingNet)
                  Log.net(
                      "rdpserver: stored packet seqnum="
                          + p.getSeqNum()
                          + ", if equal to (rcvcur + 1)="
                          + (rcvCur + 1));
                if (p.getSeqNum() == (rcvCur + 1)) {
                  Log.net(
                      "RDPServer.processExistingConnection: this is the next packet, processing");
                  // this is the next packet - update rcvcur
                  rcvCur++;

                  // process this packet
                  Log.net(
                      "RDPServer.processExistingConnection: processing stored sequential packet "
                          + p);
                  byte[] storedData = p.getData();
                  if (pcb != null && storedData != null) {
                    // queue the stored packet (p), not the packet that just arrived
                    queueForCallbackProcessing(pcb, con, p);
                  }
                  iter.remove();
                }
              }
            } else {
              if (Log.loggingNet)
                Log.net("RDPServer.processExistingConnection: connection is not sequenced");
            }
          } else {
            if (Log.loggingNet)
              Log.net(
                  "RDPServer.processExistingConnection: RCVD OUT OF ORDER: packet seq#: "
                      + packet.getSeqNum()
                      + ", but last sequential rcvd packet was: "
                      + con.getRcvCur()
                      + " -- not incrementing counter");
            if (packet.getSeqNum() > rcvCur) {
              // must be at least + 2 larger than rcvCur
              if (Log.loggingNet) Log.net("adding to eack list " + packet);
              con.addEack(packet);
            }
          }
        } finally {
          con.getLock().unlock();
        }
        return true;
      }
    }
    return false;
  }
Example 15
    public void run() {
      // Periodically (the loop sleeps 250 ms per pass) go through all the packets
      // that haven't been ack'd
      List<RDPConnection> conList = new LinkedList<RDPConnection>();
      long lastCounterTime = System.currentTimeMillis();
      while (true) {
        try {
          long startTime = System.currentTimeMillis();
          long interval = startTime - lastCounterTime;
          if (interval > 1000) {

            if (Log.loggingNet) {
              Log.net(
                  "RDPServer counters: activeChannelCalls "
                      + activeChannelCalls
                      + ", selectCalls "
                      + selectCalls
                      + ", transmits "
                      + transmits
                      + ", retransmits "
                      + retransmits
                      + " in "
                      + interval
                      + "ms");
            }
            activeChannelCalls = 0;
            selectCalls = 0;
            transmits = 0;
            retransmits = 0;
            lastCounterTime = startTime;
          }
          if (Log.loggingNet) Log.net("RDPServer.RETRY: startTime=" + startTime);

          // go through all the rdpconnections and re-send any
          // unacked packets
          conList.clear();

          lock.lock();
          try {
            // make a copy since the values() collection is
            // backed by the map
            Set<RDPConnection> conCol = RDPServer.getAllConnections();
            if (conCol == null) {
              throw new MVRuntimeException("values() returned null");
            }
            conList.addAll(conCol); // make non map backed copy
          } finally {
            lock.unlock();
          }

          Iterator<RDPConnection> iter = conList.iterator();
          while (iter.hasNext()) {
            RDPConnection con = iter.next();
            long currentTime = System.currentTimeMillis();

            // is the connection in CLOSE_WAIT
            if (con.getState() == RDPConnection.CLOSE_WAIT) {
              long closeTime = con.getCloseWaitTimer();
              long elapsedTime = currentTime - closeTime;
              Log.net(
                  "RDPRetryThread: con is in CLOSE_WAIT: elapsed close timer(ms)="
                      + elapsedTime
                      + ", waiting for 30 seconds to elapse. con="
                      + con);
              if (elapsedTime > 30000) {
                // close the connection
                Log.net("RDPRetryThread: removing CLOSE_WAIT connection. con=" + con);
                removeConnection(con);
              } else {
                Log.net(
                    "RDPRetryThread: time left on CLOSE_WAIT timer: "
                        + (30000 - (currentTime - closeTime)));
              }
              // con.close();
              continue;
            }
            if (Log.loggingNet)
              Log.net(
                  "RDPServer.RETRY: resending expired packets "
                      + con
                      + " - current list size = "
                      + con.unackListSize());

            // see if we should send a null packet, but only if con is already open
            if ((con.getState() == RDPConnection.OPEN)
                && ((currentTime - con.getLastNullPacketTime()) > 30000)) {
              con.getLock().lock();
              try {
                RDPPacket nulPacket = RDPPacket.makeNulPacket();
                con.sendPacketImmediate(nulPacket, false);
                con.setLastNullPacketTime();
                if (Log.loggingNet) Log.net("RDPServer.retry: sent nul packet: " + nulPacket);
              } finally {
                con.getLock().unlock();
              }
            } else {
              if (Log.loggingNet)
                Log.net(
                    "RDPServer.retry: sending nul packet in "
                        + (30000 - (currentTime - con.getLastNullPacketTime())));
            }
            con.resend(
                currentTime - resendTimerMS, // resend cutoff time
                currentTime - resendTimeoutMS); // giveup time
          }

          long endTime = System.currentTimeMillis();
          if (Log.loggingNet)
            Log.net(
                "RDPServer.RETRY: endTime=" + endTime + ", elapse(ms)=" + (endTime - startTime));
          Thread.sleep(250);
        } catch (Exception e) {
          Log.exception("RDPServer.RetryThread.run caught exception", e);
        }
      }
    }
  /**
   * Checks whether any of the local partitions need to be evicted.
   *
   * @param updateSeq Update sequence.
   * @return {@code true} if the partition state changed as a result of evictions.
   */
  private boolean checkEvictions(long updateSeq) {
    assert lock.isWriteLockedByCurrentThread();

    boolean changed = false;

    UUID locId = cctx.nodeId();

    for (GridDhtLocalPartition part : locParts.values()) {
      GridDhtPartitionState state = part.state();

      if (state.active()) {
        int p = part.id();

        List<ClusterNode> affNodes = cctx.affinity().nodes(p, topVer);

        if (!affNodes.contains(cctx.localNode())) {
          Collection<UUID> nodeIds = F.nodeIds(nodes(p, topVer, OWNING));

          // If all affinity nodes are owners, then evict partition from local node.
          if (nodeIds.containsAll(F.nodeIds(affNodes))) {
            part.rent(false);

            updateLocal(part.id(), locId, part.state(), updateSeq);

            changed = true;

            if (log.isDebugEnabled())
              log.debug("Evicted local partition (all affinity nodes are owners): " + part);
          } else {
            int ownerCnt = nodeIds.size();
            int affCnt = affNodes.size();

            if (ownerCnt > affCnt) {
              List<ClusterNode> sorted = new ArrayList<>(cctx.discovery().nodes(nodeIds));

              // Sort by node orders in ascending order.
              Collections.sort(sorted, CU.nodeComparator(true));

              int diff = sorted.size() - affCnt;

              for (int i = 0; i < diff; i++) {
                ClusterNode n = sorted.get(i);

                if (locId.equals(n.id())) {
                  part.rent(false);

                  updateLocal(part.id(), locId, part.state(), updateSeq);

                  changed = true;

                  if (log.isDebugEnabled())
                    log.debug(
                        "Evicted local partition (this node is oldest non-affinity node): " + part);

                  break;
                }
              }
            }
          }
        }
      }
    }

    return changed;
  }