/** Checks consistency after all operations. */
  private void consistencyCheck() {
    if (CONSISTENCY_CHECK) {
      assert lock.writeLock().isHeldByCurrentThread();

      if (node2part == null) return;

      for (Map.Entry<UUID, GridDhtPartitionMap> e : node2part.entrySet()) {
        for (Integer p : e.getValue().keySet()) {
          Set<UUID> nodeIds = part2node.get(p);

          assert nodeIds != null
              : "Failed consistency check [part=" + p + ", nodeId=" + e.getKey() + ']';
          assert nodeIds.contains(e.getKey())
              : "Failed consistency check [part="
                  + p
                  + ", nodeId="
                  + e.getKey()
                  + ", nodeIds="
                  + nodeIds
                  + ']';
        }
      }

      for (Map.Entry<Integer, Set<UUID>> e : part2node.entrySet()) {
        for (UUID nodeId : e.getValue()) {
          GridDhtPartitionMap map = node2part.get(nodeId);

          assert map != null
              : "Failed consistency check [part=" + e.getKey() + ", nodeId=" + nodeId + ']';
          assert map.containsKey(e.getKey())
              : "Failed consistency check [part=" + e.getKey() + ", nodeId=" + nodeId + ']';
        }
      }
    }
  }
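
This check verifies that node2part (node to partitions) and part2node (partition to nodes) remain exact inverses of each other. Below is a minimal standalone sketch of the same invariant on plain JDK maps; all names in it are illustrative, not taken from the Ignite source.

import java.util.Map;
import java.util.Set;
import java.util.UUID;

final class InverseIndexCheck {
  /** Asserts that the forward and reverse indexes agree in both directions. */
  static void check(Map<UUID, Set<Integer>> node2part, Map<Integer, Set<UUID>> part2node) {
    for (Map.Entry<UUID, Set<Integer>> e : node2part.entrySet())
      for (Integer p : e.getValue()) {
        Set<UUID> nodes = part2node.get(p);
        assert nodes != null && nodes.contains(e.getKey())
            : "Missing reverse mapping [part=" + p + ", node=" + e.getKey() + ']';
      }
    for (Map.Entry<Integer, Set<UUID>> e : part2node.entrySet())
      for (UUID node : e.getValue()) {
        Set<Integer> parts = node2part.get(node);
        assert parts != null && parts.contains(e.getKey())
            : "Missing forward mapping [part=" + e.getKey() + ", node=" + node + ']';
      }
  }
}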
Example 2
 /**
  * A DatagramChannel has data ready: process all pending packets, whether they are for an
  * RDPServerSocket or an RDPConnection.
  */
 void processActiveChannel(DatagramChannel dc) throws ClosedChannelException {
   RDPPacket packet;
   int count = 0;
   // read in the packet
   try {
     Set<RDPConnection> needsAckConnections = new HashSet<RDPConnection>();
     while ((packet = RDPServer.receivePacket(dc)) != null) {
       if (Log.loggingNet)
         Log.net(
             "RDPServer.processActiveChannel: Starting iteration with count of "
                 + count
                 + " packets");
       // see if there is a connection already for this packet
       InetAddress remoteAddr = packet.getInetAddress();
       int remotePort = packet.getPort();
       int localPort = dc.socket().getLocalPort();
       ConnectionInfo conInfo = new ConnectionInfo(remoteAddr, remotePort, localPort);
       RDPConnection con = RDPServer.getConnection(dc, conInfo);
       if (con != null) {
         if (Log.loggingNet)
           Log.net("RDPServer.processActiveChannel: found an existing connection: " + con);
         count++;
         if (processExistingConnection(con, packet)) needsAckConnections.add(con);
         // Prevent this from blocking getActiveChannels by
         // putting an upper bound on the number of packets
         // processed
         if (count >= 20) break;
         continue;
       } else {
         Log.net("RDPServer.processActiveChannel: did not find an existing connection");
       }
       // there is no connection,
       // see if there is a socket listening for new connection
       RDPServerSocket rdpSocket = RDPServer.getRDPSocket(dc);
       if (rdpSocket != null) {
         count++;
          processNewConnection(rdpSocket, packet);
          break;
        }
        // No connection and no listening socket: drop the packet, but still flush pending acks.
        break;
     }
     // Finally, send out the acks
     for (RDPConnection con : needsAckConnections) {
       RDPPacket replyPacket = new RDPPacket(con);
       con.sendPacketImmediate(replyPacket, false);
     }
   } catch (ClosedChannelException ex) {
     Log.error("RDPServer.processActiveChannel: ClosedChannel " + dc.socket());
     throw ex;
   } finally {
     if (Log.loggingNet)
       Log.net("RDPServer.processActiveChannel: Returning after processing " + count + " packets");
   }
 }
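
Collecting connections into a Set before sending acks deduplicates the work: a connection that produced many packets in one batch still gets a single ack at the end. The shape of that pattern in isolation (all names here are invented for illustration):

import java.util.HashSet;
import java.util.Set;
import java.util.function.Consumer;

final class DedupFlush<T> {
  private final Set<T> pending = new HashSet<T>();

  /** Marks an item as needing a flush; duplicate marks collapse into one entry. */
  void mark(T item) {
    pending.add(item);
  }

  /** Flushes each marked item exactly once, then clears the set. */
  void flushAll(Consumer<T> flush) {
    for (T item : pending) flush.accept(item);
    pending.clear();
  }
}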
Example 3
 static Set<RDPConnection> getAllConnections() {
   lock.lock();
   try {
     Set<RDPConnection> allCon = new HashSet<RDPConnection>();
      for (Map<ConnectionInfo, RDPConnection> dcMap : allConMap.values())
        allCon.addAll(dcMap.values());
     return allCon;
   } finally {
     lock.unlock();
   }
 }
  private boolean trashConnection(Connection connection) {
    // First, make sure we don't go below core connections
    for (; ; ) {
      int opened = open.get();
      if (opened <= options().getCoreConnectionsPerHost(hostDistance)) return false;

      if (open.compareAndSet(opened, opened - 1)) break;
    }
    trash.add(connection);
    connections.remove(connection);

    if (connection.inFlight.get() == 0 && trash.remove(connection)) close(connection);
    return true;
  }
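The for (;;) loop above is the standard compare-and-set retry idiom: reread the counter, refuse if the floor has been reached, otherwise attempt an atomic decrement and retry on contention. The same idiom in isolation, as a sketch (class and method names are invented for illustration):

import java.util.concurrent.atomic.AtomicInteger;

final class BoundedCounter {
  private final AtomicInteger open = new AtomicInteger();

  /** Atomically decrements the counter unless doing so would drop it below 'floor'. */
  boolean tryDecrementAbove(int floor) {
    for (;;) {
      int cur = open.get();
      if (cur <= floor) return false;        // at or below the floor: refuse
      if (open.compareAndSet(cur, cur - 1)) return true;
      // CAS lost a race with another thread: loop and reread
    }
  }
}
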
  public void returnConnection(Connection connection) {
    int inFlight = connection.inFlight.decrementAndGet();

    if (connection.isDefunct()) {
      if (host.getMonitor().signalConnectionFailure(connection.lastException())) shutdown();
      else replace(connection);
    } else {

      if (trash.contains(connection) && inFlight == 0) {
        if (trash.remove(connection)) close(connection);
        return;
      }

      if (connections.size() > options().getCoreConnectionsPerHost(hostDistance)
          && inFlight <= options().getMinSimultaneousRequestsPerConnectionThreshold(hostDistance)) {
        trashConnection(connection);
      } else {
        signalAvailableConnection();
      }
    }
  }
  /** @param nodeId Node to remove. */
  private void removeNode(UUID nodeId) {
    assert nodeId != null;
    assert lock.writeLock().isHeldByCurrentThread();

    ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx.shared(), topVer);

    assert oldest != null;

    ClusterNode loc = cctx.localNode();

    if (node2part != null) {
      if (oldest.equals(loc) && !node2part.nodeId().equals(loc.id())) {
        updateSeq.setIfGreater(node2part.updateSequence());

        node2part =
            new GridDhtPartitionFullMap(
                loc.id(), loc.order(), updateSeq.incrementAndGet(), node2part, false);
      } else node2part = new GridDhtPartitionFullMap(node2part, node2part.updateSequence());

      part2node = new HashMap<>(part2node);

      GridDhtPartitionMap parts = node2part.remove(nodeId);

      if (parts != null) {
        for (Integer p : parts.keySet()) {
          Set<UUID> nodeIds = part2node.get(p);

          if (nodeIds != null) {
            nodeIds.remove(nodeId);

            if (nodeIds.isEmpty()) part2node.remove(p);
          }
        }
      }

      consistencyCheck();
    }
  }
Example 7
 /** Starts the server and listens for incoming packets. */
 public void run() {
   try {
     while (true) {
       if (Log.loggingNet) Log.net("In RDPServer.run: starting new iteration");
       try {
         Set<DatagramChannel> activeChannels = getActiveChannels();
         activeChannelCalls++;
          for (DatagramChannel dc : activeChannels) {
            if (Log.loggingNet) Log.net("In RDPServer.run: about to call processActiveChannel");
            processActiveChannel(dc);
            if (Log.loggingNet) Log.net("In RDPServer.run: returned from processActiveChannel");
          }
       } catch (ClosedChannelException ex) {
         // ignore
       } catch (Exception e) {
         Log.exception("RDPServer.run caught exception", e);
       }
     }
   } finally {
     Log.warn("RDPServer.run: thread exiting");
   }
 }
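
The loop above follows the usual skeleton for a long-lived service thread: each iteration's exceptions are caught and logged so that one bad packet cannot kill the thread. A minimal sketch of the same skeleton (ServiceLoop and doWork are invented names, not from the source):

// Sketch of a crash-resistant service loop; doWork() is a placeholder for one unit of work.
abstract class ServiceLoop implements Runnable {
  abstract void doWork() throws Exception;

  public void run() {
    while (true) {
      try {
        doWork();                             // one unit of work per iteration
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();   // restore the flag and exit cleanly
        return;
      } catch (Exception e) {
        System.err.println("service loop caught exception, continuing: " + e);
      }
    }
  }
}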
Example 8
  /**
   * Returns the set of DatagramChannels that have data ready to read, blocking until at least one
   * channel is ready.
   */
  Set<DatagramChannel> getActiveChannels() throws InterruptedException, java.io.IOException {
    lock.lock();
    try {
      while (channelMap.isEmpty()) {
        channelMapNotEmpty.await();
      }
    } finally {
      lock.unlock();
    }

    Set<SelectionKey> readyKeys = null;
    do {
      lock.lock();
      try {
        if (!newChannelSet.isEmpty()) {
          if (Log.loggingNet) Log.net("RDPServer.getActiveChannels: newChannelSet is not null");
          Iterator<DatagramChannel> iter = newChannelSet.iterator();
          while (iter.hasNext()) {
            DatagramChannel newDC = iter.next();
            iter.remove();
            newDC.register(selector, SelectionKey.OP_READ);
          }
        }
      } finally {
        lock.unlock();
      }
      int numReady = selector.select(); // this is a blocking call - thread safe
      selectCalls++;
      if (numReady == 0) {
        if (Log.loggingNet) Log.net("RDPServer.getActiveChannels: selector returned 0");
        continue;
      }
      readyKeys = selector.selectedKeys();
      if (Log.loggingNet)
        Log.net(
            "RDPServer.getActiveChannels: called select - # of ready keys = "
                + readyKeys.size()
                + " == "
                + numReady);
    } while (readyKeys == null || readyKeys.isEmpty());

    lock.lock();
    try {
      // get a datagramchannel that is ready
      Set<DatagramChannel> activeChannels = new HashSet<DatagramChannel>();

      Iterator<SelectionKey> iter = readyKeys.iterator();
      while (iter.hasNext()) {
        SelectionKey key = iter.next();
        if (Log.loggingNet)
          Log.net(
              "RDPServer.getActiveChannels: matched selectionkey: "
                  + key
                  + ", isAcceptable="
                  + key.isAcceptable()
                  + ", isReadable="
                  + key.isReadable()
                  + ", isValid="
                  + key.isValid()
                  + ", isWritable="
                  + key.isWritable());
        iter.remove(); // remove from the selected key list

        if (!key.isReadable() || !key.isValid()) {
          Log.error(
              "RDPServer.getActiveChannels: Throwing exception: RDPServer: not readable or invalid");
          throw new MVRuntimeException("RDPServer: not readable or invalid");
        }

        DatagramChannel dc = (DatagramChannel) key.channel();
        activeChannels.add(dc);
      }
      if (Log.loggingNet)
        Log.net(
            "RDPServer.getActiveChannels: returning " + activeChannels.size() + " active channels");
      return activeChannels;
    } finally {
      lock.unlock();
    }
  }
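For reference, the Selector handshake the method above builds on, reduced to a minimal, self-contained sketch (the port number and the absence of error handling are for illustration only):

import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.channels.DatagramChannel;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;

final class SelectorSketch {
  public static void main(String[] args) throws IOException {
    Selector selector = Selector.open();
    DatagramChannel dc = DatagramChannel.open();
    dc.configureBlocking(false);                  // register() requires non-blocking mode
    dc.bind(new InetSocketAddress(5000));         // illustrative port
    dc.register(selector, SelectionKey.OP_READ);

    while (selector.select() == 0) {
      // spurious wakeup: select again
    }

    for (SelectionKey key : selector.selectedKeys()) {
      if (key.isValid() && key.isReadable()) {
        DatagramChannel ready = (DatagramChannel) key.channel();
        // read datagrams from 'ready' here
      }
    }
    selector.selectedKeys().clear();              // clear, or the keys stay selected forever
  }
}
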
  /** {@inheritDoc} */
  @SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
  @Nullable
  @Override
  public GridDhtPartitionMap update(
      @Nullable GridDhtPartitionExchangeId exchId, GridDhtPartitionFullMap partMap) {
    if (log.isDebugEnabled())
      log.debug(
          "Updating full partition map [exchId=" + exchId + ", parts=" + fullMapString() + ']');

    assert partMap != null;

    lock.writeLock().lock();

    try {
      if (stopping) return null;

      if (exchId != null && lastExchangeId != null && lastExchangeId.compareTo(exchId) >= 0) {
        if (log.isDebugEnabled())
          log.debug(
              "Stale exchange id for full partition map update (will ignore) [lastExchId="
                  + lastExchangeId
                  + ", exchId="
                  + exchId
                  + ']');

        return null;
      }

      if (node2part != null && node2part.compareTo(partMap) >= 0) {
        if (log.isDebugEnabled())
          log.debug(
              "Stale partition map for full partition map update (will ignore) [lastExchId="
                  + lastExchangeId
                  + ", exchId="
                  + exchId
                  + ", curMap="
                  + node2part
                  + ", newMap="
                  + partMap
                  + ']');

        return null;
      }

      long updateSeq = this.updateSeq.incrementAndGet();

      if (exchId != null) lastExchangeId = exchId;

      if (node2part != null) {
        for (GridDhtPartitionMap part : node2part.values()) {
          GridDhtPartitionMap newPart = partMap.get(part.nodeId());

          // If the current map for some node carries a newer update sequence
          // than the incoming one, keep the newer (current) value.
          if (newPart != null && newPart.updateSequence() < part.updateSequence()) {
            if (log.isDebugEnabled())
              log.debug(
                  "Overriding partition map in full update map [exchId="
                      + exchId
                      + ", curPart="
                      + mapString(part)
                      + ", newPart="
                      + mapString(newPart)
                      + ']');

            partMap.put(part.nodeId(), part);
          }
        }

        for (Iterator<UUID> it = partMap.keySet().iterator(); it.hasNext(); ) {
          UUID nodeId = it.next();

          if (!cctx.discovery().alive(nodeId)) {
            if (log.isDebugEnabled())
              log.debug(
                  "Removing left node from full map update [nodeId="
                      + nodeId
                      + ", partMap="
                      + partMap
                      + ']');

            it.remove();
          }
        }
      }

      node2part = partMap;

      Map<Integer, Set<UUID>> p2n = new HashMap<>(cctx.affinity().partitions(), 1.0f);

      for (Map.Entry<UUID, GridDhtPartitionMap> e : partMap.entrySet()) {
        for (Integer p : e.getValue().keySet()) {
          Set<UUID> ids = p2n.get(p);

          if (ids == null)
            // Initialize HashSet to size 3 in anticipation that there won't be
            // more than 3 nodes per partition.
            p2n.put(p, ids = U.newHashSet(3));

          ids.add(e.getKey());
        }
      }

      part2node = p2n;

      boolean changed = checkEvictions(updateSeq);

      consistencyCheck();

      if (log.isDebugEnabled()) log.debug("Partition map after full update: " + fullMapString());

      return changed ? localPartitionMap() : null;
    } finally {
      lock.writeLock().unlock();
    }
  }
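The loop over node2part.values() above implements a merge-by-version rule: when the incoming full map carries an older update sequence for some node, the local (newer) per-node map wins. The rule in isolation, as a hedged sketch (Versioned is an invented helper type, not an Ignite class):

import java.util.HashMap;
import java.util.Map;

final class VersionedMerge {
  /** A value tagged with a monotonically increasing update sequence. */
  static final class Versioned<V> {
    final long seq;
    final V value;
    Versioned(long seq, V value) { this.seq = seq; this.value = value; }
  }

  /**
   * Applies a full replacement map, but keeps the local copy of any entry
   * whose update sequence is newer than the incoming one.
   */
  static <K, V> Map<K, Versioned<V>> merge(
      Map<K, Versioned<V>> current, Map<K, Versioned<V>> incoming) {
    Map<K, Versioned<V>> merged = new HashMap<K, Versioned<V>>(incoming);
    for (Map.Entry<K, Versioned<V>> e : current.entrySet()) {
      Versioned<V> theirs = merged.get(e.getKey());
      if (theirs != null && theirs.seq < e.getValue().seq)
        merged.put(e.getKey(), e.getValue()); // the local copy is newer: keep it
    }
    return merged;
  }
}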
  /**
   * Updates value for single partition.
   *
   * @param p Partition.
   * @param nodeId Node ID.
   * @param state State.
   * @param updateSeq Update sequence.
   */
  @SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
  private void updateLocal(int p, UUID nodeId, GridDhtPartitionState state, long updateSeq) {
    assert lock.isWriteLockedByCurrentThread();
    assert nodeId.equals(cctx.nodeId());

    // If a node is joining, use the topology as of the time that node joined.
    ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx.shared(), topVer);

    assert oldest != null;

    // If this node became the oldest node.
    if (oldest.id().equals(cctx.nodeId())) {
      long seq = node2part.updateSequence();

      if (seq != updateSeq) {
        if (seq > updateSeq) {
          if (this.updateSeq.get() < seq) {
            // Update global counter if necessary.
            boolean b = this.updateSeq.compareAndSet(this.updateSeq.get(), seq + 1);

            assert b
                : "Invalid update sequence [updateSeq="
                    + updateSeq
                    + ", seq="
                    + seq
                    + ", curUpdateSeq="
                    + this.updateSeq.get()
                    + ", node2part="
                    + node2part.toFullString()
                    + ']';

            updateSeq = seq + 1;
          } else updateSeq = seq;
        }

        node2part.updateSequence(updateSeq);
      }
    }

    GridDhtPartitionMap map = node2part.get(nodeId);

    if (map == null)
      node2part.put(
          nodeId,
          map =
              new GridDhtPartitionMap(
                  nodeId,
                  updateSeq,
                  Collections.<Integer, GridDhtPartitionState>emptyMap(),
                  false));

    map.updateSequence(updateSeq);

    map.put(p, state);

    Set<UUID> ids = part2node.get(p);

    if (ids == null) part2node.put(p, ids = U.newHashSet(3));

    ids.add(nodeId);
  }
  /** {@inheritDoc} */
  @SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
  @Nullable
  @Override
  public GridDhtPartitionMap update(
      @Nullable GridDhtPartitionExchangeId exchId, GridDhtPartitionMap parts) {
    if (log.isDebugEnabled())
      log.debug(
          "Updating single partition map [exchId=" + exchId + ", parts=" + mapString(parts) + ']');

    if (!cctx.discovery().alive(parts.nodeId())) {
      if (log.isDebugEnabled())
        log.debug(
            "Received partition update for non-existing node (will ignore) [exchId="
                + exchId
                + ", parts="
                + parts
                + ']');

      return null;
    }

    lock.writeLock().lock();

    try {
      if (stopping) return null;

      if (lastExchangeId != null && exchId != null && lastExchangeId.compareTo(exchId) > 0) {
        if (log.isDebugEnabled())
          log.debug(
              "Stale exchange id for single partition map update (will ignore) [lastExchId="
                  + lastExchangeId
                  + ", exchId="
                  + exchId
                  + ']');

        return null;
      }

      if (exchId != null) lastExchangeId = exchId;

      if (node2part == null)
        // Create invalid partition map.
        node2part = new GridDhtPartitionFullMap();

      GridDhtPartitionMap cur = node2part.get(parts.nodeId());

      if (cur != null && cur.updateSequence() >= parts.updateSequence()) {
        if (log.isDebugEnabled())
          log.debug(
              "Stale update sequence for single partition map update (will ignore) [exchId="
                  + exchId
                  + ", curSeq="
                  + cur.updateSequence()
                  + ", newSeq="
                  + parts.updateSequence()
                  + ']');

        return null;
      }

      long updateSeq = this.updateSeq.incrementAndGet();

      node2part = new GridDhtPartitionFullMap(node2part, updateSeq);

      boolean changed = false;

      if (cur == null || !cur.equals(parts)) changed = true;

      node2part.put(parts.nodeId(), parts);

      part2node = new HashMap<>(part2node);

      // Add new mappings.
      for (Integer p : parts.keySet()) {
        Set<UUID> ids = part2node.get(p);

        if (ids == null)
          // Initialize HashSet to size 3 in anticipation that there won't be
          // more than 3 nodes per partition.
          part2node.put(p, ids = U.newHashSet(3));

        changed |= ids.add(parts.nodeId());
      }

      // Remove obsolete mappings.
      if (cur != null) {
        for (Integer p : F.view(cur.keySet(), F0.notIn(parts.keySet()))) {
          Set<UUID> ids = part2node.get(p);

          if (ids != null) changed |= ids.remove(parts.nodeId());
        }
      }

      changed |= checkEvictions(updateSeq);

      consistencyCheck();

      if (log.isDebugEnabled()) log.debug("Partition map after single update: " + fullMapString());

      return changed ? localPartitionMap() : null;
    } finally {
      lock.writeLock().unlock();
    }
  }
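
Both update() variants finish by reconciling the part2node reverse index against the node's new partition set: add the node under every partition it now reports, drop it from partitions it no longer reports, and record whether anything changed. That reconciliation step in isolation (types and names chosen for illustration):

import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.UUID;

final class ReverseIndexSketch {
  /** Reconciles part2node after nodeId's partition set changes from oldParts to newParts. */
  static boolean reconcile(
      Map<Integer, Set<UUID>> part2node, UUID nodeId, Set<Integer> oldParts, Set<Integer> newParts) {
    boolean changed = false;
    for (Integer p : newParts) {                   // add new mappings
      Set<UUID> ids = part2node.get(p);
      if (ids == null) part2node.put(p, ids = new HashSet<UUID>());
      changed |= ids.add(nodeId);
    }
    for (Integer p : oldParts) {                   // remove obsolete mappings
      if (newParts.contains(p)) continue;
      Set<UUID> ids = part2node.get(p);
      if (ids != null) changed |= ids.remove(nodeId);
    }
    return changed;
  }
}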