Code Example #1
  /**
   * Waits for renting partitions.
   *
   * @return {@code true} if mapping was changed.
   * @throws IgniteCheckedException If failed.
   */
  private boolean waitForRent() throws IgniteCheckedException {
    boolean changed = false;

    // Synchronously wait for all renting partitions to complete.
    for (Iterator<GridDhtLocalPartition> it = locParts.values().iterator(); it.hasNext(); ) {
      GridDhtLocalPartition p = it.next();

      GridDhtPartitionState state = p.state();

      if (state == RENTING || state == EVICTED) {
        if (log.isDebugEnabled()) log.debug("Waiting for renting partition: " + p);

        // Wait for partition to empty out.
        p.rent(true).get();

        if (log.isDebugEnabled()) log.debug("Finished waiting for renting partition: " + p);

        // Remove evicted partition.
        it.remove();

        changed = true;
      }
    }

    return changed;
  }
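The idiom to note here is removal through the iterator itself: deleting from locParts via the map's own remove() while traversing its values view would throw ConcurrentModificationException, so the loop calls it.remove(). A minimal, self-contained sketch of the same idiom, with hypothetical names standing in for the Ignite types:

  import java.util.HashMap;
  import java.util.Iterator;
  import java.util.Map;

  public class IteratorRemoveSketch {
    public static void main(String[] args) {
      Map<Integer, String> parts = new HashMap<>();
      parts.put(1, "RENTING");
      parts.put(2, "OWNING");
      parts.put(3, "EVICTED");

      // Remove entries while iterating; only Iterator.remove() is safe here.
      for (Iterator<String> it = parts.values().iterator(); it.hasNext(); ) {
        String state = it.next();
        if (state.equals("RENTING") || state.equals("EVICTED")) it.remove();
      }

      System.out.println(parts); // {2=OWNING}
    }
  }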
Code Example #2
 public void remove() {
   if (lastRet < 0) throw new IllegalStateException();
   Object x = array[lastRet];
   lastRet = -1;
   // Traverse underlying queue to find == element,
   // not just a .equals element.
   lock.lock();
   try {
     for (Iterator it = q.iterator(); it.hasNext(); ) {
       if (it.next() == x) {
         it.remove();
         return;
       }
     }
   } finally {
     lock.unlock();
   }
 }
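As the comment says, this remove() matches by reference (==) rather than by equals(), so it deletes exactly the element the iterator last returned even if the queue holds duplicates that compare equal. A small sketch of why that distinction matters (illustrative only, not the java.util.concurrent source):

 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;

 public class IdentityRemoveSketch {
   public static void main(String[] args) {
     String a = new String("x");
     String b = new String("x"); // equals(a) is true, but a different object
     List<String> q = new ArrayList<>(List.of(a, b));

     // Remove precisely b by identity; the equal element a survives.
     for (Iterator<String> it = q.iterator(); it.hasNext(); ) {
       if (it.next() == b) {
         it.remove();
         break;
       }
     }

     System.out.println(q.size() + ", " + (q.get(0) == a)); // 1, true
   }
 }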
Code Example #3
 /** Starts the server and listens for incoming packets. */
 public void run() {
   try {
     while (true) {
       if (Log.loggingNet) Log.net("In RDPServer.run: starting new iteration");
       try {
         Set<DatagramChannel> activeChannels = getActiveChannels();
         activeChannelCalls++;
         Iterator<DatagramChannel> iter = activeChannels.iterator();
         while (iter.hasNext()) {
           DatagramChannel dc = iter.next();
           if (Log.loggingNet) Log.net("In RDPServer.run: about to call processActiveChannel");
           processActiveChannel(dc);
           if (Log.loggingNet) Log.net("In RDPServer.run: returned from processActiveChannel");
         }
       } catch (ClosedChannelException ex) {
         // ignore
       } catch (Exception e) {
         Log.exception("RDPServer.run caught exception", e);
       }
     }
   } finally {
     Log.warn("RDPServer.run: thread exiting");
   }
 }
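The shape of this loop is the point: exceptions are caught inside the while so one failed channel cannot kill the receive thread, and the outer finally guarantees the exit is logged even if something unrecoverable escapes. A stripped-down sketch of that structure (hypothetical messages, no RDP types):

 public class ResilientLoopSketch {
   public static void main(String[] args) {
     try {
       for (int i = 0; i < 3; i++) { // stands in for while (true)
         try {
           if (i == 1) throw new RuntimeException("bad packet");
           System.out.println("iteration " + i + ": ok");
         } catch (Exception e) {
           // swallow and keep looping, as RDPServer.run does
           System.out.println("iteration " + i + ": caught " + e.getMessage());
         }
       }
     } finally {
       System.out.println("thread exiting"); // always logged
     }
   }
 }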
Code Example #4
  /** Clears values for this partition. */
  private void clearAll() {
    GridCacheVersion clearVer = cctx.versions().next();

    boolean swap = cctx.isSwapOrOffheapEnabled();

    boolean rec = cctx.events().isRecordable(EVT_CACHE_REBALANCE_OBJECT_UNLOADED);

    Iterator<GridDhtCacheEntry> it = map.values().iterator();

    GridCloseableIterator<Map.Entry<byte[], GridCacheSwapEntry>> swapIt = null;

    if (swap
        && GridQueryProcessor.isEnabled(cctx.config())) { // Indexing needs to unswap cache values.
      Iterator<GridDhtCacheEntry> unswapIt = null;

      try {
        swapIt = cctx.swap().iterator(id);
        unswapIt = unswapIterator(swapIt);
      } catch (Exception e) {
        U.error(log, "Failed to clear swap for evicted partition: " + this, e);
      }

      if (unswapIt != null) it = F.concat(it, unswapIt);
    }

    try {
      while (it.hasNext()) {
        GridDhtCacheEntry cached = it.next();

        try {
          if (cached.clearInternal(clearVer, swap)) {
            map.remove(cached.key(), cached);

            if (!cached.isInternal()) {
              mapPubSize.decrement();

              if (rec)
                cctx.events()
                    .addEvent(
                        cached.partition(),
                        cached.key(),
                        cctx.localNodeId(),
                        (IgniteUuid) null,
                        null,
                        EVT_CACHE_REBALANCE_OBJECT_UNLOADED,
                        null,
                        false,
                        cached.rawGet(),
                        cached.hasValue(),
                        null,
                        null,
                        null);
            }
          }
        } catch (IgniteCheckedException e) {
          U.error(log, "Failed to clear cache entry for evicted partition: " + cached, e);
        }
      }
    } finally {
      U.close(swapIt, log);
    }
  }
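Two details carry clearAll(): the on-heap and unswapped iterators are concatenated so both sources are cleared in a single pass, and the closeable swap iterator is released in finally no matter how many per-entry clears fail. A plain-JDK sketch of that concat-then-close shape (a hypothetical concat helper, not Ignite's F.concat):

  import java.util.Iterator;
  import java.util.List;

  public class ConcatIteratorSketch {
    /** Lazily chains two iterators, like a concat utility would. */
    static <T> Iterator<T> concat(Iterator<T> a, Iterator<T> b) {
      return new Iterator<T>() {
        public boolean hasNext() { return a.hasNext() || b.hasNext(); }
        public T next() { return a.hasNext() ? a.next() : b.next(); }
      };
    }

    public static void main(String[] args) {
      Iterator<String> onHeap = List.of("e1", "e2").iterator();
      Iterator<String> unswapped = List.of("e3").iterator();

      Iterator<String> it = concat(onHeap, unswapped);
      try {
        while (it.hasNext()) System.out.println("clearing " + it.next());
      } finally {
        // The real code closes the swap iterator here via U.close(swapIt, log).
        System.out.println("swap iterator released");
      }
    }
  }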
Code Example #5
  /**
   * Creates an <tt>ArrayBlockingQueue</tt> with the given (fixed) capacity, the specified access
   * policy and initially containing the elements of the given collection, added in traversal order
   * of the collection's iterator.
   *
   * @param capacity the capacity of this queue
   * @param fair if <tt>true</tt> then queue accesses for threads blocked on insertion or removal
   *     are processed in FIFO order; if <tt>false</tt> the access order is unspecified.
   * @param c the collection of elements to initially contain
   * @throws IllegalArgumentException if <tt>capacity</tt> is less than <tt>c.size()</tt>, or less
   *     than 1.
   * @throws NullPointerException if the specified collection or any of its elements are null
   */
  public ArrayBlockingQueue(int capacity, boolean fair, Collection<? extends E> c) {
    this(capacity, fair);
    if (capacity < c.size()) throw new IllegalArgumentException();

    for (Iterator<? extends E> it = c.iterator(); it.hasNext(); ) add(it.next());
  }
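Usage is straightforward; since capacity is validated against c.size() up front, the constructor fails fast instead of blocking partway through the copy. For example:

  import java.util.List;
  import java.util.concurrent.ArrayBlockingQueue;

  public class QueueCtorExample {
    public static void main(String[] args) {
      // Fits: capacity 5 >= 3 initial elements; fair = FIFO hand-off for blocked threads.
      ArrayBlockingQueue<Integer> q = new ArrayBlockingQueue<>(5, true, List.of(1, 2, 3));
      System.out.println(q); // [1, 2, 3]

      try {
        new ArrayBlockingQueue<>(2, false, List.of(1, 2, 3)); // capacity < c.size()
      } catch (IllegalArgumentException e) {
        System.out.println("rejected: capacity smaller than the collection");
      }
    }
  }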
Code Example #6
  /**
   * Processes a packet that belongs to the passed-in connection. Returns true if the connection
   * is open and the packet was a data packet.
   */
  boolean processExistingConnection(RDPConnection con, RDPPacket packet) {

    if (Log.loggingNet)
      Log.net("RDPServer.processExistingConnection: con state=" + con + ", packet=" + packet);
    packetCounter.add();

    int state = con.getState();
    if (state == RDPConnection.LISTEN) {
      // something is wrong, we shouldn't be here:
      // we reach this method after looking in the connections map,
      // but all LISTEN connections should come directly
      // from the server sockets
      Log.error("RDPServer.processExistingConnection: connection shouldnt be in LISTEN state");
      return false;
    }
    if (state == RDPConnection.SYN_SENT) {
      if (!packet.isAck()) {
        Log.warn("got a non-ack packet when we're in SYN_SENT");
        return false;
      }
      if (!packet.isSyn()) {
        Log.warn("got a non-syn packet when we're in SYN_SENT");
        return false;
      }
      if (Log.loggingNet) Log.net("good: got syn-ack packet in syn_sent");

      // make sure it's acking our initial segment #
      if (packet.getAckNum() != con.getInitialSendSeqNum()) {
        if (Log.loggingNet) Log.net("syn's ack number does not match initial seq #");
        return false;
      }

      con.setRcvCur(packet.getSeqNum());
      con.setRcvIrs(packet.getSeqNum());
      con.setMaxSendUnacks(packet.getSendUnacks());
      con.setMaxReceiveSegmentSize(packet.getMaxRcvSegmentSize());
      con.setSendUnackd(packet.getAckNum() + 1);

      // ack first before setting state to open
      // otherwise some other thread will get woken up and send data
      // before we send the ack
      if (Log.loggingNet) Log.net("new connection state: " + con);
      RDPPacket replyPacket = new RDPPacket(con);
      con.sendPacketImmediate(replyPacket, false);
      con.setState(RDPConnection.OPEN);
      return false;
    }
    if (state == RDPConnection.SYN_RCVD) {
      if (packet.getSeqNum() <= con.getRcvIrs()) {
        Log.error("seqnum is not above rcv initial seq num");
        return false;
      }
      if (packet.getSeqNum() > (con.getRcvCur() + (con.getRcvMax() * 2))) {
        Log.error("seqnum is too big");
        return false;
      }
      if (packet.isAck()) {
        if (packet.getAckNum() == con.getInitialSendSeqNum()) {
          if (Log.loggingNet) Log.net("got ack for our syn - setting state to open");
          con.setState(RDPConnection.OPEN); // this will notify()

          // call the accept callback
          // first find the serversocket
          DatagramChannel dc = con.getDatagramChannel();
          if (dc == null) {
            throw new MVRuntimeException(
                "RDPServer.processExistingConnection: no datagramchannel for connection that just turned OPEN");
          }
          RDPServerSocket rdpSocket = RDPServer.getRDPSocket(dc);
          if (rdpSocket == null) {
            throw new MVRuntimeException(
                "RDPServer.processExistingConnection: no socket for connection that just turned OPEN");
          }
          ClientConnection.AcceptCallback acceptCB = rdpSocket.getAcceptCallback();
          if (acceptCB != null) {
            acceptCB.acceptConnection(con);
          } else {
            Log.warn("serversocket has no accept callback");
          }
          if (Log.loggingNet)
            Log.net(
                "RDPServer.processExistingConnection: got ACK, removing from unack list: "
                    + packet.getSeqNum());
          con.removeUnackPacket(packet.getSeqNum());
        }
      }
    }
    if (state == RDPConnection.CLOSE_WAIT) {
      // reply with a reset on all packets
      if (!packet.isRst()) {
        RDPPacket rstPacket = RDPPacket.makeRstPacket();
        con.sendPacketImmediate(rstPacket, false);
      }
    }
    if (state == RDPConnection.OPEN) {
      if (packet.isRst()) {
        // the other side wants to close the connection
        // set the state,
        // don't call con.close() since that will send a reset packet
        if (Log.loggingDebug)
          Log.debug("RDPServer.processExistingConnection: got reset packet for con " + con);
        if (con.getState() != RDPConnection.CLOSE_WAIT) {
          con.setState(RDPConnection.CLOSE_WAIT);
          con.setCloseWaitTimer();
          // Only invoke callback when moving into CLOSE_WAIT
          // state.  This prevents two calls to connectionReset.
          Log.net("RDPServer.processExistingConnection: calling reset callback");
          ClientConnection.MessageCallback pcb = con.getCallback();
          pcb.connectionReset(con);
        }

        return false;
      }
      if (packet.isSyn()) {
        // this will close the connection (put into CLOSE_WAIT)
        // send a reset packet and call the connectionReset callback
        Log.error(
            "RDPServer.processExistingConnection: closing connection because we got a syn packet, con="
                + con);
        con.close();
        return false;
      }

      // TODO: shouldn't it be ok for it to have the same seq num?
      // if it is a 0 data packet?
      long rcvCur = con.getRcvCur();
      if (packet.getSeqNum() <= rcvCur) {
        if (Log.loggingNet)
          Log.net("RDPServer.processExistingConnection: seqnum too small - acking/not process");
        if (packet.getData() != null) {
          if (Log.loggingNet)
            Log.net(
                "RDPServer.processExistingConnection: sending ack even though seqnum out of range");
          RDPPacket replyPacket = new RDPPacket(con);
          con.sendPacketImmediate(replyPacket, false);
        }
        return false;
      }
      if (packet.getSeqNum() > (rcvCur + (con.getRcvMax() * 2))) {
        Log.error("RDPServer.processExistingConnection: seqnum too big - discarding");
        return false;
      }
      if (packet.isAck()) {
        if (Log.loggingNet)
          Log.net("RDPServer.processExistingConnection: processing ack " + packet.getAckNum());
        // lock for race condition (read then set)
        con.getLock().lock();
        try {
          if (packet.getAckNum() >= con.getSendNextSeqNum()) {
            // acking something we didn't even send yet
            Log.error(
                "RDPServer.processExistingConnection: discarding -- got ack #"
                    + packet.getAckNum()
                    + ", but our next send seqnum is "
                    + con.getSendNextSeqNum()
                    + " -- "
                    + con);
            return false;
          }
          if (con.getSendUnackd() <= packet.getAckNum()) {
            con.setSendUnackd(packet.getAckNum() + 1);
            if (Log.loggingNet)
              Log.net(
                  "RDPServer.processExistingConnection: updated send_unackd num to "
                      + con.getSendUnackd()
                      + " (one greater than packet ack) - "
                      + con);
            con.removeUnackPacketUpTo(packet.getAckNum());
          }
          if (packet.isEak()) {
            List eackList = packet.getEackList();
            Iterator iter = eackList.iterator();
            while (iter.hasNext()) {
              Long seqNum = (Long) iter.next();
              if (Log.loggingNet)
                Log.net("RDPServer.processExistingConnection: got EACK: " + seqNum);
              con.removeUnackPacket(seqNum.longValue());
            }
          }
        } finally {
          con.getLock().unlock();
          if (Log.loggingNet)
            Log.net("RDPServer.processExistingConnection: processed ack " + packet.getAckNum());
        }
      }
      // process the data
      byte[] data = packet.getData();
      if ((data != null) || packet.isNul()) {
        dataCounter.add();

        // lock - race condition: we read then set
        con.getLock().lock();
        try {
          rcvCur = con.getRcvCur(); // update rcvCur
          if (Log.loggingNet) Log.net("RDPServer.processExistingConnection: rcvcur is " + rcvCur);

          ClientConnection.MessageCallback pcb = con.getCallback();
          if (pcb == null) {
            Log.warn("RDPServer.processExistingConnection: no packet callback registered");
          }

          // call the callback only if we haven't seen this packet already (eack'd)
          if (!con.hasEack(packet.getSeqNum())) {
            if (con.isSequenced()) {
              // this is a sequenced connection -
              // make sure this is the next sequential packet
              if (packet.getSeqNum() == (rcvCur + 1)) {
                // this is the next packet
                if (Log.loggingNet)
                  Log.net(
                      "RDPServer.processExistingConnection: conn is sequenced and received next packet, rcvCur="
                          + rcvCur
                          + ", packet="
                          + packet);
                if ((pcb != null) && (data != null)) {
                  queueForCallbackProcessing(pcb, con, packet);
                }
              } else {
                // not the next packet, place it in queue
                if (Log.loggingNet)
                  Log.net(
                      "RDPServer.processExistingConnection: conn is sequenced, BUT PACKET is OUT OF ORDER: rcvcur="
                          + rcvCur
                          + ", packet="
                          + packet);
                con.addSequencePacket(packet);
              }
            } else {
              if ((pcb != null) && (data != null)) {
                // make sure we haven't already processed the packet
                queueForCallbackProcessing(pcb, con, packet);
              }
            }
          } else {
            if (Log.loggingNet) Log.net(con.toString() + " already seen this packet");
          }

          // is this the next sequential packet
          if (packet.getSeqNum() == (rcvCur + 1)) {
            con.setRcvCur(rcvCur + 1);
            if (Log.loggingNet)
              Log.net(
                  "RDPServer.processExistingConnection RCVD: incremented last sequenced rcvd: "
                      + (rcvCur + 1));

            // packet in order - don't add to eack
            // Take any additional sequential packets off eack
            long seqNum = rcvCur + 2;
            while (con.removeEack(seqNum)) {
              if (Log.loggingNet)
                Log.net("RDPServer.processExistingConnection: removing/collapsing eack: " + seqNum);
              con.setRcvCur(seqNum++);
            }

            if (con.isSequenced()) {
              rcvCur++; // since we just processed the last in-order packet
              Log.net(
                  "RDPServer.processExistingConnection: connection is sequenced, processing collapsed packets.");
              // send any saved sequential packets also
              Iterator iter = con.getSequencePackets().iterator();
              while (iter.hasNext()) {
                RDPPacket p = (RDPPacket) iter.next();
                if (Log.loggingNet)
                  Log.net(
                      "rdpserver: stored packet seqnum="
                          + p.getSeqNum()
                          + ", if equal to (rcvcur + 1)="
                          + (rcvCur + 1));
                if (p.getSeqNum() == (rcvCur + 1)) {
                  Log.net(
                      "RDPServer.processExistingConnection: this is the next packet, processing");
                  // this is the next packet - update rcvcur
                  rcvCur++;

                  // process this packet
                  Log.net(
                      "RDPServer.processExistingConnection: processing stored sequential packet "
                          + p);
                  byte[] storedData = p.getData();
                  if (pcb != null && storedData != null) {
                    queueForCallbackProcessing(pcb, con, p); // queue the stored packet, not the new one
                  }
                  iter.remove();
                }
              }
            } else {
              if (Log.loggingNet)
                Log.net("RDPServer.processExistingConnection: connection is not sequenced");
            }
          } else {
            if (Log.loggingNet)
              Log.net(
                  "RDPServer.processExistingConnection: RCVD OUT OF ORDER: packet seq#: "
                      + packet.getSeqNum()
                      + ", but last sequential rcvd packet was: "
                      + con.getRcvCur()
                      + " -- not incrementing counter");
            if (packet.getSeqNum() > rcvCur) {
              // must be at least 2 greater than rcvCur
              if (Log.loggingNet) Log.net("adding to eack list " + packet);
              con.addEack(packet);
            }
          }
        } finally {
          con.getLock().unlock();
        }
        return true;
      }
    }
    return false;
  }
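The subtlest step is the eack collapse: when the missing packet finally arrives, rcvCur is advanced across every consecutive sequence number that was buffered out of order. A minimal model of that window advance using a TreeSet (hypothetical, not the RDP classes):

  import java.util.TreeSet;

  public class EackCollapseSketch {
    public static void main(String[] args) {
      long rcvCur = 10;                      // last in-order sequence number received
      TreeSet<Long> eacks = new TreeSet<>(); // out-of-order packets already seen
      eacks.add(12L);
      eacks.add(13L);
      eacks.add(15L);

      // Packet 11 arrives: advance rcvCur, then absorb consecutive eacks,
      // mirroring the while (con.removeEack(seqNum)) loop above.
      rcvCur = 11;
      long seqNum = rcvCur + 1;
      while (eacks.remove(seqNum)) {
        rcvCur = seqNum++;
      }

      System.out.println("rcvCur=" + rcvCur + ", still pending=" + eacks);
      // rcvCur=13, still pending=[15]
    }
  }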
Code Example #7
  /** Returns the set of DatagramChannels that have data ready to be read. */
  Set<DatagramChannel> getActiveChannels() throws InterruptedException, java.io.IOException {
    lock.lock();
    try {
      while (channelMap.isEmpty()) {
        channelMapNotEmpty.await();
      }
    } finally {
      lock.unlock();
    }

    Set<SelectionKey> readyKeys = null;
    do {
      lock.lock();
      try {
        if (!newChannelSet.isEmpty()) {
          if (Log.loggingNet) Log.net("RDPServer.getActiveChannels: newChannelSet is not null");
          Iterator<DatagramChannel> iter = newChannelSet.iterator();
          while (iter.hasNext()) {
            DatagramChannel newDC = iter.next();
            iter.remove();
            newDC.register(selector, SelectionKey.OP_READ);
          }
        }
      } finally {
        lock.unlock();
      }
      int numReady = selector.select(); // this is a blocking call - thread safe
      selectCalls++;
      if (numReady == 0) {
        if (Log.loggingNet) Log.net("RDPServer.getActiveChannels: selector returned 0");
        continue;
      }
      readyKeys = selector.selectedKeys();
      if (Log.loggingNet)
        Log.net(
            "RDPServer.getActiveChannels: called select - # of ready keys = "
                + readyKeys.size()
                + " == "
                + numReady);
    } while (readyKeys == null || readyKeys.isEmpty());

    lock.lock();
    try {
      // get a datagramchannel that is ready
      Set<DatagramChannel> activeChannels = new HashSet<DatagramChannel>();

      Iterator<SelectionKey> iter = readyKeys.iterator();
      while (iter.hasNext()) {
        SelectionKey key = iter.next();
        if (Log.loggingNet)
          Log.net(
              "RDPServer.getActiveChannels: matched selectionkey: "
                  + key
                  + ", isAcceptable="
                  + key.isAcceptable()
                  + ", isReadable="
                  + key.isReadable()
                  + ", isValid="
                  + key.isValid()
                  + ", isWritable="
                  + key.isWritable());
        iter.remove(); // remove from the selected key list

        if (!key.isReadable() || !key.isValid()) {
          Log.error(
              "RDPServer.getActiveChannels: Throwing exception: RDPServer: not readable or invalid");
          throw new MVRuntimeException("RDPServer: not readable or invalid");
        }

        DatagramChannel dc = (DatagramChannel) key.channel();
        activeChannels.add(dc);
      }
      if (Log.loggingNet)
        Log.net(
            "RDPServer.getActiveChannels: returning " + activeChannels.size() + " active channels");
      return activeChannels;
    } finally {
      lock.unlock();
    }
  }
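Two mechanics in getActiveChannels() are easy to miss: new channels must be non-blocking before they can be registered with the selector, and each consumed SelectionKey must be removed from the selected-key set or select() will keep reporting it. A compact standalone sketch of that loop (the port number is hypothetical):

  import java.net.InetSocketAddress;
  import java.nio.channels.DatagramChannel;
  import java.nio.channels.SelectionKey;
  import java.nio.channels.Selector;
  import java.util.Iterator;

  public class SelectLoopSketch {
    public static void main(String[] args) throws Exception {
      Selector selector = Selector.open();
      DatagramChannel dc = DatagramChannel.open();
      dc.configureBlocking(false); // required before register()
      dc.bind(new InetSocketAddress(5555)); // hypothetical port
      dc.register(selector, SelectionKey.OP_READ);

      int numReady = selector.select(1000); // bounded wait for this demo
      System.out.println("ready channels: " + numReady);

      Iterator<SelectionKey> iter = selector.selectedKeys().iterator();
      while (iter.hasNext()) {
        SelectionKey key = iter.next();
        iter.remove(); // must clear, or the key is reported again next select()
        if (key.isValid() && key.isReadable()) {
          DatagramChannel active = (DatagramChannel) key.channel();
          System.out.println("would read from " + active);
        }
      }
      dc.close();
      selector.close();
    }
  }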
Code Example #8
    public void run() {
      // periodically go through all the packets that haven't been
      // ack'd
      List<RDPConnection> conList = new LinkedList<RDPConnection>();
      long lastCounterTime = System.currentTimeMillis();
      while (true) {
        try {
          long startTime = System.currentTimeMillis();
          long interval = startTime - lastCounterTime;
          if (interval > 1000) {

            if (Log.loggingNet) {
              Log.net(
                  "RDPServer counters: activeChannelCalls "
                      + activeChannelCalls
                      + ", selectCalls "
                      + selectCalls
                      + ", transmits "
                      + transmits
                      + ", retransmits "
                      + retransmits
                      + " in "
                      + interval
                      + "ms");
            }
            activeChannelCalls = 0;
            selectCalls = 0;
            transmits = 0;
            retransmits = 0;
            lastCounterTime = startTime;
          }
          if (Log.loggingNet) Log.net("RDPServer.RETRY: startTime=" + startTime);

          // go through all the rdpconnections and re-send any
          // unacked packets
          conList.clear();

          lock.lock();
          try {
            // make a copy since the values() collection is
            // backed by the map
            Set<RDPConnection> conCol = RDPServer.getAllConnections();
            if (conCol == null) {
              throw new MVRuntimeException("values() returned null");
            }
            conList.addAll(conCol); // make non map backed copy
          } finally {
            lock.unlock();
          }

          Iterator<RDPConnection> iter = conList.iterator();
          while (iter.hasNext()) {
            RDPConnection con = iter.next();
            long currentTime = System.currentTimeMillis();

            // is the connection in CLOSE_WAIT
            if (con.getState() == RDPConnection.CLOSE_WAIT) {
              long closeTime = con.getCloseWaitTimer();
              long elapsedTime = currentTime - closeTime;
              Log.net(
                  "RDPRetryThread: con is in CLOSE_WAIT: elapsed close timer(ms)="
                      + elapsedTime
                      + ", waiting for 30seconds to elapse. con="
                      + con);
              if (elapsedTime > 30000) {
                // close the connection
                Log.net("RDPRetryThread: removing CLOSE_WAIT connection. con=" + con);
                removeConnection(con);
              } else {
                Log.net(
                    "RDPRetryThread: time left on CLOSE_WAIT timer: "
                        + (30000 - (currentTime - closeTime)));
              }
              // con.close();
              continue;
            }
            if (Log.loggingNet)
              Log.net(
                  "RDPServer.RETRY: resending expired packets "
                      + con
                      + " - current list size = "
                      + con.unackListSize());

            // see if we should send a null packet, but only if con is already open
            if ((con.getState() == RDPConnection.OPEN)
                && ((currentTime - con.getLastNullPacketTime()) > 30000)) {
              con.getLock().lock();
              try {
                RDPPacket nulPacket = RDPPacket.makeNulPacket();
                con.sendPacketImmediate(nulPacket, false);
                con.setLastNullPacketTime();
                if (Log.loggingNet) Log.net("RDPServer.retry: sent nul packet: " + nulPacket);
              } finally {
                con.getLock().unlock();
              }
            } else {
              if (Log.loggingNet)
                Log.net(
                    "RDPServer.retry: sending nul packet in "
                        + (30000 - (currentTime - con.getLastNullPacketTime())));
            }
            con.resend(
                currentTime - resendTimerMS, // resend cutoff time
                currentTime - resendTimeoutMS); // giveup time
          }

          long endTime = System.currentTimeMillis();
          if (Log.loggingNet)
            Log.net(
                "RDPServer.RETRY: endTime=" + endTime + ", elapse(ms)=" + (endTime - startTime));
          Thread.sleep(250);
        } catch (Exception e) {
          Log.exception("RDPServer.RetryThread.run caught exception", e);
        }
      }
    }
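The copy into conList is the defensive move to note: values() on the connection map is a live view, so the thread snapshots it under the lock and iterates the snapshot, leaving other threads free to add or remove connections mid-sweep. A sketch of the idiom with placeholder names:

    import java.util.HashMap;
    import java.util.LinkedList;
    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.locks.ReentrantLock;

    public class SnapshotIterateSketch {
      static final ReentrantLock lock = new ReentrantLock();
      static final Map<Integer, String> connections = new HashMap<>();

      public static void main(String[] args) {
        connections.put(1, "conA");
        connections.put(2, "conB");

        List<String> conList = new LinkedList<>();
        lock.lock();
        try {
          conList.addAll(connections.values()); // non-map-backed copy
        } finally {
          lock.unlock();
        }

        // The map may now be mutated concurrently; we only touch the copy.
        for (String con : conList) System.out.println("retrying " + con);
      }
    }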
Code Example #9
  /** {@inheritDoc} */
  @SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
  @Nullable
  @Override
  public GridDhtPartitionMap update(
      @Nullable GridDhtPartitionExchangeId exchId, GridDhtPartitionFullMap partMap) {
    if (log.isDebugEnabled())
      log.debug(
          "Updating full partition map [exchId=" + exchId + ", parts=" + fullMapString() + ']');

    assert partMap != null;

    lock.writeLock().lock();

    try {
      if (stopping) return null;

      if (exchId != null && lastExchangeId != null && lastExchangeId.compareTo(exchId) >= 0) {
        if (log.isDebugEnabled())
          log.debug(
              "Stale exchange id for full partition map update (will ignore) [lastExchId="
                  + lastExchangeId
                  + ", exchId="
                  + exchId
                  + ']');

        return null;
      }

      if (node2part != null && node2part.compareTo(partMap) >= 0) {
        if (log.isDebugEnabled())
          log.debug(
              "Stale partition map for full partition map update (will ignore) [lastExchId="
                  + lastExchangeId
                  + ", exchId="
                  + exchId
                  + ", curMap="
                  + node2part
                  + ", newMap="
                  + partMap
                  + ']');

        return null;
      }

      long updateSeq = this.updateSeq.incrementAndGet();

      if (exchId != null) lastExchangeId = exchId;

      if (node2part != null) {
        for (GridDhtPartitionMap part : node2part.values()) {
          GridDhtPartitionMap newPart = partMap.get(part.nodeId());

          // If the current map for some node is newer than the incoming one,
          // then we keep the newer value.
          if (newPart != null && newPart.updateSequence() < part.updateSequence()) {
            if (log.isDebugEnabled())
              log.debug(
                  "Overriding partition map in full update map [exchId="
                      + exchId
                      + ", curPart="
                      + mapString(part)
                      + ", newPart="
                      + mapString(newPart)
                      + ']');

            partMap.put(part.nodeId(), part);
          }
        }

        for (Iterator<UUID> it = partMap.keySet().iterator(); it.hasNext(); ) {
          UUID nodeId = it.next();

          if (!cctx.discovery().alive(nodeId)) {
            if (log.isDebugEnabled())
              log.debug(
                  "Removing left node from full map update [nodeId="
                      + nodeId
                      + ", partMap="
                      + partMap
                      + ']');

            it.remove();
          }
        }
      }

      node2part = partMap;

      Map<Integer, Set<UUID>> p2n = new HashMap<>(cctx.affinity().partitions(), 1.0f);

      for (Map.Entry<UUID, GridDhtPartitionMap> e : partMap.entrySet()) {
        for (Integer p : e.getValue().keySet()) {
          Set<UUID> ids = p2n.get(p);

          if (ids == null)
            // Initialize the HashSet to size 3 in anticipation that there won't be
            // more than 3 nodes per partition.
            p2n.put(p, ids = U.newHashSet(3));

          ids.add(e.getKey());
        }
      }

      part2node = p2n;

      boolean changed = checkEvictions(updateSeq);

      consistencyCheck();

      if (log.isDebugEnabled()) log.debug("Partition map after full update: " + fullMapString());

      return changed ? localPartitionMap() : null;
    } finally {
      lock.writeLock().unlock();
    }
  }
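After the merge, update() rebuilds part2node, the partition-to-nodes reverse index, from the node-to-partitions full map. A standalone sketch of that inversion with plain JDK types standing in for the Ignite ones:

  import java.util.HashMap;
  import java.util.HashSet;
  import java.util.Map;
  import java.util.Set;

  public class ReverseIndexSketch {
    public static void main(String[] args) {
      // node -> partitions it owns (stand-in for GridDhtPartitionFullMap)
      Map<String, Set<Integer>> node2part = new HashMap<>();
      node2part.put("nodeA", Set.of(0, 1));
      node2part.put("nodeB", Set.of(1, 2));

      // Invert into partition -> owning nodes, as update() does for part2node.
      Map<Integer, Set<String>> part2node = new HashMap<>();
      for (Map.Entry<String, Set<Integer>> e : node2part.entrySet())
        for (Integer p : e.getValue())
          part2node.computeIfAbsent(p, k -> new HashSet<>(3)).add(e.getKey());

      System.out.println(part2node); // e.g. {0=[nodeA], 1=[nodeA, nodeB], 2=[nodeB]}
    }
  }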