/**
 * Waits for renting partitions.
 *
 * @return {@code true} if mapping was changed.
 * @throws IgniteCheckedException If failed.
 */
private boolean waitForRent() throws IgniteCheckedException {
    boolean changed = false;

    // Synchronously wait for all renting partitions to complete.
    for (Iterator<GridDhtLocalPartition> it = locParts.values().iterator(); it.hasNext(); ) {
        GridDhtLocalPartition p = it.next();

        GridDhtPartitionState state = p.state();

        if (state == RENTING || state == EVICTED) {
            if (log.isDebugEnabled())
                log.debug("Waiting for renting partition: " + p);

            // Wait for partition to empty out.
            p.rent(true).get();

            if (log.isDebugEnabled())
                log.debug("Finished waiting for renting partition: " + p);

            // Remove evicted partition.
            it.remove();

            changed = true;
        }
    }

    return changed;
}
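// For context, a minimal self-contained sketch of the same drain-and-remove pattern,
// using java.util.concurrent.CompletableFuture in place of Ignite's internal rent
// future. The Partition class and all names below are hypothetical illustrations,
// not Ignite APIs.

import java.util.*;
import java.util.concurrent.*;

class RentWaitSketch {
    /** Hypothetical stand-in for a partition whose eviction completes via a future. */
    static class Partition {
        final CompletableFuture<Void> rentFut = new CompletableFuture<>();
        final boolean renting;

        Partition(boolean renting) {
            this.renting = renting;
            if (renting)
                // Simulate background eviction finishing shortly.
                CompletableFuture.delayedExecutor(10, TimeUnit.MILLISECONDS)
                    .execute(() -> rentFut.complete(null));
        }
    }

    /** Mirrors waitForRent(): block on each renting partition, then remove it. */
    static boolean waitForRent(Map<Integer, Partition> locParts) throws Exception {
        boolean changed = false;

        for (Iterator<Partition> it = locParts.values().iterator(); it.hasNext(); ) {
            Partition p = it.next();

            if (p.renting) {
                p.rentFut.get(); // synchronously wait for the partition to empty out
                it.remove();     // drop the evicted partition from the local map
                changed = true;
            }
        }

        return changed;
    }

    public static void main(String[] args) throws Exception {
        Map<Integer, Partition> parts = new HashMap<>();
        parts.put(0, new Partition(false));
        parts.put(1, new Partition(true));

        System.out.println("changed=" + waitForRent(parts) + ", left=" + parts.size());
    }
}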
public void remove() {
    if (lastRet < 0)
        throw new IllegalStateException();

    Object x = array[lastRet];
    lastRet = -1;

    // Traverse underlying queue to find == element,
    // not just a .equals element.
    lock.lock();
    try {
        for (Iterator it = q.iterator(); it.hasNext(); ) {
            if (it.next() == x) {
                it.remove();
                return;
            }
        }
    } finally {
        lock.unlock();
    }
}
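// Why scan for the == element instead of calling q.remove(x)? Collection.remove(Object)
// is equals-based and may remove the wrong duplicate. A small standalone demonstration
// of the difference (hypothetical example, not from this codebase):

import java.util.*;

class IdentityRemoveDemo {
    public static void main(String[] args) {
        // Two distinct objects that are equals() but not ==.
        String a = new String("x");
        String b = new String("x");

        // equals-based removal takes out the FIRST equal element ('a'),
        // even though we asked to remove 'b'.
        List<String> q1 = new ArrayList<>(List.of(a, b));
        q1.remove(b);
        System.out.println(q1.get(0) == b); // true: 'a' was removed, 'b' survived

        // Identity-based removal, as in the method above, removes exactly 'b'.
        List<String> q2 = new ArrayList<>(List.of(a, b));
        for (Iterator<String> it = q2.iterator(); it.hasNext(); ) {
            if (it.next() == b) {
                it.remove();
                break;
            }
        }
        System.out.println(q2.get(0) == a); // true: exactly 'b' was removed
    }
}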
/**
 * We have a packet that belongs to the passed-in connection; process the packet
 * for the connection. Returns {@code true} if the connection is open and the
 * packet was a data packet.
 */
boolean processExistingConnection(RDPConnection con, RDPPacket packet) {
    if (Log.loggingNet)
        Log.net("RDPServer.processExistingConnection: con state=" + con + ", packet=" + packet);

    packetCounter.add();

    int state = con.getState();

    if (state == RDPConnection.LISTEN) {
        // Something is wrong, we shouldn't be here: we get to this method after
        // looking in the connections map, but all LISTEN connections should come
        // directly from the server sockets.
        Log.error("RDPServer.processExistingConnection: connection shouldnt be in LISTEN state");
        return false;
    }

    if (state == RDPConnection.SYN_SENT) {
        if (!packet.isAck()) {
            Log.warn("got a non-ack packet when we're in SYN_SENT");
            return false;
        }
        if (!packet.isSyn()) {
            Log.warn("got a non-syn packet when we're in SYN_SENT");
            return false;
        }
        if (Log.loggingNet)
            Log.net("good: got syn-ack packet in syn_sent");

        // Make sure it's acking our initial segment #.
        if (packet.getAckNum() != con.getInitialSendSeqNum()) {
            if (Log.loggingNet)
                Log.net("syn's ack number does not match initial seq #");
            return false;
        }

        con.setRcvCur(packet.getSeqNum());
        con.setRcvIrs(packet.getSeqNum());
        con.setMaxSendUnacks(packet.getSendUnacks());
        con.setMaxReceiveSegmentSize(packet.getMaxRcvSegmentSize());
        con.setSendUnackd(packet.getAckNum() + 1);

        // Ack first, before setting the state to open; otherwise some other
        // thread could get woken up and send data before we send the ack.
        if (Log.loggingNet)
            Log.net("new connection state: " + con);
        RDPPacket replyPacket = new RDPPacket(con);
        con.sendPacketImmediate(replyPacket, false);
        con.setState(RDPConnection.OPEN);
        return false;
    }

    if (state == RDPConnection.SYN_RCVD) {
        if (packet.getSeqNum() <= con.getRcvIrs()) {
            Log.error("seqnum is not above rcv initial seq num");
            return false;
        }
        if (packet.getSeqNum() > (con.getRcvCur() + (con.getRcvMax() * 2))) {
            Log.error("seqnum is too big");
            return false;
        }
        if (packet.isAck()) {
            if (packet.getAckNum() == con.getInitialSendSeqNum()) {
                if (Log.loggingNet)
                    Log.net("got ack for our syn - setting state to open");

                con.setState(RDPConnection.OPEN); // this will notify()

                // Call the accept callback; first find the server socket.
                DatagramChannel dc = con.getDatagramChannel();
                if (dc == null) {
                    throw new MVRuntimeException(
                        "RDPServer.processExistingConnection: no datagramchannel for connection that just turned OPEN");
                }
                RDPServerSocket rdpSocket = RDPServer.getRDPSocket(dc);
                if (rdpSocket == null) {
                    throw new MVRuntimeException(
                        "RDPServer.processExistingConnection: no socket for connection that just turned OPEN");
                }
                ClientConnection.AcceptCallback acceptCB = rdpSocket.getAcceptCallback();
                if (acceptCB != null) {
                    acceptCB.acceptConnection(con);
                } else {
                    Log.warn("serversocket has no accept callback");
                }
                if (Log.loggingNet)
                    Log.net("RDPServer.processExistingConnection: got ACK, removing from unack list: "
                        + packet.getSeqNum());
                con.removeUnackPacket(packet.getSeqNum());
            }
        }
    }

    if (state == RDPConnection.CLOSE_WAIT) {
        // Reply with a reset on all packets.
        if (!packet.isRst()) {
            RDPPacket rstPacket = RDPPacket.makeRstPacket();
            con.sendPacketImmediate(rstPacket, false);
        }
    }

    if (state == RDPConnection.OPEN) {
        if (packet.isRst()) {
            // The other side wants to close the connection. Set the state, but
            // don't call con.close(), since that would send a reset packet.
            if (Log.loggingDebug)
                Log.debug("RDPServer.processExistingConnection: got reset packet for con " + con);
            if (con.getState() != RDPConnection.CLOSE_WAIT) {
                con.setState(RDPConnection.CLOSE_WAIT);
                con.setCloseWaitTimer();

                // Only invoke the callback when moving into the CLOSE_WAIT
                // state. This prevents two calls to connectionReset.
                Log.net("RDPServer.processExistingConnection: calling reset callback");
                ClientConnection.MessageCallback pcb = con.getCallback();
                pcb.connectionReset(con);
            }
            return false;
        }
        if (packet.isSyn()) {
            // This will close the connection (put it into CLOSE_WAIT),
            // send a reset packet, and call the connectionReset callback.
            Log.error("RDPServer.processExistingConnection: closing connection because we got a syn packet, con="
                + con);
            con.close();
            return false;
        }

        // TODO: shouldn't it be ok for it to have the same seq num,
        // if it is a 0 data packet?
        long rcvCur = con.getRcvCur();
        if (packet.getSeqNum() <= rcvCur) {
            if (Log.loggingNet)
                Log.net("RDPServer.processExistingConnection: seqnum too small - acking/not process");
            if (packet.getData() != null) {
                if (Log.loggingNet)
                    Log.net("RDPServer.processExistingConnection: sending ack even though seqnum out of range");
                RDPPacket replyPacket = new RDPPacket(con);
                con.sendPacketImmediate(replyPacket, false);
            }
            return false;
        }
        if (packet.getSeqNum() > (rcvCur + (con.getRcvMax() * 2))) {
            Log.error("RDPServer.processExistingConnection: seqnum too big - discarding");
            return false;
        }

        if (packet.isAck()) {
            if (Log.loggingNet)
                Log.net("RDPServer.processExistingConnection: processing ack " + packet.getAckNum());

            // Lock to avoid a race condition (read then set).
            con.getLock().lock();
            try {
                if (packet.getAckNum() >= con.getSendNextSeqNum()) {
                    // Acking something we didn't even send yet.
                    Log.error("RDPServer.processExistingConnection: discarding -- got ack #"
                        + packet.getAckNum() + ", but our next send seqnum is "
                        + con.getSendNextSeqNum() + " -- " + con);
                    return false;
                }
                if (con.getSendUnackd() <= packet.getAckNum()) {
                    con.setSendUnackd(packet.getAckNum() + 1);
                    if (Log.loggingNet)
                        Log.net("RDPServer.processExistingConnection: updated send_unackd num to "
                            + con.getSendUnackd() + " (one greater than packet ack) - " + con);
                    con.removeUnackPacketUpTo(packet.getAckNum());
                }
                if (packet.isEak()) {
                    List eackList = packet.getEackList();
                    Iterator iter = eackList.iterator();
                    while (iter.hasNext()) {
                        Long seqNum = (Long) iter.next();
                        if (Log.loggingNet)
                            Log.net("RDPServer.processExistingConnection: got EACK: " + seqNum);
                        con.removeUnackPacket(seqNum.longValue());
                    }
                }
            } finally {
                con.getLock().unlock();
                if (Log.loggingNet)
                    Log.net("RDPServer.processExistingConnection: processed ack " + packet.getAckNum());
            }
        }

        // Process the data.
        byte[] data = packet.getData();
        if ((data != null) || packet.isNul()) {
            dataCounter.add();

            // Lock, since there is a race condition: we read then set.
            con.getLock().lock();
            try {
                rcvCur = con.getRcvCur(); // update rcvCur
                if (Log.loggingNet)
                    Log.net("RDPServer.processExistingConnection: rcvcur is " + rcvCur);

                ClientConnection.MessageCallback pcb = con.getCallback();
                if (pcb == null) {
                    Log.warn("RDPServer.processExistingConnection: no packet callback registered");
                }

                // Call the callback only if we haven't seen the packet already (eack'd).
                if (!con.hasEack(packet.getSeqNum())) {
                    if (con.isSequenced()) {
                        // This is a sequenced connection, so make sure this is
                        // the 'next' packet.
                        if (packet.getSeqNum() == (rcvCur + 1)) {
                            // This is the next packet.
                            if (Log.loggingNet)
                                Log.net("RDPServer.processExistingConnection: conn is sequenced and received next packet, rcvCur="
                                    + rcvCur + ", packet=" + packet);
                            if ((pcb != null) && (data != null)) {
                                queueForCallbackProcessing(pcb, con, packet);
                            }
                        } else {
                            // Not the next packet; place it in the queue.
                            if (Log.loggingNet)
                                Log.net("RDPServer.processExistingConnection: conn is sequenced, BUT PACKET is OUT OF ORDER: rcvcur="
                                    + rcvCur + ", packet=" + packet);
                            con.addSequencePacket(packet);
                        }
                    } else {
                        if ((pcb != null) && (data != null)) {
                            // Unsequenced connection: we already know the packet
                            // hasn't been processed, so queue it directly.
                            queueForCallbackProcessing(pcb, con, packet);
                        }
                    }
                } else {
                    if (Log.loggingNet)
                        Log.net(con.toString() + " already seen this packet");
                }

                // Is this the next sequential packet?
                if (packet.getSeqNum() == (rcvCur + 1)) {
                    con.setRcvCur(rcvCur + 1);
                    if (Log.loggingNet)
                        Log.net("RDPServer.processExistingConnection RCVD: incremented last sequenced rcvd: "
                            + (rcvCur + 1));

                    // The packet is in order, so don't add it to the eack list.
                    // Take any additional sequential packets off the eack list.
                    long seqNum = rcvCur + 2;
                    while (con.removeEack(seqNum)) {
                        if (Log.loggingNet)
                            Log.net("RDPServer.processExistingConnection: removing/collapsing eack: " + seqNum);
                        con.setRcvCur(seqNum++);
                    }

                    if (con.isSequenced()) {
                        rcvCur++; // since we just processed the last one
                        Log.net("RDPServer.processExistingConnection: connection is sequenced, processing collapsed packets.");

                        // Send any saved sequential packets also.
                        Iterator iter = con.getSequencePackets().iterator();
                        while (iter.hasNext()) {
                            RDPPacket p = (RDPPacket) iter.next();
                            if (Log.loggingNet)
                                Log.net("rdpserver: stored packet seqnum=" + p.getSeqNum()
                                    + ", if equal to (rcvcur + 1)=" + (rcvCur + 1));
                            if (p.getSeqNum() == (rcvCur + 1)) {
                                Log.net("RDPServer.processExistingConnection: this is the next packet, processing");
                                // This is the next packet - update rcvCur.
                                rcvCur++;

                                // Process this packet.
                                Log.net("RDPServer.processExistingConnection: processing stored sequential packet "
                                    + p);
                                byte[] storedData = p.getData();
                                if (pcb != null && storedData != null) {
                                    // Queue the stored packet 'p' (the original code
                                    // passed 'packet' here, re-queueing the packet
                                    // just received instead of the stored one).
                                    queueForCallbackProcessing(pcb, con, p);
                                }
                                iter.remove();
                            }
                        }
                    } else {
                        if (Log.loggingNet)
                            Log.net("RDPServer.processExistingConnection: connection is not sequenced");
                    }
                } else {
                    if (Log.loggingNet)
                        Log.net("RDPServer.processExistingConnection: RCVD OUT OF ORDER: packet seq#: "
                            + packet.getSeqNum() + ", but last sequential rcvd packet was: "
                            + con.getRcvCur() + " -- not incrementing counter");
                    if (packet.getSeqNum() > rcvCur) {
                        // Must be at least +2 larger than rcvCur.
                        if (Log.loggingNet)
                            Log.net("adding to eack list " + packet);
                        con.addEack(packet);
                    }
                }
            } finally {
                con.getLock().unlock();
            }
            return true;
        }
    }
    return false;
}
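// The in-order branch above "collapses" the eack list: when the missing packet
// finally arrives, rcvCur advances past every consecutively buffered sequence
// number. A self-contained sketch of just that collapse step, using a TreeSet of
// sequence numbers in place of the connection state (all names hypothetical):

import java.util.*;

class EackCollapseSketch {
    /**
     * Advances rcvCur after in-order packet 'seqNum' arrives, consuming any
     * now-contiguous out-of-order sequence numbers from 'eacks'.
     * Returns the new rcvCur.
     */
    static long advance(long rcvCur, long seqNum, TreeSet<Long> eacks) {
        if (seqNum != rcvCur + 1)
            return rcvCur; // out of order: the caller buffers it instead

        rcvCur = seqNum;

        // Collapse: each buffered seq that is now contiguous extends rcvCur.
        while (eacks.remove(rcvCur + 1))
            rcvCur++;

        return rcvCur;
    }

    public static void main(String[] args) {
        TreeSet<Long> eacks = new TreeSet<>(List.of(5L, 6L, 8L));

        // rcvCur=3; packet 4 arrives and collapses 5 and 6, but not 8.
        System.out.println(advance(3, 4, eacks)); // prints 6
        System.out.println(eacks);                // prints [8]
    }
}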
/** Returns the set of datagram channels that have data ready to be read. */
Set<DatagramChannel> getActiveChannels() throws InterruptedException, java.io.IOException {
    lock.lock();
    try {
        while (channelMap.isEmpty()) {
            channelMapNotEmpty.await();
        }
    } finally {
        lock.unlock();
    }

    Set<SelectionKey> readyKeys = null;
    do {
        // Register any newly added channels with the selector.
        lock.lock();
        try {
            if (!newChannelSet.isEmpty()) {
                if (Log.loggingNet)
                    Log.net("RDPServer.getActiveChannels: newChannelSet is not empty");
                Iterator<DatagramChannel> iter = newChannelSet.iterator();
                while (iter.hasNext()) {
                    DatagramChannel newDC = iter.next();
                    iter.remove();
                    newDC.register(selector, SelectionKey.OP_READ);
                }
            }
        } finally {
            lock.unlock();
        }

        int numReady = selector.select(); // this is a blocking call - thread safe
        selectCalls++;
        if (numReady == 0) {
            if (Log.loggingNet)
                Log.net("RDPServer.getActiveChannels: selector returned 0");
            continue;
        }
        readyKeys = selector.selectedKeys();
        if (Log.loggingNet)
            Log.net("RDPServer.getActiveChannels: called select - # of ready keys = "
                + readyKeys.size() + " == " + numReady);
    } while (readyKeys == null || readyKeys.isEmpty());

    lock.lock();
    try {
        // Collect the datagram channels that are ready.
        Set<DatagramChannel> activeChannels = new HashSet<DatagramChannel>();
        Iterator<SelectionKey> iter = readyKeys.iterator();
        while (iter.hasNext()) {
            SelectionKey key = iter.next();
            if (Log.loggingNet)
                Log.net("RDPServer.getActiveChannels: matched selectionkey: " + key
                    + ", isAcceptable=" + key.isAcceptable()
                    + ", isReadable=" + key.isReadable()
                    + ", isValid=" + key.isValid()
                    + ", isWritable=" + key.isWritable());
            iter.remove(); // remove from the selected key list
            if (!key.isReadable() || !key.isValid()) {
                Log.error("RDPServer.getActiveChannels: Throwing exception: RDPServer: not readable or invalid");
                throw new MVRuntimeException("RDPServer: not readable or invalid");
            }
            DatagramChannel dc = (DatagramChannel) key.channel();
            activeChannels.add(dc);
        }
        if (Log.loggingNet)
            Log.net("RDPServer.getActiveChannels: returning " + activeChannels.size()
                + " active channels");
        return activeChannels;
    } finally {
        lock.unlock();
    }
}
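// The loop above is the standard java.nio selector pattern: register non-blocking
// channels, block in select(), then drain selectedKeys(), removing each key by hand
// (the selector never clears that set itself). A minimal standalone version of the
// pattern, stripped of the RDP bookkeeping; the port number and class name are
// hypothetical:

import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.*;
import java.util.Iterator;

class SelectLoopSketch {
    public static void main(String[] args) throws IOException {
        Selector selector = Selector.open();

        // Channels must be non-blocking before they can be registered.
        DatagramChannel dc = DatagramChannel.open();
        dc.configureBlocking(false);
        dc.bind(new InetSocketAddress(5432)); // hypothetical port
        dc.register(selector, SelectionKey.OP_READ);

        ByteBuffer buf = ByteBuffer.allocate(4096);
        while (true) {
            if (selector.select() == 0)
                continue; // woken up with nothing ready

            Iterator<SelectionKey> it = selector.selectedKeys().iterator();
            while (it.hasNext()) {
                SelectionKey key = it.next();
                it.remove(); // selected keys must be removed manually

                if (key.isValid() && key.isReadable()) {
                    buf.clear();
                    ((DatagramChannel) key.channel()).receive(buf);
                    // ... hand the datagram to the protocol layer ...
                }
            }
        }
    }
}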
/** {@inheritDoc} */
@SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
@Nullable @Override public GridDhtPartitionMap update(@Nullable GridDhtPartitionExchangeId exchId,
    GridDhtPartitionFullMap partMap) {
    if (log.isDebugEnabled())
        log.debug("Updating full partition map [exchId=" + exchId + ", parts=" + fullMapString() + ']');

    assert partMap != null;

    lock.writeLock().lock();

    try {
        if (stopping)
            return null;

        if (exchId != null && lastExchangeId != null && lastExchangeId.compareTo(exchId) >= 0) {
            if (log.isDebugEnabled())
                log.debug("Stale exchange id for full partition map update (will ignore) [lastExchId="
                    + lastExchangeId + ", exchId=" + exchId + ']');
            return null;
        }

        if (node2part != null && node2part.compareTo(partMap) >= 0) {
            if (log.isDebugEnabled())
                log.debug("Stale partition map for full partition map update (will ignore) [lastExchId="
                    + lastExchangeId + ", exchId=" + exchId + ", curMap=" + node2part
                    + ", newMap=" + partMap + ']');
            return null;
        }

        long updateSeq = this.updateSeq.incrementAndGet();

        if (exchId != null)
            lastExchangeId = exchId;

        if (node2part != null) {
            for (GridDhtPartitionMap part : node2part.values()) {
                GridDhtPartitionMap newPart = partMap.get(part.nodeId());

                // If the current map has a newer entry for some node,
                // keep the newer value.
                if (newPart != null && newPart.updateSequence() < part.updateSequence()) {
                    if (log.isDebugEnabled())
                        log.debug("Overriding partition map in full update map [exchId=" + exchId
                            + ", curPart=" + mapString(part) + ", newPart=" + mapString(newPart) + ']');
                    partMap.put(part.nodeId(), part);
                }
            }

            for (Iterator<UUID> it = partMap.keySet().iterator(); it.hasNext(); ) {
                UUID nodeId = it.next();

                if (!cctx.discovery().alive(nodeId)) {
                    if (log.isDebugEnabled())
                        log.debug("Removing left node from full map update [nodeId=" + nodeId
                            + ", partMap=" + partMap + ']');
                    it.remove();
                }
            }
        }

        node2part = partMap;

        Map<Integer, Set<UUID>> p2n = new HashMap<>(cctx.affinity().partitions(), 1.0f);

        for (Map.Entry<UUID, GridDhtPartitionMap> e : partMap.entrySet()) {
            for (Integer p : e.getValue().keySet()) {
                Set<UUID> ids = p2n.get(p);

                if (ids == null)
                    // Initialize the HashSet to size 3, anticipating that there
                    // won't be more than 3 nodes per partition.
                    p2n.put(p, ids = U.newHashSet(3));

                ids.add(e.getKey());
            }
        }

        part2node = p2n;

        boolean changed = checkEvictions(updateSeq);

        consistencyCheck();

        if (log.isDebugEnabled())
            log.debug("Partition map after full update: " + fullMapString());

        return changed ? localPartitionMap() : null;
    } finally {
        lock.writeLock().unlock();
    }
}
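// The p2n loop above inverts a node-to-partitions mapping into partition-to-nodes.
// A compact standalone equivalent using plain JDK types and computeIfAbsent; the
// types are simplified stand-ins (the real code maps UUID to Ignite's
// GridDhtPartitionMap, not to Set<Integer>):

import java.util.*;

class InvertMapSketch {
    /** Inverts node -> partitions into partition -> nodes. */
    static Map<Integer, Set<UUID>> invert(Map<UUID, Set<Integer>> node2part) {
        Map<Integer, Set<UUID>> part2node = new HashMap<>();

        for (Map.Entry<UUID, Set<Integer>> e : node2part.entrySet())
            for (Integer p : e.getValue())
                // Small initial capacity, since few owners are expected per partition.
                part2node.computeIfAbsent(p, k -> new HashSet<>(4)).add(e.getKey());

        return part2node;
    }

    public static void main(String[] args) {
        UUID n1 = UUID.randomUUID(), n2 = UUID.randomUUID();
        Map<UUID, Set<Integer>> node2part = Map.of(n1, Set.of(0, 1), n2, Set.of(1));

        // Partition 0 is owned by n1 only; partition 1 by both nodes.
        System.out.println(invert(node2part));
    }
}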