/**
 * @param cctx Context.
 * @param id Partition ID.
 */
@SuppressWarnings("ExternalizableWithoutPublicNoArgConstructor")
GridDhtLocalPartition(GridCacheContext cctx, int id) {
    assert cctx != null;

    this.id = id;
    this.cctx = cctx;

    log = U.logger(cctx.kernalContext(), logRef, this);

    rent = new GridFutureAdapter<Object>() {
        @Override public String toString() {
            return "PartitionRentFuture [part=" + GridDhtLocalPartition.this + ", map=" + map + ']';
        }
    };

    map = new ConcurrentHashMap8<>(cctx.config().getStartSize() / cctx.affinity().partitions());

    int delQueueSize = CU.isSystemCache(cctx.name()) ? 100 :
        Math.max(MAX_DELETE_QUEUE_SIZE / cctx.affinity().partitions(), 20);

    rmvQueue = new GridCircularBuffer<>(U.ceilPow2(delQueueSize));
}
/**
 * Read block from file.
 *
 * @param file File to read.
 * @param off Position in file to start reading from; if {@code -1}, the last {@code blockSz} bytes are read.
 * @param blockSz Maximum number of bytes to read.
 * @param lastModified File last modification time.
 * @return Read file block.
 * @throws IOException In case of error.
 */
public static VisorFileBlock readBlock(File file, long off, int blockSz, long lastModified) throws IOException {
    RandomAccessFile raf = null;

    try {
        long fSz = file.length();
        long fLastModified = file.lastModified();

        long pos = off >= 0 ? off : Math.max(fSz - blockSz, 0);

        // Detect an attempt to read past the end of an unchanged, non-empty file.
        if (fLastModified == lastModified && fSz != 0 && pos >= fSz)
            throw new IOException("Trying to read file block with wrong offset: " + pos +
                " while file size: " + fSz);

        if (fSz == 0)
            return new VisorFileBlock(file.getPath(), pos, fLastModified, 0, false, EMPTY_FILE_BUF);
        else {
            int toRead = Math.min(blockSz, (int)(fSz - pos));

            byte[] buf = new byte[toRead];

            raf = new RandomAccessFile(file, "r");

            raf.seek(pos);

            int cntRead = raf.read(buf, 0, toRead);

            if (cntRead != toRead)
                throw new IOException("Count of requested and actually read bytes does not match [cntRead=" +
                    cntRead + ", toRead=" + toRead + ']');

            // Blocks larger than 512 bytes are compressed before being returned.
            boolean zipped = buf.length > 512;

            return new VisorFileBlock(file.getPath(), pos, fSz, fLastModified, zipped,
                zipped ? zipBytes(buf) : buf);
        }
    }
    finally {
        U.close(raf, null);
    }
}
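// A minimal usage sketch (not part of the original class): tails the last 16 KB of a log file
// via readBlock(). The helper name, file path, and sizes below are hypothetical examples; only
// readBlock() and VisorFileBlock come from the method above.
public static void printLogTailExample() throws IOException {
    File log = new File("work/log/ignite.log"); // Hypothetical path.

    // off = -1 requests the trailing blockSz bytes; lastModified = 0 effectively skips the staleness check.
    VisorFileBlock block = readBlock(log, -1, 16 * 1024, 0);

    System.out.println("Read block: " + block);
}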
/**
 * This constructor is meant for optimistic transactions.
 *
 * @param ctx Cache registry.
 * @param ldr Class loader.
 * @param nodeId Node ID.
 * @param nearNodeId Near node ID.
 * @param rmtThreadId Remote thread ID.
 * @param xidVer XID version.
 * @param commitVer Commit version.
 * @param sys System flag.
 * @param plc IO policy.
 * @param concurrency Concurrency level (should be optimistic).
 * @param isolation Transaction isolation.
 * @param invalidate Invalidate flag.
 * @param timeout Timeout.
 * @param writeEntries Write entries.
 * @param txSize Expected transaction size.
 * @param subjId Subject ID.
 * @param taskNameHash Task name hash.
 * @throws IgniteCheckedException If unmarshalling failed.
 */
public GridNearTxRemote(
    GridCacheSharedContext ctx,
    ClassLoader ldr,
    UUID nodeId,
    UUID nearNodeId,
    long rmtThreadId,
    GridCacheVersion xidVer,
    GridCacheVersion commitVer,
    boolean sys,
    byte plc,
    TransactionConcurrency concurrency,
    TransactionIsolation isolation,
    boolean invalidate,
    long timeout,
    Collection<IgniteTxEntry> writeEntries,
    int txSize,
    @Nullable UUID subjId,
    int taskNameHash
) throws IgniteCheckedException {
    super(ctx, nodeId, rmtThreadId, xidVer, commitVer, sys, plc, concurrency, isolation, invalidate, timeout,
        txSize, subjId, taskNameHash);

    assert nearNodeId != null;

    this.nearNodeId = nearNodeId;

    readMap = Collections.emptyMap();

    writeMap = new LinkedHashMap<>(writeEntries != null ? Math.max(txSize, writeEntries.size()) : txSize, 1.0f);

    if (writeEntries != null) {
        for (IgniteTxEntry entry : writeEntries) {
            entry.unmarshal(ctx, true, ldr);

            addEntry(entry);
        }
    }
}
/**
 * Schedules a partition exchange recheck, replacing any previously registered timeout object.
 */
private void scheduleRecheck() {
    if (!isDone()) {
        GridTimeoutObject old = timeoutObj;

        if (old != null)
            cctx.kernalContext().timeout().removeTimeoutObject(old);

        GridTimeoutObject timeoutObj = new GridTimeoutObjectAdapter(
            cctx.gridConfig().getNetworkTimeout() * Math.max(1, cctx.gridConfig().getCacheConfiguration().length)) {
            @Override public void onTimeout() {
                cctx.kernalContext().closure().runLocalSafe(new Runnable() {
                    @Override public void run() {
                        if (isDone())
                            return;

                        if (!enterBusy())
                            return;

                        try {
                            U.warn(log, "Retrying preload partition exchange due to timeout [done=" + isDone() +
                                ", dummy=" + dummy + ", exchId=" + exchId + ", rcvdIds=" + F.id8s(rcvdIds) +
                                ", rmtIds=" + F.id8s(rmtIds) + ", remaining=" + F.id8s(remaining()) +
                                ", init=" + init + ", initFut=" + initFut.isDone() + ", ready=" + ready +
                                ", replied=" + replied + ", added=" + added +
                                ", oldest=" + U.id8(oldestNode.get().id()) +
                                ", oldestOrder=" + oldestNode.get().order() +
                                ", evtLatch=" + evtLatch.getCount() +
                                ", locNodeOrder=" + cctx.localNode().order() +
                                ", locNodeId=" + cctx.localNode().id() + ']',
                                "Retrying preload partition exchange due to timeout.");

                            recheck();
                        }
                        finally {
                            leaveBusy();
                        }
                    }
                });
            }
        };

        this.timeoutObj = timeoutObj;

        cctx.kernalContext().timeout().addTimeoutObject(timeoutObj);
    }
}
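// A simplified illustration (not Ignite API) of the same "replace the pending recheck" pattern,
// using plain JDK scheduling instead of GridTimeoutProcessor: the previously scheduled recheck is
// cancelled before a new one is registered, so at most one recheck timer is pending at a time.
// All names in this sketch are hypothetical.
class RecheckSchedulerSketch {
    private final java.util.concurrent.ScheduledExecutorService timer =
        java.util.concurrent.Executors.newSingleThreadScheduledExecutor();

    private volatile java.util.concurrent.ScheduledFuture<?> pending;

    void scheduleRecheck(long timeoutMs, Runnable recheck) {
        java.util.concurrent.ScheduledFuture<?> old = pending;

        if (old != null)
            old.cancel(false); // Mirrors removeTimeoutObject(old) in the method above.

        pending = timer.schedule(recheck, timeoutMs, java.util.concurrent.TimeUnit.MILLISECONDS);
    }
}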