@NotNull
public static char[] adaptiveLoadText(@NotNull Reader reader) throws IOException {
    char[] chars = new char[4096];
    List<char[]> buffers = null;
    int count = 0;
    int total = 0;
    while (true) {
        int n = reader.read(chars, count, chars.length - count);
        if (n <= 0) break;
        count += n;
        if (total > 1024 * 1024 * 10) throw new FileTooBigException("File too big " + reader);
        total += n;
        if (count == chars.length) {
            if (buffers == null) {
                buffers = new ArrayList<char[]>();
            }
            buffers.add(chars);
            // Double the chunk size, capped at 1 MB per chunk.
            int newLength = Math.min(1024 * 1024, chars.length * 2);
            chars = new char[newLength];
            count = 0;
        }
    }
    char[] result = new char[total];
    if (buffers != null) {
        // Copy the filled chunks back in order; 'total' tracks the chars left to place.
        for (char[] buffer : buffers) {
            System.arraycopy(buffer, 0, result, result.length - total, buffer.length);
            total -= buffer.length;
        }
    }
    System.arraycopy(chars, 0, result, result.length - total, total);
    return result;
}
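A minimal call-site sketch for adaptiveLoadText, assuming it is a static utility reachable alongside FileTooBigException; the wrapper method and its name are illustrative, not part of the original:

// Hypothetical helper: load a whole text file as a char[], honoring the 10 MB cap above.
public static char[] readFileText(File file, Charset charset) throws IOException {
    try (Reader reader = new BufferedReader(new InputStreamReader(new FileInputStream(file), charset))) {
        return adaptiveLoadText(reader);
    }
}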
/**
 * Validates that the streams generate the same output.
 *
 * @param expIn Expected input stream.
 * @param actIn Actual input stream.
 * @param expSize Expected size of the streams.
 * @param seek Seek position for async position-based reading, or {@code null} to use simple
 *     continuous reading.
 * @throws IOException In case of any IO exception.
 */
private void assertEqualStreams(InputStream expIn, GridGgfsInputStream actIn,
    @Nullable Long expSize, @Nullable Long seek) throws IOException {
    if (seek != null)
        expIn.skip(seek);

    int bufSize = 2345;
    byte buf1[] = new byte[bufSize];
    byte buf2[] = new byte[bufSize];
    long pos = 0;

    long start = System.currentTimeMillis();

    while (true) {
        int read = (int) Math.min(bufSize, expSize - pos);

        int i1;

        if (seek == null)
            i1 = actIn.read(buf1, 0, read);
        else if (seek % 2 == 0)
            i1 = actIn.read(pos + seek, buf1, 0, read);
        else {
            i1 = read;

            actIn.readFully(pos + seek, buf1, 0, read);
        }

        // Read at least 0 bytes, but don't read more than 'i1' or 'read'.
        int i2 = expIn.read(buf2, 0, Math.max(0, Math.min(i1, read)));

        if (i1 != i2) {
            fail("Expects the same data [read=" + read + ", pos=" + pos + ", seek=" + seek +
                ", i1=" + i1 + ", i2=" + i2 + ']');
        }

        if (i1 == -1)
            break; // EOF.

        // i1 == bufSize => compare whole buffers.
        // i1 < bufSize => compare parts of the buffers; the rest is equal from the previous iteration.
        assertTrue("Expects the same data [read=" + read + ", pos=" + pos + ", seek=" + seek +
            ", i1=" + i1 + ", i2=" + i2 + ']', Arrays.equals(buf1, buf2));

        if (read == 0)
            break; // Nothing more to read.

        pos += i1;
    }

    if (expSize != null)
        assertEquals(expSize.longValue(), pos);

    long time = System.currentTimeMillis() - start;

    if (time != 0 && log.isInfoEnabled()) {
        log.info(String.format("Streams were compared in continuous reading " +
            "[size=%7d, rate=%3.1f MB/sec]", expSize, expSize * 1000. / time / 1024 / 1024));
    }
}
/**
 * Updates this bar with the next bar.
 *
 * @param bar Next bar.
 */
public synchronized void update(Bar bar) {
    if (open == 0)
        open = bar.open;

    high = Math.max(high, bar.high);
    low = Math.min(low, bar.low);
    close = bar.close;
}
/**
 * Updates this bar with the last price.
 *
 * @param price Price.
 */
public synchronized void update(double price) {
    if (open == 0)
        open = price;

    high = Math.max(high, price);
    low = Math.min(low, price);
    close = price;
}
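A small sketch of how the two update overloads compose, assuming a Bar with public open/high/low/close fields and a fresh instance starting with open == 0; the variable names and no-arg constructor are illustrative:

// Fold a stream of trade prices into one bar, then merge it into a coarser bar.
Bar minuteBar = new Bar();
for (double price : new double[] {101.5, 102.0, 100.8, 101.2})
    minuteBar.update(price); // first price seeds 'open'; high/low/close track the tape

Bar hourBar = new Bar();
hourBar.update(minuteBar); // keeps the earliest open, extreme high/low, latest close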
@NotNull
public static byte[] adaptiveLoadBytes(@NotNull InputStream stream) throws IOException {
    byte[] bytes = new byte[4096];
    List<byte[]> buffers = null;
    int count = 0;
    int total = 0;
    while (true) {
        int n = stream.read(bytes, count, bytes.length - count);
        if (n <= 0) break;
        count += n;
        if (total > 1024 * 1024 * 10) throw new FileTooBigException("File too big " + stream);
        total += n;
        if (count == bytes.length) {
            if (buffers == null) {
                buffers = new ArrayList<byte[]>();
            }
            buffers.add(bytes);
            // Double the chunk size, capped at 1 MB per chunk.
            int newLength = Math.min(1024 * 1024, bytes.length * 2);
            bytes = new byte[newLength];
            count = 0;
        }
    }
    byte[] result = new byte[total];
    if (buffers != null) {
        // Copy the filled chunks back in order; 'total' tracks the bytes left to place.
        for (byte[] buffer : buffers) {
            System.arraycopy(buffer, 0, result, result.length - total, buffer.length);
            total -= buffer.length;
        }
    }
    System.arraycopy(bytes, 0, result, result.length - total, total);
    return result;
}
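adaptiveLoadBytes mirrors adaptiveLoadText byte for byte: chunks double up to 1 MB, and input past 10 MB aborts with FileTooBigException (which must extend IOException, given the throws clause). A hedged call-site sketch; the helper and its fallback policy are assumptions:

// Hypothetical helper: treat oversized input as a recoverable condition.
static byte[] tryLoadBytes(File file) throws IOException {
    try (InputStream in = new FileInputStream(file)) {
        return adaptiveLoadBytes(in);
    }
    catch (FileTooBigException e) {
        return null; // caller-specific fallback; the 10 MB cap is hard-coded above
    }
}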
/**
 * @param cctx Context.
 * @param id Partition ID.
 */
@SuppressWarnings("ExternalizableWithoutPublicNoArgConstructor")
GridDhtLocalPartition(GridCacheContext cctx, int id) {
    assert cctx != null;

    this.id = id;
    this.cctx = cctx;

    log = U.logger(cctx.kernalContext(), logRef, this);

    rent = new GridFutureAdapter<Object>() {
        @Override public String toString() {
            return "PartitionRentFuture [part=" + GridDhtLocalPartition.this + ", map=" + map + ']';
        }
    };

    map = new ConcurrentHashMap8<>(cctx.config().getStartSize() / cctx.affinity().partitions());

    int delQueueSize = CU.isSystemCache(cctx.name()) ? 100 :
        Math.max(MAX_DELETE_QUEUE_SIZE / cctx.affinity().partitions(), 20);

    rmvQueue = new GridCircularBuffer<>(U.ceilPow2(delQueueSize));
}
/**
 * Reads a block from a file.
 *
 * @param file File to read.
 * @param off Marker position in the file to start reading from; if {@code -1}, reads the last
 *     {@code blockSz} bytes.
 * @param blockSz Maximum number of bytes to read.
 * @param lastModified File last modification time.
 * @return Read file block.
 * @throws IOException In case of error.
 */
public static VisorFileBlock readBlock(File file, long off, int blockSz, long lastModified)
    throws IOException {
    RandomAccessFile raf = null;

    try {
        long fSz = file.length();
        long fLastModified = file.lastModified();

        long pos = off >= 0 ? off : Math.max(fSz - blockSz, 0);

        // Attempt to read past the end of the file.
        if (fLastModified == lastModified && fSz != 0 && pos >= fSz)
            throw new IOException("Trying to read file block with wrong offset: " + pos +
                " while file size: " + fSz);

        if (fSz == 0)
            return new VisorFileBlock(file.getPath(), pos, fLastModified, 0, false, EMPTY_FILE_BUF);
        else {
            int toRead = Math.min(blockSz, (int) (fSz - pos));

            byte[] buf = new byte[toRead];

            raf = new RandomAccessFile(file, "r");

            raf.seek(pos);

            int cntRead = raf.read(buf, 0, toRead);

            if (cntRead != toRead)
                throw new IOException("Count of requested and actually read bytes does not match " +
                    "[cntRead=" + cntRead + ", toRead=" + toRead + ']');

            boolean zipped = buf.length > 512;

            return new VisorFileBlock(file.getPath(), pos, fSz, fLastModified, zipped,
                zipped ? zipBytes(buf) : buf);
        }
    }
    finally {
        U.close(raf, null);
    }
}
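A sketch of the tail-read mode, assuming readBlock and VisorFileBlock are visible from the call site; the path is illustrative. Passing off = -1 requests the last blockSz bytes:

// Fetch the trailing 16 KB of a log file for display; pass 0 when no
// previous last-modified time is known.
File logFile = new File("/var/log/app.log"); // illustrative path
VisorFileBlock tail = readBlock(logFile, -1, 16 * 1024, 0);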
/** Copies at most {@code maxSize} bytes from {@code inputStream} to {@code outputStream}, stopping early on EOF. */
public static void copy(@NotNull InputStream inputStream, int maxSize,
    @NotNull OutputStream outputStream) throws IOException {
    final byte[] buffer = BUFFER.get();
    int toRead = maxSize;
    while (toRead > 0) {
        int read = inputStream.read(buffer, 0, Math.min(buffer.length, toRead));
        if (read < 0) break;
        toRead -= read;
        outputStream.write(buffer, 0, read);
    }
}
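A short sketch of a bounded copy, assuming BUFFER is the class's ThreadLocal<byte[]> scratch buffer (as its use above implies) and that src/dest come from the caller:

// Forward at most 1 MB from the source stream to a file, truncating anything beyond the cap.
try (InputStream in = new FileInputStream(src); OutputStream out = new FileOutputStream(dest)) {
    copy(in, 1024 * 1024, out);
}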
/**
 * This constructor is meant for optimistic transactions.
 *
 * @param ctx Cache registry.
 * @param ldr Class loader.
 * @param nodeId Node ID.
 * @param nearNodeId Near node ID.
 * @param rmtThreadId Remote thread ID.
 * @param xidVer XID version.
 * @param commitVer Commit version.
 * @param sys System flag.
 * @param plc IO policy.
 * @param concurrency Concurrency level.
 * @param isolation Transaction isolation.
 * @param invalidate Invalidate flag.
 * @param timeout Timeout.
 * @param writeEntries Write entries.
 * @param txSize Expected transaction size.
 * @param subjId Subject ID.
 * @param taskNameHash Task name hash code.
 * @throws IgniteCheckedException If unmarshalling failed.
 */
public GridNearTxRemote(
    GridCacheSharedContext ctx,
    ClassLoader ldr,
    UUID nodeId,
    UUID nearNodeId,
    long rmtThreadId,
    GridCacheVersion xidVer,
    GridCacheVersion commitVer,
    boolean sys,
    byte plc,
    TransactionConcurrency concurrency,
    TransactionIsolation isolation,
    boolean invalidate,
    long timeout,
    Collection<IgniteTxEntry> writeEntries,
    int txSize,
    @Nullable UUID subjId,
    int taskNameHash
) throws IgniteCheckedException {
    super(ctx, nodeId, rmtThreadId, xidVer, commitVer, sys, plc, concurrency, isolation, invalidate,
        timeout, txSize, subjId, taskNameHash);

    assert nearNodeId != null;

    this.nearNodeId = nearNodeId;

    readMap = Collections.emptyMap();

    writeMap = new LinkedHashMap<>(
        writeEntries != null ? Math.max(txSize, writeEntries.size()) : txSize, 1.0f);

    if (writeEntries != null) {
        for (IgniteTxEntry entry : writeEntries) {
            entry.unmarshal(ctx, true, ldr);

            addEntry(entry);
        }
    }
}
@NotNull
public static byte[] loadFirst(@NotNull InputStream stream, int maxLength) throws IOException {
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    final byte[] bytes = BUFFER.get();
    while (maxLength > 0) {
        int n = stream.read(bytes, 0, Math.min(maxLength, bytes.length));
        if (n <= 0) break;
        buffer.write(bytes, 0, n);
        maxLength -= n;
    }
    buffer.close(); // no-op for ByteArrayOutputStream; kept for symmetry
    return buffer.toByteArray();
}
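loadFirst pairs naturally with content sniffing on a bounded prefix; a sketch, with the NUL-byte heuristic being an assumption rather than anything the original prescribes:

// Read only the first 8 KB to decide how to treat the rest of the stream.
byte[] head = loadFirst(stream, 8192);
boolean looksBinary = false;
for (byte b : head) {
    if (b == 0) { looksBinary = true; break; } // NUL byte: cheap binary-content heuristic
}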
@Override
public int getVisibilityData(final int x, final int y,
    final CellVisibilityData[] cellVisibilityData, final int index) {
    assert this.checkGetVisibilityData(x, y, cellVisibilityData);

    int numZ = 1;
    final CellVisibilityData data = cellVisibilityData[index];
    data.m_x = x;
    data.m_y = y;
    data.m_z = this.getZ(x, y);
    data.m_height = (byte) Math.max(0, data.m_z);
    data.m_hollow = false;

    if (this.m_attachedBuilding.length != 0) {
        for (final AbstractBuildingStruct building : this.m_attachedBuilding) {
            final int result = building.getVisibilityData(x, y, cellVisibilityData, index + numZ);
            if (result != -1) {
                numZ += mergeSameAltitude(cellVisibilityData, index, index + numZ, result);
            }
        }
    }
    return numZ;
}
@Override
public final int getPathData(final int x, final int y, final CellPathData[] cellPathData,
    final int index) {
    assert this.checkGetPathData(x, y, cellPathData);

    int numZ = 1;
    final CellPathData data = cellPathData[index];
    data.m_x = x;
    data.m_y = y;
    data.m_z = this.getZ(x, y);
    data.m_height = (byte) Math.max(0, data.m_z);
    data.m_cost = (byte) (super.isCellBlocked(x, y) ? -1 : 7);
    data.m_hollow = false;
    data.m_murfinInfo = (byte) this.getMurFinInfo(x, y);
    data.m_miscProperties = (byte) this.getMiscProperties(x, y);

    if (this.m_attachedBuilding.length != 0) {
        for (final AbstractBuildingStruct building : this.m_attachedBuilding) {
            final int result = building.getPathData(x, y, cellPathData, index + numZ);
            if (result != -1) {
                numZ += mergeSameAltitude(cellPathData, index, index + numZ, result);
            }
        }
    }
    return numZ;
}
private void scheduleRecheck() {
    if (!isDone()) {
        GridTimeoutObject old = timeoutObj;

        if (old != null)
            cctx.kernalContext().timeout().removeTimeoutObject(old);

        GridTimeoutObject timeoutObj = new GridTimeoutObjectAdapter(
            cctx.gridConfig().getNetworkTimeout() *
                Math.max(1, cctx.gridConfig().getCacheConfiguration().length)) {
            @Override public void onTimeout() {
                cctx.kernalContext().closure().runLocalSafe(new Runnable() {
                    @Override public void run() {
                        if (isDone())
                            return;

                        if (!enterBusy())
                            return;

                        try {
                            U.warn(log, "Retrying preload partition exchange due to timeout [done=" +
                                isDone() + ", dummy=" + dummy + ", exchId=" + exchId +
                                ", rcvdIds=" + F.id8s(rcvdIds) + ", rmtIds=" + F.id8s(rmtIds) +
                                ", remaining=" + F.id8s(remaining()) + ", init=" + init +
                                ", initFut=" + initFut.isDone() + ", ready=" + ready +
                                ", replied=" + replied + ", added=" + added +
                                ", oldest=" + U.id8(oldestNode.get().id()) +
                                ", oldestOrder=" + oldestNode.get().order() +
                                ", evtLatch=" + evtLatch.getCount() +
                                ", locNodeOrder=" + cctx.localNode().order() +
                                ", locNodeId=" + cctx.localNode().id() + ']',
                                "Retrying preload partition exchange due to timeout.");

                            recheck();
                        }
                        finally {
                            leaveBusy();
                        }
                    }
                });
            }
        };

        this.timeoutObj = timeoutObj;

        cctx.kernalContext().timeout().addTimeoutObject(timeoutObj);
    }
}
/**
 * Removes locks, whether they are owned or not, for the given version and keys.
 *
 * @param ver Lock version.
 * @param keys Keys.
 */
@SuppressWarnings({"unchecked"})
public void removeLocks(GridCacheVersion ver, Collection<? extends K> keys) {
    if (keys.isEmpty())
        return;

    try {
        Collection<GridRichNode> affNodes = null;

        int keyCnt = -1;

        Map<GridNode, GridNearUnlockRequest<K, V>> map = null;

        for (K key : keys) {
            // Send request to remove from remote nodes.
            GridNearUnlockRequest<K, V> req = null;

            while (true) {
                GridDistributedCacheEntry<K, V> entry = peekExx(key);

                try {
                    if (entry != null) {
                        GridCacheMvccCandidate<K> cand = entry.candidate(ver);

                        if (cand != null) {
                            if (affNodes == null) {
                                affNodes = CU.allNodes(ctx, cand.topologyVersion());

                                keyCnt = (int) Math.ceil((double) keys.size() / affNodes.size());

                                map = new HashMap<GridNode, GridNearUnlockRequest<K, V>>(affNodes.size());
                            }

                            GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

                            if (!primary.isLocal()) {
                                req = map.get(primary);

                                if (req == null) {
                                    map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                                    req.version(ver);
                                }
                            }

                            // Remove candidate from local node first.
                            if (entry.removeLock(cand.version())) {
                                if (primary.isLocal()) {
                                    dht.removeLocks(primary.id(), ver, F.asList(key), true);

                                    assert req == null;

                                    continue;
                                }

                                req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                            }
                        }
                    }

                    break;
                }
                catch (GridCacheEntryRemovedException ignored) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to remove lock from removed entry (will retry) [rmvVer=" +
                            ver + ", entry=" + entry + ']');
                }
            }
        }

        if (map == null || map.isEmpty())
            return;

        Collection<GridCacheVersion> committed = ctx.tm().committedVersions(ver);
        Collection<GridCacheVersion> rolledback = ctx.tm().rolledbackVersions(ver);

        for (Map.Entry<GridNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
            GridNode n = mapping.getKey();

            GridDistributedUnlockRequest<K, V> req = mapping.getValue();

            if (!req.keyBytes().isEmpty()) {
                req.completedVersions(committed, rolledback);

                // We don't wait for reply to this message.
                ctx.io().send(n, req);
            }
        }
    }
    catch (GridException ex) {
        U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
}
/** {@inheritDoc} */
@Override public void unlockAll(Collection<? extends K> keys,
    GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys.isEmpty())
        return;

    try {
        GridCacheVersion ver = null;

        Collection<GridRichNode> affNodes = null;

        int keyCnt = -1;

        Map<GridRichNode, GridNearUnlockRequest<K, V>> map = null;

        Collection<K> locKeys = new LinkedList<K>();

        GridCacheVersion obsoleteVer = ctx.versions().next();

        for (K key : keys) {
            while (true) {
                GridDistributedCacheEntry<K, V> entry = peekExx(key);

                if (entry == null || !ctx.isAll(entry.wrap(false), filter))
                    break; // While.

                try {
                    GridCacheMvccCandidate<K> cand =
                        entry.candidate(ctx.nodeId(), Thread.currentThread().getId());

                    if (cand != null) {
                        ver = cand.version();

                        if (affNodes == null) {
                            affNodes = CU.allNodes(ctx, cand.topologyVersion());

                            keyCnt = (int) Math.ceil((double) keys.size() / affNodes.size());

                            map = new HashMap<GridRichNode, GridNearUnlockRequest<K, V>>(affNodes.size());
                        }

                        // Send request to remove from remote nodes.
                        GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

                        GridNearUnlockRequest<K, V> req = map.get(primary);

                        if (req == null) {
                            map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                            req.version(ver);
                        }

                        // Remove candidate from local node first.
                        GridCacheMvccCandidate<K> rmv = entry.removeLock();

                        if (rmv != null) {
                            if (!rmv.reentry()) {
                                if (ver != null && !ver.equals(rmv.version()))
                                    throw new GridException("Failed to unlock (if keys were locked separately, " +
                                        "then they need to be unlocked separately): " + keys);

                                if (!primary.isLocal()) {
                                    assert req != null;

                                    req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                                }
                                else
                                    locKeys.add(key);

                                if (log.isDebugEnabled())
                                    log.debug("Removed lock (will distribute): " + rmv);
                            }
                            else if (log.isDebugEnabled())
                                log.debug("Current thread still owns lock (or there are no other nodes) [lock=" +
                                    rmv + ", curThreadId=" + Thread.currentThread().getId() + ']');
                        }

                        // Try to evict near entry if it's dht-mapped locally.
                        evictNearEntry(entry, obsoleteVer);
                    }

                    break;
                }
                catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Attempted to unlock removed entry (will retry): " + entry);
                }
            }
        }

        if (ver == null)
            return;

        for (Map.Entry<GridRichNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
            GridRichNode n = mapping.getKey();

            GridDistributedUnlockRequest<K, V> req = mapping.getValue();

            if (n.isLocal())
                dht.removeLocks(ctx.nodeId(), req.version(), locKeys, true);
            else if (!req.keyBytes().isEmpty())
                // We don't wait for reply to this message.
                ctx.io().send(n, req);
        }
    }
    catch (GridException ex) {
        U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
}
/**
 * Rounds a double value to two decimal places (half-up).
 *
 * @param val Value to be rounded.
 * @return Rounded double value.
 */
private static double round2(double val) {
    return Math.floor(100 * val + 0.5) / 100;
}
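A quick check of the half-up behavior; 0.125 is exact in binary, so the first example is representation-safe:

// round2(0.125)   == 0.13  (the half-point rounds up)
// round2(3.14159) == 3.14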