// Decompiled, obfuscated Android constructor (single-letter fields come from the obfuscator).
public c(Context paramContext, boolean paramBoolean) {
  i = paramContext;
  B = paramBoolean;
  j = (NotificationManager) i.getSystemService("notification");
  k = false;
  m = g.addAndGet(1) + (int) SystemClock.elapsedRealtime();
  y.setTextSize(16.0F);
  if (Build.VERSION.SDK_INT >= 14) {
    // On API 14+ create a Notification.Builder reflectively.
    x = h.b(null, "android.app.Notification$Builder",
        new Class[] {Context.class}, new Context[] {i});
  } else {
    w = new af(i);
  }
  if (!B) {
    return;
  }
  synchronized (c) {
    c.add(Integer.valueOf(m));
    if (!f && d != null) {
      Notification notification;
      if (bd.j()) {
        notification = new Notification(
            2130837892, FexApplication.a().getText(2131428105), System.currentTimeMillis());
      } else {
        notification = new Notification(0, null, System.currentTimeMillis());
      }
      PendingIntent pendingIntent =
          PendingIntent.getActivity(FexApplication.a(), 0, new Intent(), 0);
      notification.setLatestEventInfo(
          FexApplication.a(),
          FexApplication.a().getText(2131427488),
          FexApplication.a().getText(2131428105),
          pendingIntent);
      notification.flags |= Notification.FLAG_NO_CLEAR; // 0x20
      d.startForeground(g.addAndGet(1) + (int) SystemClock.elapsedRealtime(), notification);
      f = true;
    }
  }
}
/** addAndGet adds the given value to the current value and returns the updated value. */
public void testAddAndGet() {
  AtomicInteger ai = new AtomicInteger(1);
  assertEquals(3, ai.addAndGet(2));
  assertEquals(3, ai.get());
  assertEquals(-1, ai.addAndGet(-4));
  assertEquals(-1, ai.get());
}
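// A hypothetical companion test in the same style (same assertEquals harness
// assumed), contrasting getAndAdd, which returns the value held *before* the
// addition rather than the updated value.
public void testGetAndAdd() {
  AtomicInteger ai = new AtomicInteger(1);
  assertEquals(1, ai.getAndAdd(2)); // the previous value is returned...
  assertEquals(3, ai.get()); // ...but the addition still took effect
}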
public void save(Chunk chunk) {
  ChunkPosition pos = chunk.getPosition();
  CompressedChunk c = chunks.get(pos);
  if (c == null) {
    c = new CompressedChunk(pos);
    chunks.put(pos, c);
    // Invert the version so the freshly created entry is always considered stale.
    c.setVersion(~chunk.getVersion());
  }
  if (c.getVersion() != chunk.getVersion()) {
    if (c.getData() != null) {
      approxSize.addAndGet(-c.getData().length); // retire the old payload from the estimate
    }
    long start = TimeStatistics.TIME_STATISTICS.start();
    // ByteArrayOutputStream byteOut = new ByteArrayOutputStream();
    // DataOutputStream dataOut = new DataOutputStream(byteOut);
    // compressor.compress(dataOut, chunk.getDataXZY());
    try {
      c.setData(DeflaterWrapper.compress(chunk.getDataXZY()));
      // c.setData(DeflaterWrapper.compress(byteOut.toByteArray()));
    } catch (IOException ex) {
      throw new RuntimeException(ex);
    }
    c.setVersion(chunk.getVersion());
    TimeStatistics.TIME_STATISTICS.end(start, "save");
    approxSize.addAndGet(c.getData().length); // account for the new payload
    if (chunks.size() % 1000 == 0) {
      System.out.println(
          Util.humanReadableByteCount(approxSize.get())
              + ", "
              + Util.humanReadableByteCount(approxSize.get() / chunks.size())
              + "/chunk");
    }
  }
}
@Override
public VisualSearchResult reevaluate(
    List<SearchResult> sourceValue, VisualSearchResult transformedValue) {
  resultCount.addAndGet(-transformedValue.getSources().size());
  ((SearchResultAdapter) transformedValue).update();
  resultCount.addAndGet(transformedValue.getSources().size());
  return transformedValue;
}
@Override
public boolean reserve(int quantity) {
  int remaining = count.addAndGet(-quantity);
  if (remaining < 0) {
    // Not enough left: undo the optimistic decrement and report failure.
    count.addAndGet(quantity);
    return false;
  }
  return true;
}
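// A minimal usage sketch (hypothetical class and harness) of the optimistic
// reserve-then-compensate pattern above: concurrent reservations may briefly
// drive the counter negative, but every failed attempt restores what it took.
import java.util.concurrent.atomic.AtomicInteger;

class ReserveDemo {
  private final AtomicInteger count;

  ReserveDemo(int initial) {
    count = new AtomicInteger(initial);
  }

  boolean reserve(int quantity) {
    int remaining = count.addAndGet(-quantity);
    if (remaining < 0) {
      count.addAndGet(quantity); // undo the optimistic decrement
      return false;
    }
    return true;
  }

  public static void main(String[] args) throws InterruptedException {
    ReserveDemo stock = new ReserveDemo(10);
    AtomicInteger granted = new AtomicInteger();
    Thread[] buyers = new Thread[8];
    for (int t = 0; t < buyers.length; t++) {
      buyers[t] = new Thread(() -> {
        if (stock.reserve(3)) {
          granted.addAndGet(3);
        }
      });
      buyers[t].start();
    }
    for (Thread buyer : buyers) {
      buyer.join();
    }
    // At most three reservations of 3 can succeed against 10 units.
    System.out.println("granted = " + granted.get());
  }
}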
void foo() {
  i.getAndAdd(2);
  i.getAndAdd(-5);
  if (i.get() == 0) {
    i.set(9);
  }
  System.out.println(i.addAndGet(9));
  System.out.println(i.addAndGet(-9));
}
@Override
public Block allocate(int size) {
  if (size <= 0) {
    throw new OutOfBoundException("Size must be positive: " + size);
  }
  metrics.increment("vmtable.totalAllocations");
  TimeContext timer = metrics.getTimer("vmtable.allocationTime");
  timer.start();
  final TableBlock freeBlock = findBlockToAllocateFrom(size);
  if (freeBlock != null) {
    final TableBlock result;
    try {
      result = new TableBlock(freeBlock.getAddress(), size);
      if (freeBlock.getSize() == size) {
        // Exact fit: the free block is fully consumed.
        freeBlock.resize(0, 0);
        removeBlock(free, freeBlock, freeLock);
        metrics.decrement("vmtable.fragmentation");
      } else {
        // Partial fit: shrink the free block past the allocated region.
        freeBlock.resize(freeBlock.getAddress() + size, freeBlock.getSize() - size);
        metrics.increment("vmtable.fragmentation");
      }
      freeMemorySize.addAndGet(-size);
    } finally {
      // unlock asap
      freeBlock.unlock();
    }
    insertBlock(used, result, usedLock);
    usedMemorySize.addAndGet(size);
    timer.stop();
    metrics.mark("vmtable.freeSize", freeMemorySize.longValue());
    metrics.mark("vmtable.usedSize", usedMemorySize.longValue());
    metrics.mark("vmtable.freeBlocksCount", free.size());
    metrics.mark("vmtable.usedBlocksCount", used.size());
    return result;
  }
  timer.stop();
  metrics.increment("vmtable.failedAllocations");
  return null;
}
@Override
public void dataArrived(QueryDataBatch result, ConnectionThrottle throttle) {
  final QueryData header = result.getHeader();
  final DrillBuf data = result.getData();
  if (data != null) {
    count.addAndGet(header.getRowCount());
    try {
      loader.load(header.getDef(), data);
      // TODO: Clean: DRILL-2933: That load(...) no longer throws
      // SchemaChangeException, so check/clean catch clause below.
    } catch (SchemaChangeException e) {
      submissionFailed(UserException.systemError(e).build(logger));
    }
    switch (format) {
      case TABLE:
        VectorUtil.showVectorAccessibleContent(loader, columnWidth);
        break;
      case TSV:
        VectorUtil.showVectorAccessibleContent(loader, "\t");
        break;
      case CSV:
        VectorUtil.showVectorAccessibleContent(loader, ",");
        break;
    }
    loader.clear();
  }
  result.release();
}
@Override
public void put(String key, File file) {
  int valueSize = getSize(file);
  int curCacheSize = cacheSize.get();
  // Evict entries until the new value fits under the size limit.
  while (curCacheSize + valueSize > sizeLimit) {
    int freedSize = removeNext();
    if (freedSize == INVALID_SIZE) {
      break; // cache is empty (nothing left to delete)
    }
    curCacheSize = cacheSize.addAndGet(-freedSize);
  }
  cacheSize.addAndGet(valueSize);

  Long currentTime = System.currentTimeMillis();
  file.setLastModified(currentTime);
  lastUsageDates.put(file, currentTime);
}
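// The eviction loop above reads cacheSize and adds to it in separate steps,
// so two concurrent put() calls can momentarily push the total past sizeLimit.
// A sketch (field names assumed from the snippet above) of reserving the
// space with a compareAndSet loop instead:
private boolean tryReserve(int valueSize) {
  while (true) {
    int cur = cacheSize.get();
    if (cur + valueSize > sizeLimit) {
      return false; // over the limit: the caller must evict and retry
    }
    if (cacheSize.compareAndSet(cur, cur + valueSize)) {
      return true; // space reserved atomically
    }
    // lost the race to another writer: re-read and try again
  }
}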
public boolean deleteProfile(String username, String password) {
  AtomicBoolean toReturn = new AtomicBoolean(false);
  AtomicInteger numberProfilesDeleted = new AtomicInteger(0);
  profileList
      .parallelStream()
      .filter(
          (userProfile) ->
              userProfile.get(ProfileItem.USERNAME.ordinal()).equals(username)
                  && userProfile.get(ProfileItem.PASSWORD.ordinal()).equals(password))
      .forEach(
          (userProfile) -> {
            // Removing during traversal assumes profileList is a concurrent collection.
            profileList.remove(userProfile);
            numberProfilesDeleted.addAndGet(1);
            toReturn.set(true);
          });
  // This should never happen (there should not be any username overlap).
  // If it does happen, there is some sort of security issue with the software.
  // Throw an exception to notify the user.
  if (numberProfilesDeleted.get() > 1) {
    throw new SecurityException(
        numberProfilesDeleted.get()
            + " profiles had the same username and password and were deleted.");
  }
  return toReturn.get();
}
@Override
public VisualSearchResult evaluate(List<SearchResult> sourceValue) {
  VisualSearchResult adapter = new SearchResultAdapter(sourceValue, propertiableHeadings);
  resultCount.addAndGet(adapter.getSources().size());
  return adapter;
}
public void testSneakingInAProc() throws IOException {
  System.out.println("STARTING testSneakingInAProc");
  Client client = getClient();
  int ctr = 0;
  for (int i = 0; i < 10; i++) {
    client.callProcedure(new MPCallback(), "MultiPartition");
    ctr++;
    for (int j = 0; j < 4; j++) {
      client.callProcedure(new SPCallback(), "SinglePartition", ctr, ctr);
      ctr++;
    }
  }
  answersReceived.addAndGet(ctr);
  client.drain();
  while (answersReceived.get() > 0) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      e.printStackTrace();
    }
  }
}
@Override
public void handle(ReutersDoc doc) {
  Document searchDoc = new Document();
  searchDoc.fields = Maps.newHashMap();
  searchDoc.fields.put("title", doc.title);
  searchDoc.fields.put("text", doc.text);
  documentsBuffer.add(searchDoc);
  if (documentsBuffer.size() >= bufferSize) {
    try {
      long before = System.nanoTime();
      broker.index(this.shard, documentsBuffer);
      long after = System.nanoTime();
      counter.addAndGet(documentsBuffer.size());
      indexTime += after - before;
    } catch (TException e) {
      e.printStackTrace();
    }
    documentsBuffer.clear();
  }
}
public synchronized void write(final PagedMessage message) throws Exception {
  if (!file.isOpen()) {
    return;
  }
  ByteBuffer buffer = fileFactory.newBuffer(message.getEncodeSize() + Page.SIZE_RECORD);
  HornetQBuffer wrap = HornetQBuffers.wrappedBuffer(buffer);
  wrap.clear();
  wrap.writeByte(Page.START_BYTE);
  wrap.writeInt(0); // placeholder for the encoded length, patched below
  int startIndex = wrap.writerIndex();
  message.encode(wrap);
  int endIndex = wrap.writerIndex();
  wrap.setInt(1, endIndex - startIndex); // the encoded length
  wrap.writeByte(Page.END_BYTE);
  buffer.rewind();
  file.writeDirect(buffer, false);
  if (pageCache != null) {
    pageCache.addLiveMessage(message);
  }
  numberOfMessages.incrementAndGet();
  size.addAndGet(buffer.limit());
  storageManager.pageWrite(message, pageId);
}
@Override
public AddResponse add(Collection<InputDocument> inputDocuments) {
  try {
    if (logger.isDebugEnabled()) {
      logger.debug("adding documents...");
    }
    for (InputDocument inputDocument : inputDocuments) {
      assertIdExist(inputDocument);
    }
    for (Document document : DocumentTransformUtil.toLuceneDocuments(inputDocuments, schema)) {
      indexWriter.updateDocument(
          new Term(schema.getIdName(), document.getFieldable(schema.getIdName()).stringValue()),
          document,
          schema.getAnalyzer());
    }
    updateCount.addAndGet(inputDocuments.size());
    if (logger.isDebugEnabled()) {
      logger.debug("add documents finished.");
    }
  } catch (Exception e) {
    logger.error("add documents error", e);
    return new AddResponse(e.getMessage(), ResultCodes.COMMON_ERROR);
  }
  return new AddResponse();
}
// -------------------------------------------------------------------------
public void test_forEach() {
  LocalDateDoubleTimeSeries base =
      LocalDateDoubleTimeSeries.builder().putAll(DATES_2010_14, VALUES_10_14).build();
  AtomicInteger counter = new AtomicInteger();
  base.forEach((date, value) -> counter.addAndGet((int) value));
  assertEquals(counter.get(), 10 + 11 + 12 + 13 + 14);
}
@Override
public void dataArrived(QueryDataBatch result, ConnectionThrottle throttle) {
  int rows = result.getHeader().getRowCount();
  System.out.println(String.format("Result batch arrived. Number of records: %d", rows));
  count.addAndGet(rows);
  result.release();
}
/*
 * (non-Javadoc)
 *
 * @see com.prashant.memory.MemoryBuffer#store(byte[], int)
 */
public synchronized Pointer store(byte[] payload, int capacity) throws DMBufferOverFlowException {
  // First good match is a page whose capacity is equal to or greater than the payload.
  Pointer goodOne = firstMatch(capacity);
  if (goodOne == null) {
    // logger.error("Did not find a suitable buffer");
    throw new DMBufferOverFlowException(
        "did not find a suitable buffer", new NullPointerException());
  }
  Pointer fresh = slice(goodOne, capacity);
  fresh.setFree(false);
  used.addAndGet(payload.length);
  ByteBuffer buf = buffer.slice();
  buf.position(fresh.getStart());
  try {
    buf.put(payload);
  } catch (BufferOverflowException e) {
    goodOne.setStart(fresh.getStart());
    goodOne.setEnd(buffer.limit());
    // return null; // Uncomment in case we want to ignore this exception.
    throw new DMBufferOverFlowException(
        "An attempt to store more than the configured capacity", e);
  }
  pointers.add(fresh);
  return fresh;
}
@Override
public DeleteResponse deleteByIds(List<String> ids) {
  if (CollectionUtil.isEmpty(ids)) {
    return new DeleteResponse();
  }
  final String idName = schema.getIdName();
  Term[] terms = new Term[ids.size()];
  int index = 0;
  for (String id : ids) {
    terms[index++] = new Term(idName, id);
  }
  try {
    if (logger.isDebugEnabled()) {
      logger.debug("deleting documents...");
    }
    indexWriter.deleteDocuments(terms);
    updateCount.addAndGet(ids.size());
    if (logger.isDebugEnabled()) {
      logger.debug("delete documents finished.");
    }
  } catch (IOException e) {
    logger.error("delete error", e);
    return new DeleteResponse(e.getMessage(), ResultCodes.COMMON_ERROR);
  }
  return new DeleteResponse();
}
// Appends a new packet of buffered deletes to the stream,
// setting its generation:
public synchronized long push(FrozenBufferedUpdates packet) {
  /*
   * The insert operation must be atomic. If we let threads increment the gen
   * and push the packet afterwards, we risk that packets are pushed out of
   * order. With DWPT this is possible if two or more flushes are racing to
   * push updates. If the pushed packets got out of order we would lose
   * documents, since deletes would be applied to the wrong segments.
   */
  packet.setDelGen(nextGen++);
  assert packet.any();
  assert checkDeleteStats();
  assert packet.delGen() < nextGen;
  assert updates.isEmpty() || updates.get(updates.size() - 1).delGen() < packet.delGen()
      : "Delete packets must be in order";
  updates.add(packet);
  numTerms.addAndGet(packet.numTermDeletes);
  bytesUsed.addAndGet(packet.bytesUsed);
  if (infoStream.isEnabled("BD")) {
    infoStream.message(
        "BD",
        "push deletes "
            + packet
            + " delGen="
            + packet.delGen()
            + " packetCount="
            + updates.size()
            + " totBytesUsed="
            + bytesUsed.get());
  }
  assert checkDeleteStats();
  return packet.delGen();
}
public int getNext() {
  int i = internal.addAndGet(1);
  if (i < 0) {
    // The counter has overflowed. -Integer.MIN_VALUE itself overflows back to
    // Integer.MIN_VALUE, so this addition wraps negative values into [0, Integer.MAX_VALUE].
    return i + (-Integer.MIN_VALUE);
  } else {
    return i;
  }
}
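// A small, self-contained demonstration (hypothetical class and main) of the
// wrap-around arithmetic used by getNext() above: once the counter overflows,
// negative values are mapped back into [0, Integer.MAX_VALUE].
import java.util.concurrent.atomic.AtomicInteger;

class GetNextOverflowDemo {
  public static void main(String[] args) {
    AtomicInteger internal = new AtomicInteger(Integer.MAX_VALUE - 1);
    for (int n = 0; n < 4; n++) {
      int i = internal.addAndGet(1);
      // -Integer.MIN_VALUE overflows back to Integer.MIN_VALUE itself.
      System.out.println(i < 0 ? i + (-Integer.MIN_VALUE) : i);
    }
    // Prints 2147483647, 0, 1, 2: the sequence wraps to zero instead of going negative.
  }
}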
@Override
public void run() {
  while (System.currentTimeMillis() < now + 3600000) {
    COUNT.addAndGet(1);
    System.out.println(COUNT.get() + ":" + api.friendshipsFollowers("1642591402", 9, 1, 0));
  }
}
void removePagesForLedger(long ledgerId) {
  int removedPageCount = pageMapAndList.removeEntriesForALedger(ledgerId);
  if (pageCount.addAndGet(-removedPageCount) < 0) {
    throw new RuntimeException(
        "Page count of ledger cache has been decremented to be less than zero.");
  }
  ledgersToFlush.remove(ledgerId);
}
int updateSendWindowSize(int streamId, int deltaWindowSize) {
  if (streamId == SPDY_SESSION_STREAM_ID) {
    return sendWindowSize.addAndGet(deltaWindowSize);
  }
  StreamState state = activeStreams.get(streamId);
  return state != null ? state.updateSendWindowSize(deltaWindowSize) : -1;
}
// This is not an efficient operation on a distributed, sharded
// flow store. We need to revisit the need for this operation, or at least
// make it device-specific.
@Override
public int getFlowRuleCount() {
  AtomicInteger sum = new AtomicInteger(0);
  deviceService
      .getDevices()
      .forEach(device -> sum.addAndGet(Iterables.size(getFlowEntries(device.id()))));
  return sum.get();
}
private void addReceiveCount(String from, int amount) {
  // This is possibly lossy in the case where a value is deleted because it
  // received no messages over the metrics collection period and new messages
  // are just starting to come in. That is acceptable: it avoids the overhead
  // of synchronization just to make the metric absolutely perfect.
  AtomicInteger i = messagesEnqueued.get(from);
  if (i == null) {
    i = new AtomicInteger(amount);
    AtomicInteger prev = messagesEnqueued.putIfAbsent(from, i);
    if (prev != null) {
      // Another thread installed the counter first; fold our amount into it.
      prev.addAndGet(amount);
    }
  } else {
    i.addAndGet(amount);
  }
}
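// On Java 8+, the get/putIfAbsent race handling above is commonly collapsed
// into computeIfAbsent, which installs the counter atomically. A sketch,
// assuming messagesEnqueued is the ConcurrentMap<String, AtomicInteger> the
// snippet implies (the lossy-deletion caveat is unchanged):
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

class ReceiveCounts {
  private final ConcurrentMap<String, AtomicInteger> messagesEnqueued = new ConcurrentHashMap<>();

  void addReceiveCount(String from, int amount) {
    messagesEnqueued.computeIfAbsent(from, k -> new AtomicInteger()).addAndGet(amount);
  }
}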
@Override
public void onFailed(DownloadTask task) {
  // Use the value returned by addAndGet so the log and the zero check see the
  // same count, rather than racing with other callbacks via get().
  int remaining = downloadingCound.addAndGet(-1);
  ELog.i("Count:" + remaining);
  if (remaining == 0) {
    stopTimer();
  }
}
private synchronized void enqueueDrivers(boolean forceRunSplit, List<DriverSplitRunner> runners) {
  // schedule driver to be executed
  List<ListenableFuture<?>> finishedFutures =
      taskExecutor.enqueueSplits(taskHandle, forceRunSplit, runners);
  checkState(
      finishedFutures.size() == runners.size(),
      "Expected %s futures but got %s",
      runners.size(),
      finishedFutures.size());

  // record new driver
  remainingDrivers.addAndGet(finishedFutures.size());

  // when driver completes, update state and fire events
  for (int i = 0; i < finishedFutures.size(); i++) {
    ListenableFuture<?> finishedFuture = finishedFutures.get(i);
    final DriverSplitRunner splitRunner = runners.get(i);
    Futures.addCallback(
        finishedFuture,
        new FutureCallback<Object>() {
          @Override
          public void onSuccess(Object result) {
            try (SetThreadName setThreadName = new SetThreadName("Task-%s", taskId)) {
              // record driver is finished
              remainingDrivers.decrementAndGet();
              checkTaskCompletion();
              queryMonitor.splitCompletionEvent(
                  taskId, splitRunner.getDriverContext().getDriverStats());
            }
          }

          @Override
          public void onFailure(Throwable cause) {
            try (SetThreadName setThreadName = new SetThreadName("Task-%s", taskId)) {
              taskStateMachine.failed(cause);

              // record driver is finished
              remainingDrivers.decrementAndGet();

              DriverContext driverContext = splitRunner.getDriverContext();
              DriverStats driverStats;
              if (driverContext != null) {
                driverStats = driverContext.getDriverStats();
              } else {
                // split runner did not start successfully
                driverStats = new DriverStats();
              }

              // fire failed event with cause
              queryMonitor.splitFailedEvent(taskId, driverStats, cause);
            }
          }
        },
        notificationExecutor);
  }
}
/**
 * Consumes the given number of bytes from this {@link DataInfo}.
 *
 * @param delta the number of bytes consumed
 */
public void consume(int delta) {
  if (delta < 0) {
    throw new IllegalArgumentException();
  }
  int read = length() - available();
  int newConsumed = consumed() + delta;
  // if (newConsumed > read)
  //   throw new IllegalStateException("Consuming without reading: consumed "
  //       + newConsumed + " but only read " + read);
  consumed.addAndGet(delta);
}
/**
 * Releases a permit, returning it to the semaphore.
 *
 * <p>Releases a permit, increasing the number of available permits by one. If any threads are
 * trying to acquire a permit, then one is selected and given the permit that was just released.
 * That thread is (re)enabled for thread scheduling purposes.
 *
 * <p>There is no requirement that a thread that releases a permit must have acquired that permit
 * by calling {@link #acquire}. Correct usage of a semaphore is established by programming
 * convention in the application.
 */
public void release() {
  if (cancel.get() > 0 && count.get() < 0) {
    processCancels(cancel.getAndSet(0));
  }
  if (count.addAndGet(1) <= 0) {
    sem.release();
  }
}
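// A self-contained sketch of the counter-plus-backing-semaphore pattern that
// release() above relies on. The pairing acquire() and the cancellation path
// are assumptions here, not part of the snippet: a negative count means
// threads are parked on the backing semaphore, so release() wakes one of them.
import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicInteger;

class SimpleAtomicSemaphore {
  private final AtomicInteger count; // available permits; negative = number of waiters
  private final Semaphore sem = new Semaphore(0); // parking lot for waiting threads

  SimpleAtomicSemaphore(int permits) {
    count = new AtomicInteger(permits);
  }

  void acquire() throws InterruptedException {
    if (count.addAndGet(-1) < 0) {
      sem.acquire(); // no permit available: park until release() hands one over
    }
  }

  void release() {
    if (count.addAndGet(1) <= 0) {
      sem.release(); // a waiter is parked: wake it with the returned permit
    }
  }
}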