/**
 * Removes an allocation threshold action.
 *
 * <p>Removes the allocation threshold action for the given level and direction.
 *
 * @param direction direction of the registered action
 * @param threshold threshold level of the registered action
 * @return the removed action, or {@code null} if no action was present
 */
public synchronized Runnable removeAllocationThreshold(
    ThresholdDirection direction, long threshold) {
  switch (direction) {
    case RISING:
      return risingThresholds.remove(threshold);
    case FALLING:
      return fallingThresholds.remove(threshold);
  }
  throw new AssertionError();
}
/** Public for testing purposes. */
public synchronized void removeOldBackups() {
  SortedMap<Long, File> filesMap = getBackupFiles();

  if (filesMap.size() > 0) {
    Calendar rangeStart = TaskActivityUtil.getCalendar();
    rangeStart.setTimeInMillis(filesMap.lastKey());
    TaskActivityUtil.snapStartOfHour(rangeStart);
    int startHour = rangeStart.get(Calendar.HOUR_OF_DAY);
    Calendar rangeEnd = TaskActivityUtil.getCalendar();
    rangeEnd.setTimeInMillis(rangeStart.getTimeInMillis());
    rangeEnd.add(Calendar.HOUR_OF_DAY, 1);

    // Keep one backup for the last 8 hours of today
    for (int x = 0; x <= startHour && x < 8; x++) {
      SortedMap<Long, File> subMap =
          filesMap.subMap(rangeStart.getTimeInMillis(), rangeEnd.getTimeInMillis());
      while (subMap.size() > 1) {
        File toDelete = subMap.remove(subMap.firstKey());
        toDelete.delete();
      }
      rangeStart.add(Calendar.HOUR_OF_DAY, -1);
      rangeEnd.add(Calendar.HOUR_OF_DAY, -1);
    }

    // Keep one backup a day for the past 12 days
    TaskActivityUtil.snapStartOfDay(rangeEnd);
    rangeStart.add(Calendar.DAY_OF_YEAR, -1);
    for (int x = 1; x <= 12; x++) {
      SortedMap<Long, File> subMap =
          filesMap.subMap(rangeStart.getTimeInMillis(), rangeEnd.getTimeInMillis());
      while (subMap.size() > 1) {
        File toDelete = subMap.remove(subMap.firstKey());
        toDelete.delete();
      }
      rangeStart.add(Calendar.DAY_OF_YEAR, -1);
      rangeEnd.add(Calendar.DAY_OF_YEAR, -1);
    }

    // Remove all older backups
    SortedMap<Long, File> subMap = filesMap.subMap(0L, rangeStart.getTimeInMillis());
    while (subMap.size() > 0) {
      File toDelete = subMap.remove(subMap.firstKey());
      toDelete.delete();
    }
  }
}
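// Illustrative sketch (not from the original sources): the pruning above leans on
// SortedMap.subMap returning a live view of the backing map, so removing through the view
// also removes from the map itself. The names below (SubMapPruneDemo, pruneBucket) are
// hypothetical.
import java.util.SortedMap;
import java.util.TreeMap;

public class SubMapPruneDemo {
  /** Keeps only the newest entry with a key in [from, to); removals write through. */
  static void pruneBucket(SortedMap<Long, String> backups, long from, long to) {
    SortedMap<Long, String> bucket = backups.subMap(from, to);
    while (bucket.size() > 1) {
      bucket.remove(bucket.firstKey()); // drop the oldest entry in the bucket
    }
  }

  public static void main(String[] args) {
    SortedMap<Long, String> backups = new TreeMap<Long, String>();
    backups.put(100L, "backup-100.zip");
    backups.put(150L, "backup-150.zip");
    backups.put(190L, "backup-190.zip");
    pruneBucket(backups, 100L, 200L);
    System.out.println(backups); // {190=backup-190.zip}
  }
}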
/**
 * Sets a handler for the named function, optionally tagging the function as impure, and
 * replacing any existing handler for that name. If the handler is {@code null}, the function
 * handler is removed.
 *
 * <p>Pure functions have results which depend purely on their arguments; given constant
 * arguments they will have a constant result. Impure functions are rare.
 */
public MathEval setFunctionHandler(String nam, FunctionHandler hdl, boolean impure) {
  validateName(nam);
  if (hdl == null) {
    pureFunctions.remove(nam);
    impureFunctions.remove(nam);
  } else if (impure) {
    pureFunctions.remove(nam);
    impureFunctions.put(nam, hdl);
  } else {
    pureFunctions.put(nam, hdl);
    impureFunctions.remove(nam);
  }
  return this;
}
/**
 * Aborts the transaction.
 *
 * @param transactionId the id of the transaction to abort
 * @throws IOException if the abort record cannot be written to the transaction log
 */
public void abort(final long transactionId) throws IOException {
  // Not checking closing...
  TransactionState state;
  try {
    state = getTransactionState(transactionId);
  } catch (UnknownTransactionException e) {
    LOG.info(
        "Asked to abort unknown transaction ["
            + transactionId
            + "] in region ["
            + getRegionInfo().getRegionNameAsString()
            + "], ignoring");
    return;
  }

  state.setStatus(Status.ABORTED);

  if (state.hasWrite()) {
    this.transactionLog.writeAbortToLog(super.getRegionInfo(), state.getTransactionId());
  }

  // The following removes are needed only if we have voted
  if (state.getSequenceNumber() != null) {
    commitedTransactionsBySequenceNumber.remove(state.getSequenceNumber());
  }
  commitPendingTransactions.remove(state);

  retireTransaction(state);
}
public void noMoreInstances() throws MaltChainedException {
  // if (getGuide().getGuideMode() == Guide.GuideMode.CLASSIFY) {
  //   throw new GuideException("Can only finish all data during learning. ");
  // }
  if (divideModels != null) {
    divideFeature.updateCardinality();
    for (Integer index : divideModels.keySet()) {
      divideModels.get(index).noMoreInstances();
    }
    final TreeSet<Integer> removeSet = new TreeSet<Integer>();
    for (Integer index : divideModels.keySet()) {
      if (divideModels.get(index).getFrequency() <= divideThreshold) {
        divideModels
            .get(index)
            .moveAllInstances(masterModel, divideFeature, divideFeatureIndexVector);
        removeSet.add(index);
      }
    }
    for (Integer index : removeSet) {
      divideModels.remove(index);
    }
    masterModel.noMoreInstances();
  } else {
    throw new GuideException("The feature divide models cannot be found. ");
  }
}
public void testSubMapContents2() {
  ConcurrentNavigableMap map = map5();
  SortedMap sm = map.subMap(two, three);
  assertEquals(1, sm.size());
  assertEquals(two, sm.firstKey());
  assertEquals(two, sm.lastKey());
  assertFalse(sm.containsKey(one));
  assertTrue(sm.containsKey(two));
  assertFalse(sm.containsKey(three));
  assertFalse(sm.containsKey(four));
  assertFalse(sm.containsKey(five));
  Iterator i = sm.keySet().iterator();
  Object k;
  k = (Integer) (i.next());
  assertEquals(two, k);
  assertFalse(i.hasNext());
  Iterator j = sm.keySet().iterator();
  j.next();
  j.remove();
  assertFalse(map.containsKey(two));
  assertEquals(4, map.size());
  assertEquals(0, sm.size());
  assertTrue(sm.isEmpty());
  assertSame(sm.remove(three), null);
  assertEquals(4, map.size());
}
@Override
public void onNodeRemoved(OperationsNodeInfo node) {
  for (int i = 0; i < replicas; i++) {
    LOG.trace("Removing node {} replica {} from the circle", node.getConnectionInfo(), i);
    circle.remove(hash(node, i));
  }
}
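// Illustrative sketch (an assumption, not the project's actual classes): the loop above
// removes each virtual replica of a node from a consistent-hash ring. A minimal ring backed
// by a TreeMap looks like this; hash() here is a stand-in, not the real hash function.
import java.util.SortedMap;
import java.util.TreeMap;

public class HashRingDemo {
  private final SortedMap<Integer, String> circle = new TreeMap<Integer, String>();
  private final int replicas;

  HashRingDemo(int replicas) {
    this.replicas = replicas;
  }

  private int hash(String node, int replica) {
    return (node + "#" + replica).hashCode(); // placeholder hash
  }

  void add(String node) {
    // place one ring entry per virtual replica
    for (int i = 0; i < replicas; i++) circle.put(hash(node, i), node);
  }

  void remove(String node) {
    // mirror of add(): drop every replica from the ring
    for (int i = 0; i < replicas; i++) circle.remove(hash(node, i));
  }

  public static void main(String[] args) {
    HashRingDemo ring = new HashRingDemo(3);
    ring.add("node-a");
    ring.remove("node-a");
    System.out.println(ring.circle.isEmpty()); // true
  }
}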
// BEGIN IA/HERITRIX CHANGES
public /*synchronized*/ void addCookie(Cookie cookie) {
  LOG.trace("enter HttpState.addCookie(Cookie)");
  // PRIOR IMPL & COMPARISON HARNESS LEFT COMMENTED OUT FOR TEMPORARY REFERENCE
  // Cookie removed1 = null;
  // Cookie removed2 = null;
  if (cookie != null) {
    // first remove any old cookie that is equivalent
    // for (Iterator it = cookiesArrayList.iterator(); it.hasNext();) {
    //   Cookie tmp = (Cookie) it.next();
    //   if (cookie.equals(tmp)) {
    //     it.remove();
    //     removed1 = tmp;
    //     break;
    //   }
    // }
    if (!cookie.isExpired()) {
      // cookiesArrayList.add(cookie);
      cookiesMap.put(cookie.getSortKey(), cookie);
    } else {
      cookiesMap.remove(cookie.getSortKey());
    }
  }
  // if (removed1 != null && !removed1.equals(removed2)) {
  //   System.out.println("addCookie discrepancy");
  // }
  // END IA/HERITRIX CHANGES
}
private void updateParentChildren(
    SortedMap<String, Requirement> map, String parent, Requirement entry) {
  Requirement parentRequirement = map.get(parent);
  Requirement updatedParentRequirement = parentRequirement.withChild(entry);
  map.remove(parent);
  map.put(parent, updatedParentRequirement);
}
public Channel getChannel(String name) {
  synchronized (channels) {
    Channel channel = channels.get(name);
    if (channel == null) {
      boolean resize = false;
      channel = channels.remove(INITIAL);
      if (channel == null) {
        channel = new Channel(name);
        resize = true;
      } else {
        channel.setName(name);
      }
      channels.put(name, channel);
      channelArray = channels.values().toArray(new Channel[channels.size()]);
      if (resize) {
        getDisplay()
            .syncExec(
                new Runnable() {
                  public void run() {
                    controlResized(null);
                  }
                });
      }
    }
    return channel;
  }
}
/**
 * Removes the value at the specified key.
 *
 * @param construct the index (for a normal array) or key (for an associative array) to remove
 * @return the removed value
 */
public Construct remove(Construct construct) {
  String c = normalizeConstruct(construct);
  Construct ret;
  if (!associative_mode) {
    try {
      ret = array.remove(Integer.parseInt(c));
      next_index--;
    } catch (NumberFormatException e) {
      throw ConfigRuntimeException.BuildException(
          "Expecting an integer, but received \""
              + c
              + "\" (were you expecting an associative array? This array is a normal array.)",
          ExceptionType.CastException,
          construct.getTarget());
    } catch (IndexOutOfBoundsException e) {
      throw ConfigRuntimeException.BuildException(
          "Cannot remove the value at '" + c + "', as no such index exists in the array",
          ExceptionType.RangeException,
          construct.getTarget());
    }
  } else {
    ret = associative_array.remove(c);
  }
  regenValue(new HashSet<CArray>());
  return ret;
}
public synchronized void removeField(String fieldName) {
  if ((fieldName == null) || (fieldName.equals(""))) {
    return;
  }
  descriptorMap.remove(fieldName);
}
private synchronized TaskAttemptId getAndRemove(int volumeId) {
  TaskAttemptId taskAttemptId = null;
  if (!unassignedTaskForEachVolume.containsKey(volumeId)) {
    if (volumeId > REMOTE) {
      diskVolumeLoads.remove(volumeId);
    }
    return taskAttemptId;
  }

  LinkedHashSet<TaskAttempt> list = unassignedTaskForEachVolume.get(volumeId);
  if (list != null && !list.isEmpty()) {
    TaskAttempt taskAttempt;
    synchronized (unassignedTaskForEachVolume) {
      Iterator<TaskAttempt> iterator = list.iterator();
      taskAttempt = iterator.next();
      iterator.remove();
    }

    taskAttemptId = taskAttempt.getId();
    for (DataLocation location : taskAttempt.getTask().getDataLocations()) {
      HostVolumeMapping volumeMapping =
          scheduledRequests.leafTaskHostMapping.get(location.getHost());
      if (volumeMapping != null) {
        volumeMapping.removeTaskAttempt(location.getVolumeId(), taskAttempt);
      }
    }

    increaseConcurrency(volumeId);
  }

  return taskAttemptId;
}
/*
 * If you set jmx.serial.form to "1.2.0" or "1.2.1", then we are
 * bug-compatible with those versions. Specifically, field names
 * are forced to lower-case before being written. This contradicts
 * the spec, which, though it does not mention serialization
 * explicitly, does say that the case of field names is preserved.
 * But in 1.2.0 and 1.2.1, this requirement was not met. Instead,
 * field names in the descriptor map were forced to lower case.
 * Those versions expect this to have happened to a descriptor they
 * deserialize and e.g. getFieldValue will not find a field whose
 * name is spelt with a different case.
 */
private void writeObject(ObjectOutputStream out) throws IOException {
  ObjectOutputStream.PutField fields = out.putFields();
  boolean compat = "1.0".equals(serialForm);
  if (compat) fields.put("currClass", currClass);

  /* Purge the field "targetObject" from the DescriptorSupport before
   * serializing since the referenced object is typically not
   * serializable. We do this here rather than purging the "descriptor"
   * variable below because that HashMap doesn't do case-insensitivity.
   * See CR 6332962.
   */
  SortedMap<String, Object> startMap = descriptorMap;
  if (startMap.containsKey("targetObject")) {
    startMap = new TreeMap<String, Object>(descriptorMap);
    startMap.remove("targetObject");
  }

  final HashMap<String, Object> descriptor;
  if (compat || "1.2.0".equals(serialForm) || "1.2.1".equals(serialForm)) {
    descriptor = new HashMap<String, Object>();
    for (Map.Entry<String, Object> entry : startMap.entrySet())
      descriptor.put(entry.getKey().toLowerCase(), entry.getValue());
  } else descriptor = new HashMap<String, Object>(startMap);

  fields.put("descriptor", descriptor);
  out.writeFields();
}
/**
 * Renames the named record/collection to {@code newName}.
 *
 * @param oldName current name of the record/collection
 * @param newName new name of the record/collection
 * @throws NoSuchElementException if oldName does not exist
 */
public synchronized void rename(String oldName, String newName) {
  if (oldName.equals(newName)) return;

  Map<String, Object> sub = catalog.tailMap(oldName);
  List<String> toRemove = new ArrayList<String>();

  for (String param : sub.keySet()) {
    if (!param.startsWith(oldName)) break;

    String suffix = param.substring(oldName.length());
    catalog.put(newName + suffix, catalog.get(param));
    toRemove.add(param);
  }
  if (toRemove.isEmpty())
    throw new NoSuchElementException("Could not rename, name does not exist: " + oldName);

  WeakReference old = namesInstanciated.remove(oldName);
  if (old != null) {
    Object old2 = old.get();
    if (old2 != null) {
      namesLookup.remove(old2);
      namedPut(newName, old2);
    }
  }
  for (String param : toRemove) catalog.remove(param);
}
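// Illustrative sketch (hypothetical names): the rename above walks catalog.tailMap(oldName)
// and stops at the first key that no longer starts with the old prefix. That works because a
// sorted map keeps all keys sharing a prefix contiguous. Note the snippet above puts into the
// catalog while iterating it, which its backing map evidently tolerates; a plain TreeMap does
// not, so this sketch stages the renamed entries and applies them after the loop.
import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;

public class PrefixRenameDemo {
  public static void main(String[] args) {
    TreeMap<String, Object> catalog = new TreeMap<String, Object>();
    catalog.put("users", 1);
    catalog.put("users.index", 2);
    catalog.put("zoo", 3);

    String oldName = "users", newName = "accounts";
    TreeMap<String, Object> renamed = new TreeMap<String, Object>();
    List<String> toRemove = new ArrayList<String>();
    for (String key : catalog.tailMap(oldName).keySet()) {
      if (!key.startsWith(oldName)) break; // keys are sorted, so matches are contiguous
      renamed.put(newName + key.substring(oldName.length()), catalog.get(key));
      toRemove.add(key);
    }
    for (String key : toRemove) catalog.remove(key);
    catalog.putAll(renamed);
    System.out.println(catalog); // {accounts=1, accounts.index=2, zoo=3}
  }
}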
public void testDescendingSubMapContents2() {
  ConcurrentNavigableMap map = dmap5();
  SortedMap sm = map.subMap(m2, m3);
  assertEquals(1, sm.size());
  assertEquals(m2, sm.firstKey());
  assertEquals(m2, sm.lastKey());
  assertFalse(sm.containsKey(m1));
  assertTrue(sm.containsKey(m2));
  assertFalse(sm.containsKey(m3));
  assertFalse(sm.containsKey(m4));
  assertFalse(sm.containsKey(m5));
  Iterator i = sm.keySet().iterator();
  Object k;
  k = (Integer) (i.next());
  assertEquals(m2, k);
  assertFalse(i.hasNext());
  Iterator j = sm.keySet().iterator();
  j.next();
  j.remove();
  assertFalse(map.containsKey(m2));
  assertEquals(4, map.size());
  assertEquals(0, sm.size());
  assertTrue(sm.isEmpty());
  assertSame(sm.remove(m3), null);
  assertEquals(4, map.size());
}
public synchronized boolean put(final String key, String value) {
  String oldvalue = properties.get(key);
  if (value != null && value.length() == 0) {
    value = null;
  }

  if (!((oldvalue == null && (value == null || value.equals(defaults.get(key))))
      || (value != null && oldvalue != null && oldvalue.equals(value)))) {
    if (value == null) {
      properties.remove(key);
    } else {
      properties.put(key, value);
    }
    try {
      save();
    } catch (IOException e) {
      System.out.println(
          tr(
              "Warning: failed to persist preferences to ''{0}''",
              getPreferenceFile().getAbsoluteFile()));
    }
    firePrefrenceChanged(key, oldvalue, value);
    return true;
  }
  return false;
}
@Test
public void testAssignMigrations() {
  servers.clear();
  servers.put(
      new TServerInstance(HostAndPort.fromParts("127.0.0.1", 1234), "a"), new FakeTServer());
  servers.put(
      new TServerInstance(HostAndPort.fromParts("127.0.0.1", 1235), "b"), new FakeTServer());
  servers.put(
      new TServerInstance(HostAndPort.fromParts("127.0.0.1", 1236), "c"), new FakeTServer());
  List<KeyExtent> metadataTable = new ArrayList<KeyExtent>();
  String table = "t1";
  metadataTable.add(makeExtent(table, null, null));

  table = "t2";
  metadataTable.add(makeExtent(table, "a", null));
  metadataTable.add(makeExtent(table, null, "a"));

  table = "t3";
  metadataTable.add(makeExtent(table, "a", null));
  metadataTable.add(makeExtent(table, "b", "a"));
  metadataTable.add(makeExtent(table, "c", "b"));
  metadataTable.add(makeExtent(table, "d", "c"));
  metadataTable.add(makeExtent(table, "e", "d"));
  metadataTable.add(makeExtent(table, null, "e"));
  Collections.sort(metadataTable);

  TestDefaultLoadBalancer balancer = new TestDefaultLoadBalancer();

  SortedMap<TServerInstance, TabletServerStatus> current =
      new TreeMap<TServerInstance, TabletServerStatus>();
  for (Entry<TServerInstance, FakeTServer> entry : servers.entrySet()) {
    current.put(entry.getKey(), entry.getValue().getStatus(entry.getKey()));
  }
  assignTablets(metadataTable, servers, current, balancer);

  // Verify that the counts on the tables are correct
  Map<String, Integer> expectedCounts = new HashMap<String, Integer>();
  expectedCounts.put("t1", 1);
  expectedCounts.put("t2", 1);
  expectedCounts.put("t3", 2);
  checkBalance(metadataTable, servers, expectedCounts);

  // Rebalance once
  for (Entry<TServerInstance, FakeTServer> entry : servers.entrySet()) {
    current.put(entry.getKey(), entry.getValue().getStatus(entry.getKey()));
  }

  // Nothing should happen, we are balanced
  ArrayList<TabletMigration> out = new ArrayList<TabletMigration>();
  balancer.getMigrations(current, out);
  assertEquals(out.size(), 0);

  // Take down a tabletServer
  TServerInstance first = current.keySet().iterator().next();
  current.remove(first);
  FakeTServer remove = servers.remove(first);

  // reassign offline extents
  assignTablets(remove.extents, servers, current, balancer);
  checkBalance(metadataTable, servers, null);
}
/**
 * Removes an attribute from this resource class.
 *
 * @param attr an attribute.
 * @throws EntityDoesNotExistException if the class does not declare the attribute.
 */
public void deleteAttribute(Attribute attr) throws EntityDoesNotExistException {
  if (!attributes.containsKey(attr.getName())) {
    throw new EntityDoesNotExistException(
        "class " + getName() + " does not declare attribute named " + attr.getName());
  }
  attributes.remove(attr.getName());
  attr.setDeclaringClass(null);
}
/**
 * @param cytobandResultset the CytobandResultset to remove from this CopyNumberResultsContainer
 *     object
 */
public void removeCytobandResultset(CytobandResultset cytobandResultset) {
  if (cytobandResultset != null && cytobandResultset.getCytoband() != null) {
    cytobands.remove(cytobandResultset.getCytoband().toString());
    if (cytobandResultset.getReporterNames() != null) {
      reporterNames.removeAll(cytobandResultset.getReporterNames());
    }
  }
}
/**
 * Flush the nodes in order, from the lowest level to highest level. As a flush dirties its
 * parent, add it to the dirty map, thereby cascading the writes up the tree. If flushAll wasn't
 * specified, we need only cascade up to the highest level that existed before the checkpointing
 * started.
 *
 * <p>Note that all but the top level INs and the BINDeltas are logged provisionally. That's
 * because we don't need to process lower INs because the higher INs will end up pointing at
 * them.
 */
private void flushDirtyNodes(boolean flushAll, boolean allowDeltas, boolean flushExtraLevel)
    throws DatabaseException {
  LogManager logManager = envImpl.getLogManager();

  SortedMap dirtyMap = selectDirtyINs(flushAll, flushExtraLevel);
  while (dirtyMap.size() > 0) {
    /* Work on one level's worth of nodes in ascending level order. */
    Integer currentLevel = (Integer) dirtyMap.firstKey();
    boolean logProvisionally = (currentLevel.intValue() != highestFlushLevel);

    Set nodeSet = (Set) dirtyMap.get(currentLevel);
    Iterator iter = nodeSet.iterator();

    /* Flush all those nodes. */
    while (iter.hasNext()) {
      IN target = (IN) iter.next();
      target.latch();
      boolean triedToFlush = false;

      /*
       * Only flush the ones that are still dirty -- some
       * may have been written out by the evictor. Also
       * check if the db is still valid -- since INs of
       * deleted databases are left on the in-memory tree
       * until the evictor lazily clears them out, there may
       * be dead INs around.
       */
      if (target.getDirty() && (!target.getDatabase().getIsDeleted())) {
        flushIN(target, logManager, dirtyMap, logProvisionally, allowDeltas);
        triedToFlush = true;
      } else {
        target.releaseLatch();
      }

      Tracer.trace(
          Level.FINE,
          envImpl,
          "Checkpointer: node="
              + target.getNodeId()
              + " level="
              + Integer.toHexString(target.getLevel())
              + " flushed="
              + triedToFlush);
    }

    /* We're done with this level. */
    dirtyMap.remove(currentLevel);

    /* We can stop at this point. */
    if (currentLevel.intValue() == highestFlushLevel) {
      break;
    }
  }
}
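// Illustrative sketch (simplified, hypothetical names): the checkpoint loop above drains a
// SortedMap keyed by tree level, always taking firstKey() (the lowest level) and deleting the
// level when done, so any nodes dirtied at higher levels during a flush are picked up later.
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;

public class LevelDrainDemo {
  public static void main(String[] args) {
    SortedMap<Integer, Set<String>> dirty = new TreeMap<Integer, Set<String>>();
    dirty.put(1, new HashSet<String>(Arrays.asList("bin-a", "bin-b")));
    dirty.put(2, new HashSet<String>(Arrays.asList("in-root")));

    while (!dirty.isEmpty()) {
      Integer level = dirty.firstKey(); // lowest remaining level
      for (String node : dirty.get(level)) {
        System.out.println("flush level " + level + ": " + node);
        // a real flush may dirty the node's parent here, adding it under level + 1
      }
      dirty.remove(level); // done with this level; move upward
    }
  }
}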
/**
 * Sets a named variable (variable names are not case-sensitive). If the value is null, the
 * variable is removed.
 */
public MathEval setVariable(String nam, Double val) {
  validateName(nam);
  if (val == null) {
    variables.remove(nam);
  } else {
    variables.put(nam, val);
  }
  return this;
}
public double remove(String omxId) throws ParserConfigurationException {
  if (stockmap.containsKey(omxId)) {
    events.addEvent("Stock removed from portfolio", omxId, invest.getShortName(omxId));
    updateLatestBuy();
    updateLatestSell();
    return stockmap.remove(omxId);
  }
  return 0;
}
/** Add a segment with a map. */
public void addSegment(short startCode, short endCode, char[] map) {
  if (map.length != (endCode - startCode) + 1) {
    throw new IllegalArgumentException("Wrong number of entries in map");
  }

  Segment s = new Segment(startCode, endCode, true);
  // make sure we remove any old entries
  segments.remove(s);
  segments.put(s, map);
}
/** Decrease the count of running tasks of a certain task runner. */
private synchronized void decreaseConcurrency(int volumeId) {
  if (diskVolumeLoads.containsKey(volumeId)) {
    Integer concurrency = diskVolumeLoads.get(volumeId);
    if (concurrency > 0) {
      diskVolumeLoads.put(volumeId, concurrency - 1);
    } else {
      if (volumeId > REMOTE && !unassignedTaskForEachVolume.containsKey(volumeId)) {
        diskVolumeLoads.remove(volumeId);
      }
    }
  }
}
private synchronized void removeTaskAttempt(int volumeId, TaskAttempt taskAttempt) {
  if (!unassignedTaskForEachVolume.containsKey(volumeId)) return;

  LinkedHashSet<TaskAttempt> tasks = unassignedTaskForEachVolume.get(volumeId);
  if (tasks.remove(taskAttempt)) {
    remainTasksNum.getAndDecrement();
  }

  if (tasks.isEmpty()) {
    unassignedTaskForEachVolume.remove(volumeId);
    if (volumeId > REMOTE) {
      diskVolumeLoads.remove(volumeId);
    }
  }
}
public void popTemporaryModule(String moduleName, int handle) {
  synchronized (lockTemporaryModules) {
    SortedMap<Integer, IModule> stack = temporaryModules.get(moduleName);
    try {
      if (stack != null) {
        stack.remove(handle);
        if (stack.size() == 0) {
          temporaryModules.remove(moduleName);
        }
      }
    } catch (Throwable e) {
      Log.log(e);
    }
  }
}
@Override
public void fail(final Object o) {
  if (o instanceof KafkaMessageId) {
    final KafkaMessageId id = (KafkaMessageId) o;
    // delegate decision of replaying the message to failure policy
    if (_failHandler.shouldReplay(id)) {
      LOG.debug("kafka message id {} failed in topology, adding to buffer again", id);
      _queue.add(id);
    } else {
      LOG.debug("kafka message id {} failed in topology, delegating failure to policy", id);
      // remove message from pending; _failHandler will take action if needed
      _failHandler.fail(id, _inProgress.remove(id));
    }
  }
}
public IContainer next() {
  String key = containers.firstKey();
  IContainer result = containers.get(key);
  containers.remove(key);
  key += "/";

  // Remove child containers
  SortedMap<String, IContainer> childs = containers.tailMap(key);
  for (Iterator<String> it = childs.keySet().iterator(); it.hasNext(); ) {
    if (it.next().startsWith(key)) it.remove();
    else break;
  }
  return result;
}
/** Cleanup references to committed transactions that are no longer needed. */
synchronized void removeUnNeededCommitedTransactions() {
  Integer minStartSeqNumber = getMinStartSequenceNumber();
  if (minStartSeqNumber == null) {
    minStartSeqNumber = Integer.MAX_VALUE; // Remove all
  }

  int numRemoved = 0;
  // Copy the entries to a list to avoid a concurrent modification exception
  for (Entry<Integer, TransactionState> entry :
      new LinkedList<Entry<Integer, TransactionState>>(
          commitedTransactionsBySequenceNumber.entrySet())) {
    if (entry.getKey() >= minStartSeqNumber) {
      break;
    }
    numRemoved =
        numRemoved
            + (commitedTransactionsBySequenceNumber.remove(entry.getKey()) == null ? 0 : 1);
  }

  if (LOG.isDebugEnabled()) {
    StringBuilder debugMessage = new StringBuilder();
    if (numRemoved > 0) {
      debugMessage.append("Removed [").append(numRemoved).append("] committed transactions");
      if (minStartSeqNumber == Integer.MAX_VALUE) {
        debugMessage.append(" with any sequence number.");
      } else {
        debugMessage
            .append(" with sequence lower than [")
            .append(minStartSeqNumber)
            .append("].");
      }
      if (!commitedTransactionsBySequenceNumber.isEmpty()) {
        debugMessage
            .append(" Still have [")
            .append(commitedTransactionsBySequenceNumber.size())
            .append("] left.");
      } else {
        debugMessage.append(" None left.");
      }
      LOG.debug(debugMessage.toString());
    } else if (commitedTransactionsBySequenceNumber.size() > 0) {
      debugMessage
          .append("Could not remove any transactions, and still have ")
          .append(commitedTransactionsBySequenceNumber.size())
          .append(" left");
      LOG.debug(debugMessage.toString());
    }
  }
}
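// Illustrative sketch (an assumption about the map, not the project's code): if the map of
// committed transactions is sorted by sequence number, the "remove everything below the
// minimum" loop above can also be expressed with a headMap view, whose clear() writes through
// to the backing map. Names here (HeadMapEvictDemo, bySeq) are hypothetical.
import java.util.SortedMap;
import java.util.TreeMap;

public class HeadMapEvictDemo {
  public static void main(String[] args) {
    SortedMap<Integer, String> bySeq = new TreeMap<Integer, String>();
    bySeq.put(1, "tx1");
    bySeq.put(2, "tx2");
    bySeq.put(5, "tx5");

    int minStartSeqNumber = 5;
    SortedMap<Integer, String> stale = bySeq.headMap(minStartSeqNumber); // keys < 5
    int numRemoved = stale.size();
    stale.clear(); // removes tx1 and tx2 from the backing map
    System.out.println(numRemoved + " removed, left: " + bySeq); // 2 removed, left: {5=tx5}
  }
}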