public void deleteTestCase(long id) {
  String jsonTestCase = testCaseMap.get(id);
  if (jsonTestCase != null) {
    TestCase testCase = new Gson().fromJson(jsonTestCase, TestCase.class);
    testCaseIndex.remove(testCase.title);
    testCaseMap.remove(id);
    db.commit();
  }
}
/** remove(key,value) removes only if pair present */
public void testRemove2() {
  ConcurrentNavigableMap map = map5();
  assertTrue(map.containsKey(five));
  assertEquals("E", map.get(five));
  map.remove(five, "E");
  assertEquals(4, map.size());
  assertFalse(map.containsKey(five));
  map.remove(four, "A");
  assertEquals(4, map.size());
  assertTrue(map.containsKey(four));
}
/** remove(key,value) removes only if pair present */
public void testDescendingRemove2() {
  ConcurrentNavigableMap map = dmap5();
  assertTrue(map.containsKey(m5));
  assertEquals("E", map.get(m5));
  map.remove(m5, "E");
  assertEquals(4, map.size());
  assertFalse(map.containsKey(m5));
  map.remove(m4, "A");
  assertEquals(4, map.size());
  assertTrue(map.containsKey(m4));
}
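// Illustrative sketch, not part of the test suite above: ConcurrentMap.remove(key, value)
// only removes an entry when the key is currently mapped to exactly that value. The map
// contents and variable names here are hypothetical.
import java.util.concurrent.ConcurrentNavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;

public class ConditionalRemoveSketch {
  public static void main(String[] args) {
    ConcurrentNavigableMap<Integer, String> map = new ConcurrentSkipListMap<>();
    map.put(5, "E");
    boolean removedWrongValue = map.remove(5, "X"); // false: 5 is mapped to "E", not "X"
    boolean removedMatching = map.remove(5, "E");   // true: the pair (5, "E") was present
    System.out.println(removedWrongValue + " " + removedMatching + " size=" + map.size());
  }
}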
public static void clearOrphan(ClusterAddressInfo address) {
  Integer delay = orphans.remove(address);
  delay = (delay == null ? 0 : delay);
  if (delay > 2) {
    LOG.warn("Forgetting stale orphan address mapping for " + address.toString());
  }
}
/** remove(null, x) throws NPE */
public void testRemove2_NullPointerException() {
  try {
    ConcurrentNavigableMap c = map5();
    c.remove(null, "whatever");
    shouldThrow();
  } catch (NullPointerException success) {
  }
}
/** remove(null) throws NPE */
public void testDescendingRemove1_NullPointerException() {
  try {
    ConcurrentNavigableMap c = dmap5();
    c.remove(null);
    shouldThrow();
  } catch (NullPointerException success) {
  }
}
/** {@inheritDoc} */
@Override
@Validate
public V remove(@NotNull final T key, @NotNull final K subkey) {
  ConcurrentNavigableMap<K, V> m = map.get(key);
  if (m != null) return m.remove(subkey);
  return null;
}
public static void handleOrphan(Cluster cluster, ClusterAddressInfo address) {
  Integer orphanCount = 1;
  orphanCount = orphans.putIfAbsent(address, orphanCount);
  orphanCount = (orphanCount == null) ? 1 : orphanCount;
  orphans.put(address, orphanCount + 1);
  EventRecord.caller(
          ClusterState.class,
          EventType.ADDRESS_STATE,
          "Updated orphaned public ip address: " + LogUtil.dumpObject(address) + " count=" + orphanCount)
      .debug();
  if (orphanCount > AddressingConfiguration.getInstance().getMaxKillOrphans()) {
    EventRecord.caller(
            ClusterState.class,
            EventType.ADDRESS_STATE,
            "Unassigning orphaned public ip address: " + LogUtil.dumpObject(address) + " count=" + orphanCount)
        .warn();
    try {
      final Address addr = Addresses.getInstance().lookup(address.getAddress());
      if (addr.isPending()) {
        try {
          addr.clearPending();
        } catch (Exception ex) {
        }
      }
      try {
        if (addr.isAssigned() && "0.0.0.0".equals(address.getInstanceIp())) {
          addr.unassign().clearPending();
          if (addr.isSystemOwned()) {
            addr.release();
          }
        } else if (addr.isAssigned() && !"0.0.0.0".equals(address.getInstanceIp())) {
          AsyncRequests.newRequest(new UnassignAddressCallback(address))
              .sendSync(cluster.getConfiguration());
          if (addr.isSystemOwned()) {
            addr.release();
          }
        } else if (!addr.isAssigned() && addr.isAllocated() && addr.isSystemOwned()) {
          addr.release();
        }
      } catch (ExecutionException ex) {
        if (!addr.isAssigned() && addr.isAllocated() && addr.isSystemOwned()) {
          addr.release();
        }
      }
    } catch (InterruptedException ex) {
      Exceptions.maybeInterrupted(ex);
    } catch (NoSuchElementException ex) {
    } finally {
      orphans.remove(address);
    }
  }
}
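// Illustrative sketch only: the orphan bookkeeping above bumps a per-address counter with
// putIfAbsent followed by put, which is not atomic if several callers race on the same key.
// A single ConcurrentMap.merge call expresses the same increment atomically. The class name,
// threshold, and key type below are hypothetical, not Eucalyptus APIs.
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class OrphanCounterSketch {
  private static final int MAX_KILL_ORPHANS = 10; // assumed threshold
  private final ConcurrentMap<String, Integer> orphans = new ConcurrentHashMap<>();

  /** Returns true once the address has been reported orphaned more often than the threshold. */
  public boolean recordOrphan(String address) {
    int count = orphans.merge(address, 1, Integer::sum);
    return count > MAX_KILL_ORPHANS;
  }

  /** Clears the bookkeeping once the address is seen assigned again. */
  public void clearOrphan(String address) {
    orphans.remove(address);
  }
}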
/** {@inheritDoc} */
@Override
@Validate
public V removeSubKey(@NotNull final K subkey) {
  for (ConcurrentNavigableMap<K, V> m : map.values()) {
    V value = m.get(subkey);
    if (value != null) return m.remove(subkey);
  }
  return null;
}
private static void handleOrphan(String cluster, Address address) {
  Integer orphanCount = 1;
  orphanCount = orphans.putIfAbsent(address.getName(), orphanCount);
  orphanCount = (orphanCount == null) ? 1 : orphanCount;
  orphans.put(address.getName(), orphanCount + 1);
  LOG.warn("Found orphaned public ip address: " + address + " count=" + orphanCount);
  if (orphanCount > 10) {
    orphans.remove(address.getName());
    Clusters.dispatchClusterEvent(cluster, new UnassignAddressCallback(address));
  }
}
/** {@inheritDoc} */
@Override
@Validate
public V[] removeSubKeys(@NotNull final K subkey) {
  List<V> list = new ArrayList<V>();
  for (ConcurrentNavigableMap<K, V> m : map.values()) {
    V value = m.remove(subkey);
    if (value != null) list.add(value);
  }
  V[] result = Linq.toArray(list, getGenericTypeParameterValue());
  if (result.length == 0) result = null;
  return result;
}
public static void update(String cluster, List<Pair> ccList) {
  List<String> ccListAddrs =
      Lists.transform(
          ccList,
          new Function<Pair, String>() {
            @Override
            public String apply(Pair p) {
              return p.getLeft();
            }
          });
  for (Pair p : ccList) {
    Address address = AddressUtil.lookupOrCreate(cluster, p);
    try {
      InetAddress addr = Inet4Address.getByName(p.getRight());
      VmInstance vm;
      try {
        vm = VmInstances.getInstance().lookupByInstanceIp(p.getRight());
        if (Address.UNALLOCATED_USERID.equals(address.getUserId())) {
          address.allocate(Component.eucalyptus.name());
        }
        if (!address.isAssigned()) {
          address.setAssigned(vm.getInstanceId(), p.getRight());
        }
        orphans.remove(address.getName());
      } catch (Exception e1) {
        if (!addr.isLoopbackAddress() && !AddressUtil.checkForPendingVm()) {
          AddressUtil.handleOrphan(cluster, address);
        } else {
          orphans.remove(address.getName());
        }
      }
    } catch (UnknownHostException e1) {
      LOG.debug(e1, e1);
      orphans.remove(address.getName());
    }
  }
}
private void cleanup() {
  try {
    if ((this.transaction != null) && this.transaction.isActive()) {
      this.transaction.rollback();
    }
    this.transaction = null;
    if ((this.session != null) && (this.session.get() != null)) {
      this.session.clear();
    }
    if ((this.em != null) && this.em.isOpen()) {
      this.em.close();
    }
    this.em = null;
  } finally {
    outstanding.remove(this.txUuid);
  }
}
private void registerRemoval(String id, long lifeTime, TimeUnit unit) {
  ITaskManagerHook hook = taskManagerHooks.get(id);
  if (hook != null) {
    long removalTime = unit.toMillis(lifeTime) + System.currentTimeMillis();
    // Remove the currently scheduled removal, if any.
    Long currentRemovalTime = taskManagerRemovalBackRegister.get(hook);
    if (currentRemovalTime != null) {
      taskManagerRemovalRegister.remove(currentRemovalTime);
    }
    // Find an empty spot in the sorted map's key register.
    removalTime--;
    do {
      removalTime++;
      taskManagerRemovalRegister.putIfAbsent(removalTime, hook);
    } while (taskManagerRemovalRegister.get(removalTime) != hook);
    // Back-reference the removal so it can be rescheduled later.
    taskManagerRemovalBackRegister.put(hook, removalTime);
  }
}
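// Illustrative sketch of the collision-avoidance loop above, in isolation: keep probing
// successive millisecond keys with putIfAbsent until this hook owns a slot of its own.
// The value type and field names are hypothetical stand-ins for the task-manager types.
import java.util.concurrent.ConcurrentSkipListMap;

public class RemovalScheduleSketch {
  private final ConcurrentSkipListMap<Long, Object> removalRegister = new ConcurrentSkipListMap<>();

  /** Claims the first free key at or after the requested removal time and returns it. */
  public long claimSlot(long removalTime, Object hook) {
    long key = removalTime - 1;
    do {
      key++;
      removalRegister.putIfAbsent(key, hook);
    } while (removalRegister.get(key) != hook);
    return key;
  }
}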
public boolean removeBinding(ResourceAddress address, Binding binding) {
  ResourceAddress bindAddress = binding.bindAddress();
  String nextProtocol = bindAddress.getOption(NEXT_PROTOCOL);
  if (nextProtocol == null) {
    Binding oldBinding = nullNextProtocol.get();
    if (equivalent(oldBinding, binding)) {
      binding = oldBinding;
    }
    if (binding.decrementReferenceCount() == 0) {
      return nullNextProtocol.compareAndSet(binding, null);
    }
    return false;
  }
  Binding oldBinding = nextProtocols.get(nextProtocol);
  if (equivalent(oldBinding, binding)) {
    binding = oldBinding;
  }
  if (binding.decrementReferenceCount() == 0) {
    return nextProtocols.remove(nextProtocol, binding);
  }
  return false;
}
/** pollLastEntry returns entries in order */
public void testDescendingPollLastEntry() {
  ConcurrentNavigableMap map = dmap5();
  Map.Entry e = map.pollLastEntry();
  assertEquals(m5, e.getKey());
  assertEquals("E", e.getValue());
  e = map.pollLastEntry();
  assertEquals(m4, e.getKey());
  map.put(m5, "E");
  e = map.pollLastEntry();
  assertEquals(m5, e.getKey());
  assertEquals("E", e.getValue());
  e = map.pollLastEntry();
  assertEquals(m3, e.getKey());
  map.remove(m2);
  e = map.pollLastEntry();
  assertEquals(m1, e.getKey());
  try {
    e.setValue("E");
    shouldThrow();
  } catch (UnsupportedOperationException success) {
  }
  e = map.pollLastEntry();
  assertNull(e);
}
/** pollLastEntry returns entries in order */
public void testPollLastEntry() {
  ConcurrentNavigableMap map = map5();
  Map.Entry e = map.pollLastEntry();
  assertEquals(five, e.getKey());
  assertEquals("E", e.getValue());
  e = map.pollLastEntry();
  assertEquals(four, e.getKey());
  map.put(five, "E");
  e = map.pollLastEntry();
  assertEquals(five, e.getKey());
  assertEquals("E", e.getValue());
  e = map.pollLastEntry();
  assertEquals(three, e.getKey());
  map.remove(two);
  e = map.pollLastEntry();
  assertEquals(one, e.getKey());
  try {
    e.setValue("E");
    shouldThrow();
  } catch (UnsupportedOperationException success) {
  }
  e = map.pollLastEntry();
  assertNull(e);
}
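// Illustrative sketch, separate from the TCK tests above: pollLastEntry removes and returns
// the greatest entry as an immutable snapshot and returns null once the map is empty, so it
// can drive a simple drain-in-descending-order loop. Map contents here are hypothetical.
import java.util.Map;
import java.util.concurrent.ConcurrentNavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;

public class PollLastEntrySketch {
  public static void main(String[] args) {
    ConcurrentNavigableMap<Integer, String> map = new ConcurrentSkipListMap<>();
    map.put(1, "A");
    map.put(2, "B");
    map.put(3, "C");
    for (Map.Entry<Integer, String> e = map.pollLastEntry(); e != null; e = map.pollLastEntry()) {
      System.out.println(e.getKey() + "=" + e.getValue()); // prints 3=C, 2=B, 1=A
    }
  }
}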
public void remove(String key) {
  treeMap.remove(key);
  db.commit();
}
/** remove removes the correct key-value pair from the map */
public void testDescendingRemove() {
  ConcurrentNavigableMap map = dmap5();
  map.remove(m5);
  assertEquals(4, map.size());
  assertFalse(map.containsKey(m5));
}
/**
 * The async processing loop that writes to the data files and does the force calls. Since the
 * file sync() call is the slowest of all the operations, this algorithm tries to 'batch' or
 * group together several file sync() requests into a single file sync() call. The batching is
 * accomplished by attaching the same CountDownLatch instance to every force request in a group.
 */
private void processQueue() {
  DataFile dataFile = null;
  RandomAccessFile file = null;
  try {
    DataByteArrayOutputStream buff =
        new DataByteArrayOutputStream(journal.getMaxWriteBatchSize());
    boolean last = false;
    while (true) {
      WriteBatch wb = batchQueue.take();
      if (shutdown) {
        last = true;
      }
      if (!wb.writes.isEmpty()) {
        boolean newOrRotated = dataFile != wb.dataFile;
        if (newOrRotated) {
          if (file != null) {
            dataFile.closeRandomAccessFile(file);
          }
          dataFile = wb.dataFile;
          file = dataFile.openRandomAccessFile();
        }

        // Write an empty batch control record.
        buff.reset();
        buff.writeInt(Journal.BATCH_CONTROL_RECORD_SIZE);
        buff.writeByte(Journal.BATCH_CONTROL_RECORD_TYPE);
        buff.write(Journal.BATCH_CONTROL_RECORD_MAGIC);
        buff.writeInt(0);
        buff.writeLong(0);

        boolean forceToDisk = false;
        WriteCommand control = wb.writes.poll();
        WriteCommand first = wb.writes.peek();
        WriteCommand latest = null;
        for (WriteCommand current : wb.writes) {
          forceToDisk |= current.sync;
          buff.writeInt(current.location.getSize());
          buff.writeByte(current.location.getType());
          buff.write(current.data.getData(), current.data.getOffset(), current.data.getLength());
          latest = current;
        }

        Buffer sequence = buff.toBuffer();

        // Now we can fill in the batch control record properly.
        buff.reset();
        buff.skip(Journal.HEADER_SIZE + Journal.BATCH_CONTROL_RECORD_MAGIC.length);
        buff.writeInt(sequence.getLength() - Journal.BATCH_CONTROL_RECORD_SIZE);
        if (journal.isChecksum()) {
          Checksum checksum = new Adler32();
          checksum.update(
              sequence.getData(),
              sequence.getOffset() + Journal.BATCH_CONTROL_RECORD_SIZE,
              sequence.getLength() - Journal.BATCH_CONTROL_RECORD_SIZE);
          buff.writeLong(checksum.getValue());
        }

        // Now do the one big write.
        file.seek(wb.offset);
        file.write(sequence.getData(), sequence.getOffset(), sequence.getLength());

        ReplicationTarget replicationTarget = journal.getReplicationTarget();
        if (replicationTarget != null) {
          replicationTarget.replicate(control.location, sequence, forceToDisk);
        }

        if (forceToDisk) {
          IOHelper.sync(file.getFD());
        }

        journal.setLastAppendLocation(latest.location);

        // Now that the data is on disk, remove the writes from the in-flight cache.
        inflightWrites.remove(control.location);
        for (WriteCommand current : wb.writes) {
          if (!current.sync) {
            inflightWrites.remove(current.location);
          }
        }

        if (journal.getListener() != null) {
          try {
            journal.getListener().synced(wb.writes.toArray(new WriteCommand[wb.writes.size()]));
          } catch (Throwable ex) {
            warn(ex, ex.getMessage());
          }
        }

        // Clear unused data.
        wb.writes.clear();

        // Signal any waiting threads that the write is on disk.
        wb.latch.countDown();
      }
      if (last) {
        break;
      }
    }
  } catch (Exception e) {
    firstAsyncException.compareAndSet(null, e);
  } finally {
    try {
      if (file != null) {
        dataFile.closeRandomAccessFile(file);
      }
    } catch (Throwable ignore) {
    }
    shutdownDone.countDown();
  }
}
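// Minimal sketch of the batching idea described in the javadoc above, using hypothetical
// types rather than the journal's own classes: every write request in a batch shares one
// CountDownLatch, the writer thread performs a single sync() for the whole group, and then
// releases all waiters at once.
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.List;
import java.util.concurrent.CountDownLatch;

public class BatchedSyncSketch {
  /** One group of writes that will be forced to disk by a single sync() call. */
  static class WriteBatch {
    final List<byte[]> writes;
    final CountDownLatch latch = new CountDownLatch(1);
    WriteBatch(List<byte[]> writes) { this.writes = writes; }
  }

  /** Writer side: append every record in the batch, sync once, then wake all waiters. */
  static void process(WriteBatch batch, RandomAccessFile file) throws IOException {
    for (byte[] record : batch.writes) {
      file.write(record);
    }
    file.getFD().sync();      // one expensive force call for the whole group
    batch.latch.countDown();  // every thread waiting on this batch is released together
  }

  /** Caller side: a thread that enqueued a write simply awaits the shared latch. */
  static void awaitDurable(WriteBatch batch) throws InterruptedException {
    batch.latch.await();
  }
}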
/** remove removes the correct key-value pair from the map */
public void testRemove() {
  ConcurrentNavigableMap map = map5();
  map.remove(five);
  assertEquals(4, map.size());
  assertFalse(map.containsKey(five));
}