/**
 * Increases the capacity of and internally reorganizes this hashtable, in order to accommodate
 * and access its entries more efficiently. This method is called automatically when the number of
 * keys in the hashtable exceeds this hashtable's capacity and load factor.
 */
@SuppressWarnings("unchecked")
protected void rehash() {
  int oldCapacity = table.length;
  Entry<?, ?>[] oldMap = table;

  // overflow-conscious code
  int newCapacity = (oldCapacity << 1) + 1;
  if (newCapacity - MAX_ARRAY_SIZE > 0) {
    if (oldCapacity == MAX_ARRAY_SIZE)
      // Keep running with MAX_ARRAY_SIZE buckets
      return;
    newCapacity = MAX_ARRAY_SIZE;
  }
  Entry<?, ?>[] newMap = new Entry<?, ?>[newCapacity];

  modCount++;
  threshold = (int) Math.min(newCapacity * loadFactor, MAX_ARRAY_SIZE + 1);
  table = newMap;

  for (int i = oldCapacity; i-- > 0; ) {
    for (Entry<K, V> old = (Entry<K, V>) oldMap[i]; old != null; ) {
      Entry<K, V> e = old;
      old = old.next;

      int index = (e.hash & 0x7FFFFFFF) % newCapacity;
      e.next = (Entry<K, V>) newMap[index];
      newMap[index] = e;
    }
  }
}
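/*
 * A minimal, self-contained sketch (not part of the class above) of the same growth
 * policy: capacity grows as 2*n + 1 and every key is reindexed with
 * (hash & 0x7FFFFFFF) % newCapacity. The starting capacity of 11 matches
 * java.util.Hashtable's default; the class name here is hypothetical.
 */
public class RehashDemo {
  public static void main(String[] args) {
    int capacity = 11;
    String key = "example-key";
    for (int round = 0; round < 3; round++) {
      int newCapacity = (capacity << 1) + 1;
      int oldIndex = (key.hashCode() & 0x7FFFFFFF) % capacity;
      int newIndex = (key.hashCode() & 0x7FFFFFFF) % newCapacity;
      System.out.println(capacity + " -> " + newCapacity
          + ": bucket for \"" + key + "\" moves " + oldIndex + " -> " + newIndex);
      capacity = newCapacity;
    }
  }
}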
/**
 * Compares the specified object with this map for equality. Returns <tt>true</tt> if the given
 * object is also a map and the two maps represent the same mappings. More formally, two maps
 * <tt>m1</tt> and <tt>m2</tt> represent the same mappings if
 * <tt>m1.entrySet().equals(m2.entrySet())</tt>. This ensures that the <tt>equals</tt> method
 * works properly across different implementations of the <tt>Map</tt> interface.
 *
 * <p>This implementation first checks if the specified object is this map; if so it returns
 * <tt>true</tt>. Then, it checks if the specified object is a map whose size is identical to the
 * size of this map; if not, it returns <tt>false</tt>. If so, it iterates over this map's
 * <tt>entrySet</tt> collection, and checks that the specified map contains each mapping that this
 * map contains. If the specified map fails to contain such a mapping, <tt>false</tt> is returned.
 * If the iteration completes, <tt>true</tt> is returned.
 *
 * @param o object to be compared for equality with this map
 * @return <tt>true</tt> if the specified object is equal to this map
 */
public boolean equals(Object o) {
  if (o == this) return true;

  if (!(o instanceof Map)) return false;
  Map<K, V> m = (Map<K, V>) o;
  if (m.size() != size()) return false;

  try {
    Iterator<Entry<K, V>> i = entrySet().iterator();
    while (i.hasNext()) {
      Entry<K, V> e = i.next();
      K key = e.getKey();
      V value = e.getValue();
      if (value == null) {
        if (!(m.get(key) == null && m.containsKey(key))) return false;
      } else {
        if (!value.equals(m.get(key))) return false;
      }
    }
  } catch (ClassCastException unused) {
    return false;
  } catch (NullPointerException unused) {
    return false;
  }

  return true;
}
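/*
 * A quick, runnable illustration of the cross-implementation contract described
 * above, using plain JDK types rather than this class: a HashMap and a TreeMap
 * holding the same mappings compare equal even though their internals and
 * iteration orders differ.
 */
import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;

public class MapEqualsDemo {
  public static void main(String[] args) {
    Map<String, Integer> hash = new HashMap<>();
    Map<String, Integer> tree = new TreeMap<>();
    hash.put("a", 1);
    hash.put("b", 2);
    tree.put("b", 2);
    tree.put("a", 1);
    System.out.println(hash.equals(tree)); // true: same mappings
    tree.put("c", 3);
    System.out.println(hash.equals(tree)); // false: sizes now differ
  }
}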
public final VALUE remove(int index) {
  final Entry<KEY, VALUE> entry = removeList(index);
  final VALUE value = entry.value;
  removeMap(entry.key);
  entry.value = null;
  return value;
}
Entry encode(final T o, final String parentDN) throws LDAPPersistException {
  // Get the attributes that should be included in the entry.
  final LinkedHashMap<String, Attribute> attrMap = new LinkedHashMap<String, Attribute>();
  attrMap.put("objectClass", objectClassAttribute);

  for (final Map.Entry<String, FieldInfo> e : fieldMap.entrySet()) {
    final FieldInfo i = e.getValue();
    if (!i.includeInAdd()) {
      continue;
    }

    final Attribute a = i.encode(o, false);
    if (a != null) {
      attrMap.put(e.getKey(), a);
    }
  }

  for (final Map.Entry<String, GetterInfo> e : getterMap.entrySet()) {
    final GetterInfo i = e.getValue();
    if (!i.includeInAdd()) {
      continue;
    }

    final Attribute a = i.encode(o);
    if (a != null) {
      attrMap.put(e.getKey(), a);
    }
  }

  final String dn = constructDN(o, parentDN, attrMap);
  final Entry entry = new Entry(dn, attrMap.values());

  if (postEncodeMethod != null) {
    try {
      postEncodeMethod.invoke(o, entry);
    } catch (Throwable t) {
      debugException(t);

      if (t instanceof InvocationTargetException) {
        t = ((InvocationTargetException) t).getTargetException();
      }

      throw new LDAPPersistException(
          ERR_OBJECT_HANDLER_ERROR_INVOKING_POST_ENCODE_METHOD.get(
              postEncodeMethod.getName(), type.getName(), getExceptionMessage(t)),
          t);
    }
  }

  setDNAndEntryFields(o, entry);

  if (superclassHandler != null) {
    final Entry e = superclassHandler.encode(o, parentDN);
    for (final Attribute a : e.getAttributes()) {
      entry.addAttribute(a);
    }
  }

  return entry;
}
public void printStack() {
  System.out.println("curr pos=" + currPos);
  for (Entry s : activeStack) {
    System.out.println(s.getLocus());
  }
  System.out.println();
}
/**
 * Drops the entry for {@code key} if it exists and can be removed. Entries actively being edited
 * cannot be removed.
 *
 * @return true if an entry was removed.
 */
public synchronized boolean remove(String key) throws IOException {
  checkNotClosed();
  validateKey(key);
  Entry entry = lruEntries.get(key);
  if (entry == null || entry.currentEditor != null) {
    return false;
  }

  for (int i = 0; i < valueCount; i++) {
    File file = entry.getCleanFile(i);
    if (!file.delete()) {
      throw new IOException("failed to delete " + file);
    }
    size -= entry.lengths[i];
    entry.lengths[i] = 0;
  }

  redundantOpCount++;
  journalWriter.append(REMOVE + ' ' + key + '\n');
  lruEntries.remove(key);

  if (journalRebuildRequired()) {
    executorService.submit(cleanupCallable);
  }

  return true;
}
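/*
 * A usage sketch, assuming this method lives in the familiar DiskLruCache class
 * with its open(directory, appVersion, valueCount, maxSize) factory and Editor
 * API; treat the exact signatures as assumptions rather than guarantees.
 */
import java.io.File;
import java.io.IOException;

public class RemoveDemo {
  public static void main(String[] args) throws IOException {
    DiskLruCache cache = DiskLruCache.open(new File("/tmp/cache"), 1, 1, 10 * 1024 * 1024);
    DiskLruCache.Editor editor = cache.edit("greeting");
    editor.set(0, "hello"); // write value stream 0
    editor.commit();

    System.out.println(cache.remove("greeting")); // true: entry existed, files deleted
    System.out.println(cache.remove("greeting")); // false: already gone
  }
}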
@SuppressWarnings("unchecked") Boolean visionallRemoveImpl(Object o) { if (!(o instanceof Entry<?, ?>)) { return false; } RootData<K, V> rootData = this.<RootData<K, V>>getRootData(); if (rootData.isLoaded()) { return null; } if (!rootData.isVisionallyReadable(QueuedOperationType.DETACH)) { return null; } if (rootData.isLoading()) { throw new IllegalStateException( LAZY_COMMON_RESOURCE.get().visionOperationWhenDataIsBeingLoaded()); } Entry<K, V> e = (Entry<K, V>) o; Ref<V> ref = rootData.visionallyRead(e.getKey(), null); if (ref != null) { if (ref.get() == null) { return false; } if (!rootData.valueUnifiedComparator().equals(ref.get(), e.getValue())) { return false; } rootData.visinallyRemove(e.getKey(), ref.get()); return true; } return null; }
/**
 * Get the {@link Entry} best associated with the given {@link Action}, or create and populate a
 * new one if it doesn't exist.
 */
protected Entry getEntry(Action action) {
  final String mimeType = action.getMimeType();
  Entry entry = mCache.get(mimeType);
  if (entry != null) return entry;
  entry = new Entry();

  final Intent intent = action.getIntent();
  if (intent != null) {
    final List<ResolveInfo> matches =
        mPackageManager.queryIntentActivities(intent, PackageManager.MATCH_DEFAULT_ONLY);

    // Pick first match, otherwise best found
    ResolveInfo bestResolve = null;
    final int size = matches.size();
    if (size == 1) {
      bestResolve = matches.get(0);
    } else if (size > 1) {
      bestResolve = getBestResolve(intent, matches);
    }

    if (bestResolve != null) {
      final Drawable icon = bestResolve.loadIcon(mPackageManager);

      entry.bestResolve = bestResolve;
      entry.icon = new SoftReference<Drawable>(icon);
    }
  }

  mCache.put(mimeType, entry);
  return entry;
}
public synchronized void unlockResource(final String id) {
  logger.debug("unlockResource: {}", id);
  final Entry entry = this.locker.getEntry(id);
  if (entry == null) {
    return;
  }
  boolean wakeup = false;
  if (entry.readers.remove(Thread.currentThread())) {
    wakeup = true;
  }
  if (entry.writer == Thread.currentThread()) {
    entry.writer = null;
    wakeup = true;
  }
  if (checkRemoveEntry(id, entry)) {
    wakeup = true;
  }
  if (wakeup) {
    notifyAll();
  }
}
/** Remove string that can be in the cache */
private void removeString(String s) {
  Object o = strMap.remove(s);
  if (o instanceof Entry) {
    Entry e = (Entry) o;
    Entry ep = e.prev;
    Entry en = e.next;
    if (e == chain) {
      chain = en;
      if (e == endChain) {
        endChain = null;
      }
    } else { // not beginning of chain
      ep.next = en; // unlink e from its predecessor as well
      if (en != null) {
        en.prev = ep;
      } else {
        endChain = ep;
      }
    }
    freeEntry = e; // free - can be reused for addition
    size--;
  }
  /*
   * In other cases the removed object was either the string, which should
   * be fine here, or it was null.
   */
}
/** Store string that's not yet in the cache */
private void storeString(String s) {
  Entry e;
  if (size >= maxSize) { // take the last entry, move it to the beginning and replace its value
    e = endChain;
    toStart(e);
    strMap.remove(e.str);
    e.str = s;
  } else { // count of entries less than max
    if (freeEntry != null) {
      e = freeEntry;
      freeEntry = null;
      e.str = s;
      e.next = chain;
    } else {
      e = new Entry(s, chain);
    }
    if (chain != null) {
      chain.prev = e;
    } else { // nothing inserted yet
      endChain = e;
    }
    chain = e;
    size++;
  }
  strMap.put(s, e);
}
/**
 * Maps the specified <code>key</code> to the specified <code>value</code> in this hashtable.
 *
 * <p>The value can be retrieved by calling the <code>get</code> method with a key that is equal
 * to the original key.
 *
 * @param key the hashtable key.
 * @param value the value.
 * @return the previous value of the specified key in this hashtable, or <code>null</code> if it
 *     did not have one.
 * @see #get(int)
 */
public T put(int key, T value) {
  // Make sure the key is not already in the hashtable.
  Entry<T> tab[] = table;
  int index = getIndex(key);
  for (Entry<T> e = tab[index]; e != null; e = e.next) {
    if (e.hash == key) {
      T old = e.value;
      e.value = value;
      return old;
    }
  }

  if (count >= threshold) {
    // Rehash the table if the threshold is exceeded
    rehash();

    tab = table;
    index = getIndex(key);
  }

  // Create the new entry.
  Entry<T> e = new Entry<T>(key, value, tab[index]);
  tab[index] = e;
  count++;
  return null;
}
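/*
 * A usage sketch: IntHashtable is a hypothetical name for the enclosing
 * int-keyed class, used here only for illustration. As documented above, put
 * returns the value previously mapped to the key, or null if there was none.
 */
public class IntPutDemo {
  public static void main(String[] args) {
    IntHashtable<String> table = new IntHashtable<String>(); // assumed class name
    System.out.println(table.put(42, "first"));  // null: no previous mapping
    System.out.println(table.put(42, "second")); // "first": the replaced value
    System.out.println(table.get(42));           // "second"
  }
}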
/**
 * {@inheritDoc}
 *
 * <p>This method will, on a best-effort basis, throw a {@link
 * java.util.ConcurrentModificationException} if the remapping function modified this map during
 * computation.
 *
 * @throws ConcurrentModificationException if it is detected that the remapping function modified
 *     this map
 */
@Override
public synchronized V computeIfPresent(
    K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
  Objects.requireNonNull(remappingFunction);

  Entry<?, ?> tab[] = table;
  int hash = key.hashCode();
  int index = (hash & 0x7FFFFFFF) % tab.length;
  @SuppressWarnings("unchecked")
  Entry<K, V> e = (Entry<K, V>) tab[index];
  for (Entry<K, V> prev = null; e != null; prev = e, e = e.next) {
    if (e.hash == hash && e.key.equals(key)) {
      int mc = modCount;
      V newValue = remappingFunction.apply(key, e.value);
      if (mc != modCount) {
        throw new ConcurrentModificationException();
      }
      if (newValue == null) {
        if (prev != null) {
          prev.next = e.next;
        } else {
          tab[index] = e.next;
        }
        modCount = mc + 1;
        count--;
      } else {
        e.value = newValue;
      }
      return newValue;
    }
  }
  return null;
}
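/*
 * A runnable demonstration against java.util.Hashtable (JDK 9+, where this
 * best-effort check exists): structurally modifying the table from inside the
 * remapping function changes modCount and trips the mc != modCount comparison.
 */
import java.util.ConcurrentModificationException;
import java.util.Hashtable;

public class ComputeIfPresentDemo {
  public static void main(String[] args) {
    Hashtable<String, Integer> t = new Hashtable<>();
    t.put("a", 1);
    t.put("b", 2);
    try {
      // The remapping function removes another key mid-computation.
      t.computeIfPresent("a", (k, v) -> {
        t.remove("b");
        return v + 1;
      });
    } catch (ConcurrentModificationException expected) {
      System.out.println("detected concurrent modification, as documented");
    }
  }
}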
/**
 * Returns the hash code value for this Map as per the definition in the Map interface.
 *
 * @see Map#hashCode()
 * @since 1.2
 */
public synchronized int hashCode() {
  /*
   * This code detects the recursion caused by computing the hash code
   * of a self-referential hash table and prevents the stack overflow
   * that would otherwise result. This allows certain 1.1-era
   * applets with self-referential hash tables to work. This code
   * abuses the loadFactor field to do double-duty as a hashCode
   * in progress flag, so as not to worsen the space performance.
   * A negative load factor indicates that hash code computation is
   * in progress.
   */
  int h = 0;
  if (count == 0 || loadFactor < 0) return h; // Returns zero

  loadFactor = -loadFactor; // Mark hashCode computation in progress
  Entry<?, ?>[] tab = table;
  for (Entry<?, ?> entry : tab) {
    while (entry != null) {
      h += entry.hashCode();
      entry = entry.next;
    }
  }
  loadFactor = -loadFactor; // Mark hashCode computation complete

  return h;
}
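/*
 * A runnable check of the guard described above, using java.util.Hashtable: a
 * table that contains itself as a value hashes without a StackOverflowError,
 * because the nested hashCode() call sees the negated loadFactor and returns 0.
 */
import java.util.Hashtable;

public class SelfReferentialHashDemo {
  public static void main(String[] args) {
    Hashtable<String, Object> t = new Hashtable<>();
    t.put("self", t); // the table refers to itself
    System.out.println(t.hashCode()); // terminates; no infinite recursion
  }
}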
/**
 * Creates a new journal that omits redundant information. This replaces the current journal if it
 * exists.
 */
private synchronized void rebuildJournal() throws IOException {
  if (journalWriter != null) {
    journalWriter.close();
  }

  Writer writer = new BufferedWriter(new FileWriter(journalFileTmp), IO_BUFFER_SIZE);
  writer.write(MAGIC);
  writer.write("\n");
  writer.write(VERSION_1);
  writer.write("\n");
  writer.write(Integer.toString(appVersion));
  writer.write("\n");
  writer.write(Integer.toString(valueCount));
  writer.write("\n");
  writer.write("\n");

  for (Entry entry : lruEntries.values()) {
    if (entry.currentEditor != null) {
      writer.write(DIRTY + ' ' + entry.key + '\n');
    } else {
      writer.write(CLEAN + ' ' + entry.key + entry.getLengths() + '\n');
    }
  }

  writer.close();
  journalFileTmp.renameTo(journalFile);
  journalWriter = new BufferedWriter(new FileWriter(journalFile, true), IO_BUFFER_SIZE);
}
// -----------------------------------------------------------
// for "SrvRqst"
// find the matched URLs with (type, scope, predicate, ltag)
// return: error code (short)
//         number of matched URLs (short)
//         URL blocks (decided by previous #URL)
// -----------------------------------------------------------
public synchronized byte[] getMatchedURL(String type, String scope, String pred, String ltag) {
  byte[] buf = null;
  int ecode = Const.OK;
  if (!Util.shareString(daf.getScope(), scope, ",")) {
    ecode = Const.SCOPE_NOT_SUPPORTED;
  }
  b.reset();
  try {
    int count = 0;
    d.writeShort(ecode); // error code
    d.writeShort(count); // URL count, place holder
    if (ecode == Const.OK) { // no error, find matched URLs
      Iterator values = table.values().iterator();
      while (values.hasNext()) {
        Entry e = (Entry) values.next();
        if (e.match(type, scope, pred, ltag)) {
          count++;
          d.writeByte(0);
          d.writeShort(e.getLifetime());
          d.writeShort(e.getURL().length());
          d.writeBytes(e.getURL());
          d.writeByte(0);
        }
      }
    }
    buf = b.toByteArray();
    if (count > 0) Util.writeInt(buf, 2, count, 2); // update count
  } catch (Exception e) {
    if (ServiceLocationManager.displayMSLPTrace) e.printStackTrace();
  }
  return buf;
}
/**
 * Returns a snapshot of the entry named {@code key}, or null if it doesn't exist or is not
 * currently readable. If a value is returned, it is moved to the head of the LRU queue.
 */
public synchronized Snapshot get(String key) throws IOException {
  checkNotClosed();
  validateKey(key);
  Entry entry = lruEntries.get(key);
  if (entry == null) {
    return null;
  }

  if (!entry.readable) {
    return null;
  }

  /*
   * Open all streams eagerly to guarantee that we see a single published
   * snapshot. If we opened streams lazily then the streams could come
   * from different edits.
   */
  InputStream[] ins = new InputStream[valueCount];
  try {
    for (int i = 0; i < valueCount; i++) {
      ins[i] = new FileInputStream(entry.getCleanFile(i));
    }
  } catch (FileNotFoundException e) {
    // a file must have been deleted manually!
    return null;
  }

  redundantOpCount++;
  journalWriter.append(READ + ' ' + key + '\n');
  if (journalRebuildRequired()) {
    executorService.submit(cleanupCallable);
  }

  return new Snapshot(key, entry.sequenceNumber, ins);
}
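/*
 * A reading-side sketch to pair with the write/remove example earlier; it
 * assumes the standard DiskLruCache Snapshot API (getString(int)/close()),
 * which should be treated as an assumption here, not a guarantee.
 */
import java.io.File;
import java.io.IOException;

public class SnapshotDemo {
  public static void main(String[] args) throws IOException {
    DiskLruCache cache = DiskLruCache.open(new File("/tmp/cache"), 1, 1, 10 * 1024 * 1024);
    DiskLruCache.Snapshot snapshot = cache.get("greeting");
    if (snapshot == null) {
      System.out.println("no readable entry"); // absent, or mid-edit
      return;
    }
    try {
      System.out.println(snapshot.getString(0)); // value stream 0 of this entry
    } finally {
      snapshot.close(); // release the underlying file streams
    }
  }
}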
// ------------------------------------------------
// save database to either stdout or a file
// ------------------------------------------------
public synchronized void saveDatabase(BufferedWriter o) {
  Iterator values = table.values().iterator();
  while (values.hasNext()) {
    Entry e = (Entry) values.next();
    e.prtEntry(daf, o);
  }
}
@SuppressWarnings("unchecked") @Override public boolean containsAll(Collection<?> c, ElementMatcher<? super Entry<K, V>> matcher) { this.requiredEnabled(); RootData<K, V> rootData = this.getRootData(); if (matcher == null) { UnifiedComparator<? super V> valueUnifiedComparator = rootData.valueUnifiedComparator(); for (Object o : c) { if (!(o instanceof Entry<?, ?>)) { return false; } Entry<K, V> e = (Entry<K, V>) o; Ref<V> ref = rootData.visionallyRead(e.getKey(), null); if (ref == null) { rootData.load(); return this.getBase().containsAll(c, matcher); } if (ref.get() == null) { return false; } if (!valueUnifiedComparator.equals(ref.get(), e.getValue())) { return false; } } return true; } rootData.load(); return this.getBase().containsAll(c, matcher); }
public final Object remove(int index) {
  Entry e = removeList(index);
  Object value = e.value;
  removeMap(e.key);
  e.value = null;
  return value;
}
/** Re-hashes the table into a new array of buckets. */
private void rehash() {
  // TODO: it is possible to run this method twice, first time using the 2*k+1 prime sequencer
  // for newBucketCount and then with that value reduced to actually shrink capacity. As it is
  // right now, the bucket table can only grow in size
  final Entry[] buckets = m_buckets;

  final int newBucketCount = (m_buckets.length << 1) + 1;
  final Entry[] newBuckets = new Entry[newBucketCount];

  // rehash all entry chains in every bucket:
  for (int b = 0; b < buckets.length; ++b) {
    for (Entry entry = buckets[b]; entry != null; ) {
      // remember next pointer because we are going to reuse this entry
      final Entry next = entry.m_next;
      final int entryKeyHash = entry.m_key.hashCode() & 0x7FFFFFFF;

      // index into the corresponding new hash bucket:
      final int newBucketIndex = entryKeyHash % newBucketCount;

      final Entry bucketListHead = newBuckets[newBucketIndex];
      entry.m_next = bucketListHead;
      newBuckets[newBucketIndex] = entry;

      entry = next;
    }
  }

  m_sizeThreshold = (int) (newBucketCount * m_loadFactor);
  m_buckets = newBuckets;
}
public Set<IPath> getRoots() {
  Set<IPath> result = new HashSet<IPath>();
  for (Entry e : root.children.values()) {
    result.add(e.getPath());
  }
  return result;
}
private void setDNAndEntryFields(final T o, final Entry e) throws LDAPPersistException {
  if (dnField != null) {
    try {
      dnField.set(o, e.getDN());
    } catch (Exception ex) {
      debugException(ex);
      throw new LDAPPersistException(
          ERR_OBJECT_HANDLER_ERROR_SETTING_DN.get(
              type.getName(), e.getDN(), dnField.getName(), getExceptionMessage(ex)),
          ex);
    }
  }

  if (entryField != null) {
    try {
      entryField.set(o, new ReadOnlyEntry(e));
    } catch (Exception ex) {
      debugException(ex);
      throw new LDAPPersistException(
          ERR_OBJECT_HANDLER_ERROR_SETTING_ENTRY.get(
              type.getName(), entryField.getName(), getExceptionMessage(ex)),
          ex);
    }
  }
}
public V getByKey(K k) {
  Entry e = kEntyMap.get(k);
  if (e == null) {
    return null;
  }
  return e.getV();
}
/**
 * Copies the key/value mappings in <tt>map</tt> into this map. Note that this will be a
 * <b>deep</b> copy, as storage is by primitive value.
 *
 * @param map a <code>Map</code> value
 */
public void putAll(Map<? extends Character, ? extends Double> map) {
  Iterator<? extends Entry<? extends Character, ? extends Double>> it = map.entrySet().iterator();
  for (int i = map.size(); i-- > 0; ) {
    Entry<? extends Character, ? extends Double> e = it.next();
    this.put(e.getKey(), e.getValue());
  }
}
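/*
 * A usage sketch; TCharDoubleHashMap is assumed here as a Trove-style primitive
 * map exposing this putAll(Map) bridge, so treat the class name as an
 * assumption. Because values are unboxed into primitive storage, the copy is
 * "deep" in the sense that it cannot alias the source map's boxed values.
 */
import java.util.HashMap;
import java.util.Map;

public class PutAllDemo {
  public static void main(String[] args) {
    Map<Character, Double> boxed = new HashMap<>();
    boxed.put('a', 1.5);
    boxed.put('b', 2.5);

    TCharDoubleHashMap primitive = new TCharDoubleHashMap(); // assumed class
    primitive.putAll(boxed); // each entry unboxed via put(char, double)
    System.out.println(primitive.get('a')); // 1.5
  }
}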
public K getByValue(V v) {
  Entry e = vEntyMap.get(v);
  if (e == null) {
    return null;
  }
  return e.getK();
}
/**
 * {@inheritDoc}
 *
 * <p>This implementation first checks the structure of {@code object}. If it is not a map or of a
 * different size, this returns false. Otherwise it iterates its own entry set, looking up each
 * entry's key in {@code object}. If any value does not equal the other map's value for the same
 * key, this returns false. Otherwise it returns true.
 */
@Override
public boolean equals(Object object) {
  if (this == object) {
    return true;
  }
  if (object instanceof Map) {
    Map<?, ?> map = (Map<?, ?>) object;
    if (size() != map.size()) {
      return false;
    }

    try {
      for (Entry<K, V> entry : entrySet()) {
        K key = entry.getKey();
        V mine = entry.getValue();
        Object theirs = map.get(key);
        if (mine == null) {
          if (theirs != null || !map.containsKey(key)) {
            return false;
          }
        } else if (!mine.equals(theirs)) {
          return false;
        }
      }
    } catch (NullPointerException ignored) {
      return false;
    } catch (ClassCastException ignored) {
      return false;
    }
    return true;
  }
  return false;
}
private void readJournalLine(String line) throws IOException {
  String[] parts = line.split(" ");
  if (parts.length < 2) {
    throw new IOException("unexpected journal line: " + line);
  }

  String key = parts[1];
  if (parts[0].equals(REMOVE) && parts.length == 2) {
    lruEntries.remove(key);
    return;
  }

  Entry entry = lruEntries.get(key);
  if (entry == null) {
    entry = new Entry(key);
    lruEntries.put(key, entry);
  }

  if (parts[0].equals(CLEAN) && parts.length == 2 + valueCount) {
    entry.readable = true;
    entry.currentEditor = null;
    entry.setLengths(copyOfRange(parts, 2, parts.length));
  } else if (parts[0].equals(DIRTY) && parts.length == 2) {
    entry.currentEditor = new Editor(entry);
  } else if (parts[0].equals(READ) && parts.length == 2) {
    // this work was already done by calling lruEntries.get()
  } else {
    throw new IOException("unexpected journal line: " + line);
  }
}
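/*
 * A minimal sketch (not the library's code) showing how the four journal
 * record shapes accepted by readJournalLine split apart; the keys and lengths
 * below are made-up examples, assuming valueCount == 2 for CLEAN lines.
 */
public class JournalLineDemo {
  public static void main(String[] args) {
    String[] lines = {
      "CLEAN 3400330d1dfc7f3f7f4b8d4d803dfcf6 832 21054",
      "DIRTY 335c4c6028171cfddfbaae1a9c313c52",
      "READ 335c4c6028171cfddfbaae1a9c313c52",
      "REMOVE 335c4c6028171cfddfbaae1a9c313c52"
    };
    for (String line : lines) {
      String[] parts = line.split(" ");
      // parts[0] is the opcode, parts[1] the key, any remainder the value lengths
      System.out.println(parts[0] + " -> key=" + parts[1]
          + (parts.length > 2 ? ", lengthFields=" + (parts.length - 2) : ""));
    }
  }
}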
private final Entry<KEY, VALUE> removeMap(Object key) {
  int hashCode = 0;
  int index = 0;

  if (key != null) {
    hashCode = key.hashCode();
    index = (hashCode & 0x7FFFFFFF) % mapTable.length;
    for (Entry<KEY, VALUE> e = mapTable[index], prev = null; e != null; prev = e, e = e.next) {
      if ((e.hashCode == hashCode) && key.equals(e.key)) {
        if (prev != null) {
          prev.next = e.next;
        } else {
          mapTable[index] = e.next;
        }
        return e;
      }
    }
  } else {
    for (Entry<KEY, VALUE> e = mapTable[index], prev = null; e != null; prev = e, e = e.next) {
      if ((e.hashCode == hashCode) && e.key == null) {
        if (prev != null) {
          prev.next = e.next;
        } else {
          mapTable[index] = e.next;
        }
        return e;
      }
    }
  }
  return null;
}
public void remove() {
  if (!iterator) throw new UnsupportedOperationException();
  if (lastReturned == null) throw new IllegalStateException("Hashtable Enumerator");
  if (modCount != expectedModCount) throw new ConcurrentModificationException();

  synchronized (Hashtable.this) {
    Entry<?, ?>[] tab = Hashtable.this.table;
    int index = (lastReturned.hash & 0x7FFFFFFF) % tab.length;

    @SuppressWarnings("unchecked")
    Entry<K, V> e = (Entry<K, V>) tab[index];
    for (Entry<K, V> prev = null; e != null; prev = e, e = e.next) {
      if (e == lastReturned) {
        if (prev == null) tab[index] = e.next;
        else prev.next = e.next;
        expectedModCount++;
        lastReturned = null;
        Hashtable.this.modCount++;
        Hashtable.this.count--;
        return;
      }
    }
    throw new ConcurrentModificationException();
  }
}
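/*
 * A runnable illustration of what this Iterator.remove implementation enables
 * on java.util.Hashtable: removing entries mid-iteration through the iterator
 * itself, where a direct Hashtable.remove during iteration would instead fail
 * the expectedModCount check with a ConcurrentModificationException.
 */
import java.util.Hashtable;
import java.util.Iterator;
import java.util.Map;

public class IteratorRemoveDemo {
  public static void main(String[] args) {
    Hashtable<String, Integer> t = new Hashtable<>();
    t.put("a", 1);
    t.put("b", 2);
    t.put("c", 3);

    Iterator<Map.Entry<String, Integer>> it = t.entrySet().iterator();
    while (it.hasNext()) {
      if (it.next().getValue() % 2 == 0) {
        it.remove(); // keeps expectedModCount in sync with modCount
      }
    }
    System.out.println(t); // remaining odd-valued entries (order unspecified)
  }
}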