/** subMap with both bounds inclusive must render exactly the bounded entries. */
@Test
public void submapToString() {
  for (int key = 0; key < 20; key++) {
    m.put(key, "aa" + key);
  }
  Map inclusiveView = m.subMap(10, true, 13, true);
  assertEquals("{10=aa10, 11=aa11, 12=aa12, 13=aa13}", inclusiveView.toString());
}
/** nextDir must route keys to the correct child, honoring null keys as open (infinite) edges. */
@Test
public void test_next_dir_infinity() {
  BTreeMap m = new BTreeMap(engine, 32, true, false, null, null, null, null);

  // Leading null key: node is open towards negative infinity.
  BTreeMap.DirNode dir =
      new BTreeMap.DirNode(new Object[] {null, 62, 68, 71}, new long[] {10, 20, 30, 40});
  assertEquals(10, m.nextDir(dir, 33));
  assertEquals(10, m.nextDir(dir, 62));
  assertEquals(20, m.nextDir(dir, 63));

  // Trailing null key: node is open towards positive infinity.
  dir = new BTreeMap.DirNode(new Object[] {44, 62, 68, null}, new long[] {10, 20, 30, 40});
  assertEquals(10, m.nextDir(dir, 62));
  assertEquals(10, m.nextDir(dir, 44));
  assertEquals(10, m.nextDir(dir, 48));
  assertEquals(20, m.nextDir(dir, 63));
  assertEquals(20, m.nextDir(dir, 64));
  assertEquals(20, m.nextDir(dir, 68));
  assertEquals(30, m.nextDir(dir, 69));
  assertEquals(30, m.nextDir(dir, 70));
  assertEquals(30, m.nextDir(dir, 71));
  assertEquals(30, m.nextDir(dir, 72));
  assertEquals(30, m.nextDir(dir, 73));
}
@Test public void concurrent_first_key() { DB db = DBMaker.memoryDB().transactionDisable().make(); final BTreeMap m = db.treeMap("name"); // fill final int c = 1000000 * TT.scale(); for (int i = 0; i <= c; i++) { m.put(i, i); } Thread t = new Thread() { @Override public void run() { for (int i = 0; i <= c; i++) { m.remove(c); } } }; t.run(); while (t.isAlive()) { assertNotNull(m.firstKey()); } }
/**
 * Commit all changes and persist them to disk. This method does nothing if there are no unsaved
 * changes, otherwise it increments the current version and stores the data (for file based
 * storages).
 *
 * <p>At most one storage operation may run at any time.
 *
 * @return the new version (incremented if there were changes), the unchanged current version if
 *     the store is closed or has no unsaved changes, or -1 if saving failed
 * @throws IllegalStateException if the storage is in-memory or read-only
 */
private synchronized long commitAndSave() {
  if (closed) {
    // Closed store: nothing to write, report the version unchanged.
    return currentVersion;
  }
  if (map.isInMemory()) {
    throw DataUtils.newIllegalStateException(
        DataUtils.ERROR_WRITING_FAILED, "This is an in-memory storage");
  }
  if (map.isReadOnly()) {
    throw DataUtils.newIllegalStateException(
        DataUtils.ERROR_WRITING_FAILED, "This storage is read-only");
  }
  if (!hasUnsavedChanges()) {
    return currentVersion;
  }
  try {
    // Mark the version currently being stored; other code (e.g. the oldest-version
    // computation) reads this marker while the save is in progress.
    currentStoreVersion = currentVersion;
    return save();
  } catch (IllegalStateException e) {
    // The save failed part-way: put the store into panic state and signal failure.
    panic(e);
    return -1;
  } finally {
    // Always clear the in-progress marker, whether the save succeeded or panicked.
    currentStoreVersion = -1;
  }
}
/**
 * Open an old, stored version of a map.
 *
 * @param version the version
 * @return the read-only map positioned at that version's root page
 */
@SuppressWarnings("unchecked")
<T extends BTreeMap<?, ?>> T openMapVersion(long version) {
  BTreeChunk chunk = getChunkForVersion(version);
  DataUtils.checkArgument(chunk != null, "Unknown version {0}", version);
  BTreeMap<?, ?> readOnlyMap = map.openReadOnly();
  readOnlyMap.setRootPos(chunk.rootPagePos, version);
  return (T) readOnlyMap;
}
/** A single put into an empty tree must produce a root leaf with edge sentinels. */
@Test
public void root_leaf_insert() {
  BTreeMap m = new BTreeMap(engine, 6, true, false, null, null, null, null);
  m.put(11, 12);
  BTreeMap.LeafNode rootLeaf =
      (BTreeMap.LeafNode) engine.recordGet(m.rootRecid, m.nodeSerializer);
  assertArrayEquals(new Object[] {null, 11, null}, rootLeaf.keys);
  assertArrayEquals(new Object[] {null, 12, null}, rootLeaf.vals);
  assertEquals(0, rootLeaf.next);
}
/**
 * Check whether there are any unsaved changes.
 *
 * @return true if the map is open and its version is newer than the last stored one
 */
private boolean hasUnsavedChanges() {
  checkOpen();
  if (map.isClosed()) {
    return false;
  }
  long version = map.getVersion();
  return version >= 0 && version > lastStoredVersion;
}
// Verifies that the modification listener is invoked while the writing thread still
// holds the root node lock, and exactly once per successful mutation.
@Test
public void mod_listener_lock() {
  DB db = DBMaker.memoryDB().transactionDisable().make();
  final BTreeMap m = db.treeMap("name");
  final long rootRecid = db.getEngine().get(m.rootRecidRef, Serializer.RECID);
  final AtomicInteger counter = new AtomicInteger();
  m.modificationListenerAdd(
      new Bind.MapListener() {
        @Override
        public void update(Object key, Object oldVal, Object newVal) {
          // The callback must run while this thread owns the root node lock,
          // and no other node may be locked at that moment.
          assertTrue(m.nodeLocks.get(rootRecid) == Thread.currentThread());
          assertEquals(1, m.nodeLocks.size());
          counter.incrementAndGet();
        }
      });
  // Eight mutating operations below, each expected to fire the listener once:
  // insert, overwrite, remove, re-insert, conditional remove (succeeds),
  // putIfAbsent (key absent, inserts), conditional replace (succeeds), replace.
  m.put("aa", "aa");
  m.put("aa", "bb");
  m.remove("aa");
  m.put("aa", "aa");
  m.remove("aa", "aa");
  m.putIfAbsent("aa", "bb");
  m.replace("aa", "bb", "cc");
  m.replace("aa", "cc");
  assertEquals(8, counter.get());
}
// Same routing checks as the DirNode long[]-based variant, but using the constructor
// that takes explicit edge flags (left-open / right-open — TODO confirm flag meanings)
// and a child array built via mkchild.
@Test
public void test_next_dir_infinity() {
  // First node: flags (true, false, false) with keys {62, 68, 71};
  // keys below 62 still route to the first child, i.e. the left edge is open.
  BTreeMap.DirNode d =
      new BTreeMap.DirNode(
          new Object[] {62, 68, 71}, true, false, false, mkchild(10, 20, 30, 40));
  assertEquals(10, m.nextDir(d, 33));
  assertEquals(10, m.nextDir(d, 62));
  assertEquals(20, m.nextDir(d, 63));
  // Second node: flags (false, true, false) with keys {44, 62, 68};
  // keys above 68 route to the last child, i.e. the right edge is open.
  d = new BTreeMap.DirNode(new Object[] {44, 62, 68}, false, true, false, mkchild(10, 20, 30, 0));
  assertEquals(10, m.nextDir(d, 62));
  assertEquals(10, m.nextDir(d, 44));
  assertEquals(10, m.nextDir(d, 48));
  assertEquals(20, m.nextDir(d, 63));
  assertEquals(20, m.nextDir(d, 64));
  assertEquals(20, m.nextDir(d, 68));
  assertEquals(30, m.nextDir(d, 69));
  assertEquals(30, m.nextDir(d, 70));
  assertEquals(30, m.nextDir(d, 71));
  assertEquals(30, m.nextDir(d, 72));
  assertEquals(30, m.nextDir(d, 73));
}
/** size() and isEmpty() must track every insertion exactly. */
@Test
public void test_size() {
  assertTrue(m.isEmpty());
  assertEquals(0, m.size());
  for (int count = 1; count < 30; count++) {
    m.put(count, count);
    assertEquals(count, m.size());
    assertFalse(m.isEmpty());
  }
}
/** Insert 1000 keys spaced by 10; only multiples of 10 must be present afterwards. */
@Test
public void batch_insert() {
  for (int slot = 0; slot < 1000; slot++) {
    m.put(slot * 10, slot * 10 + 1);
  }
  for (int key = 0; key < 10000; key++) {
    Integer expected = (key % 10 == 0) ? (Integer) (key + 1) : null;
    assertEquals(expected, m.get(key));
  }
}
/** Same sparse-insert check as above, against a locally constructed small-node tree. */
@Test
public void batch_insert() {
  BTreeMap localMap = new BTreeMap(engine, 6, true, false, null, null, null, null);
  for (int slot = 0; slot < 1000; slot++) {
    localMap.put(slot * 10, slot * 10 + 1);
  }
  for (int key = 0; key < 10000; key++) {
    Integer expected = (key % 10 == 0) ? (Integer) (key + 1) : null;
    assertEquals(expected, localMap.get(key));
  }
}
@Test public void delete() { BTreeMap m = new BTreeMap(engine, 6, true, false, null, null, null, null); for (int i : new int[] { 10, 50, 20, 42, // 44, 68, 20, 93, 85, 71, 62, 77, 4, 37, 66 }) { m.put(i, i); } assertEquals(10, m.remove(10)); assertEquals(20, m.remove(20)); assertEquals(42, m.remove(42)); }
/** The key iterator must yield all keys in ascending order, then be exhausted. */
@Test
public void test_key_iterator() {
  BTreeMap localMap = new BTreeMap(engine, 6, true, false, null, null, null, null);
  for (int key = 0; key < 20; key++) {
    localMap.put(key, key * 10);
  }
  Iterator it = localMap.keySet().iterator();
  for (int expected = 0; expected < 20; expected++) {
    assertTrue(it.hasNext());
    assertEquals(expected, it.next());
  }
  assertFalse(it.hasNext());
}
/** Same ascending-iteration check as above, against the shared fixture map. */
@Test
public void test_key_iterator() {
  for (int key = 0; key < 20; key++) {
    m.put(key, key * 10);
  }
  Iterator it = m.keySet().iterator();
  for (int expected = 0; expected < 20; expected++) {
    assertTrue(it.hasNext());
    assertEquals(expected, it.next());
  }
  assertFalse(it.hasNext());
}
/**
 * Revert to the beginning of the given version. All later changes (stored or not) are forgotten.
 * All maps that were created later are closed. A rollback to a version before the last stored
 * version is immediately persisted. Rollback to version 0 means all data is removed.
 *
 * @param version the version to revert to
 * @throws IllegalArgumentException if the version is not known
 */
public synchronized void rollbackTo(long version) {
  checkOpen();
  if (version == 0) {
    // special case: remove all data
    map.close();
    chunks.clear();
    currentVersion = version;
    return;
  }
  DataUtils.checkArgument(isKnownVersion(version), "Unknown version {0}", version);
  map.internalRollbackTo(version);
  boolean loadFromFile = false;
  // find out which chunks to remove,
  // and which is the newest chunk to keep
  // (the chunk list can have gaps)
  ArrayList<Integer> remove = new ArrayList<Integer>();
  BTreeChunk keep = null;
  for (BTreeChunk c : chunks.values()) {
    if (c.version > version) {
      remove.add(c.id);
    } else if (keep == null || keep.id < c.id) {
      keep = c;
    }
  }
  if (remove.size() > 0) {
    // remove the youngest first, so we don't create gaps
    // (in case we remove many chunks)
    Collections.sort(remove, Collections.reverseOrder());
    map.removeUnusedOldVersions();
    loadFromFile = true;
    for (int id : remove) {
      // Delete the chunk's backing file entirely — the version it held is gone.
      BTreeChunk c = chunks.remove(id);
      c.fileStorage.close();
      c.fileStorage.delete();
    }
    // NOTE(review): if every chunk was newer than `version`, `keep` is null here and
    // `keep.id` would throw NPE — presumably isKnownVersion() above rules that out; confirm.
    lastChunkId = keep.id;
    setLastChunk(keep);
  }
  if (createVersion >= version) {
    // The map itself was created after the target version: it no longer exists there.
    map.close();
  } else {
    if (loadFromFile) {
      map.setRootPos(lastChunk.rootPagePos, lastChunk.version);
    }
  }
  currentVersion = version;
}
@Test public void delete() { for (int i : new int[] { 10, 50, 20, 42, // 44, 68, 20, 93, 85, 71, 62, 77, 4, 37, 66 }) { m.put(i, i); } assertEquals(10, m.remove(10)); assertEquals(20, m.remove(20)); assertEquals(42, m.remove(42)); assertEquals(null, m.remove(42999)); }
/**
 * Rename a map by moving its backing storage directory to the new name.
 *
 * <p>NOTE(review): this implementation closes the whole storage to perform the move and never
 * reopens it, and the update of {@code btreeStorageName} is commented out — the method looks
 * unfinished (see TODO below); confirm before relying on it.
 *
 * @param map the map
 * @param newName the new name
 */
public synchronized void renameMap(BTreeMap<?, ?> map, String newName) {
  // TODO
  if (map.isInMemory()) return;
  checkOpen();
  String oldName = map.getName();
  if (oldName.equals(newName)) {
    return;
  }
  // "storageName" is the base directory configured for this map's files.
  String fileName = (String) map.config.get("storageName");
  if (fileName != null) {
    fileName = fileName + File.separator + newName;
    if (!FileUtils.exists(fileName)) FileUtils.createDirectories(fileName);
    // Close the storage before moving its files on disk.
    close();
    FileUtils.move(btreeStorageName, fileName);
    // btreeStorageName = fileName;
  }
}
public synchronized void rollback() { try { for (WeakReference<Object> o : collections.values()) { Object c = o.get(); if (c != null && c instanceof BTreeMap) { // reload tree BTreeMap m = (BTreeMap) c; m.tree = fetch(m.tree.getRecid()); } if (c != null && c instanceof BTreeSet) { // reload tree BTreeSet m = (BTreeSet) c; m.map.tree = fetch(m.map.tree.getRecid()); } } } catch (IOException e) { throw new IOError(e); } }
/**
 * Rewrite the live pages held by the given old chunks into new chunks, then free the
 * now-unused chunks and persist the result. Does nothing if the rewrite found no work.
 *
 * @param old the chunks whose live data should be relocated
 */
private void compactRewrite(ArrayList<BTreeChunk> old) {
  HashSet<Integer> oldChunkIds = New.hashSet();
  for (BTreeChunk chunk : old) {
    oldChunkIds.add(chunk.id);
  }
  if (map.rewrite(oldChunkIds)) {
    freeUnusedChunks();
    commitAndSave();
  }
}
/**
 * Get the oldest version to retain in memory, which is the manually set retain version, or the
 * current store version (whatever is older).
 *
 * @return the version
 */
long getOldestVersionToKeep() {
  long version = currentVersion;
  if (map.isInMemory()) {
    return version - versionsToKeep;
  }
  // A store may be in progress; keep everything from that version onwards too.
  long inProgress = currentStoreVersion;
  return inProgress > -1 ? Math.min(version, inProgress) : version;
}
/** A single put must yield a root leaf with sentinel keys and a bare value array. */
@Test
public void root_leaf_insert() {
  if (valsOutside) {
    return; // values stored outside nodes use a different leaf layout
  }
  m.put(11, 12);
  final long rootRecid = engine.get(m.rootRecidRef, Serializer.RECID);
  BTreeMap.LeafNode rootLeaf = (BTreeMap.LeafNode) engine.get(rootRecid, m.nodeSerializer);
  assertTrue(Arrays.equals(new Object[] {null, 11, null}, nodeKeysToArray(rootLeaf)));
  assertTrue(Arrays.equals(new Object[] {12}, (Object[]) rootLeaf.vals));
  assertEquals(0, rootLeaf.next);
}
/** Close the file and the storage. Unsaved changes are written to disk first. */
void close() {
  if (closed) {
    return;
  }
  // Only file-backed storages have anything to flush.
  if (!map.isInMemory() && hasUnsavedChanges()) {
    commitAndSave();
  }
  closeStorage();
}
/** Hammer the tree with random puts, then verify its internal structure is consistent. */
@Test
public void randomStructuralCheck() {
  Random rnd = new Random();
  BTreeMap randomMap =
      DBMaker.memoryDB()
          .transactionDisable()
          .make()
          .treeMapCreate("aa")
          .keySerializer(BTreeKeySerializer.INTEGER)
          .valueSerializer(Serializer.INTEGER)
          .make();
  int keyRange = 100000 * TT.scale();
  for (int i = 0; i < keyRange * 10; i++) {
    randomMap.put(rnd.nextInt(keyRange), rnd.nextInt());
  }
  randomMap.checkStructure();
}
/**
 * Create a named TreeSet backed by a BTreeMap key set, registering its configuration
 * (serializer, comparator, node size, counter) in the catalog.
 *
 * @param m the configured maker describing the set to create
 * @return the newly created navigable set
 * @throws IllegalArgumentException if a collection with this name already exists
 */
public synchronized <K> NavigableSet<K> createTreeSet(BTreeSetMaker m) {
  checkNameNotExists(m.name);
  m.serializer = fillNulls(m.serializer);
  // Persist (or read back) the key serializer under the set's catalog entry.
  m.serializer =
      catPut(
          m.name + ".keySerializer",
          m.serializer,
          new BTreeKeySerializer.BasicKeySerializer(getDefaultSerializer()));
  m.comparator = catPut(m.name + ".comparator", m.comparator, Utils.COMPARABLE_COMPARATOR);
  if (m.pumpPresortBatchSize != -1) {
    // Data pump requires sorted input; presort in batches, descending
    // (the pump consumes keys in reverse order).
    m.pumpSource =
        Pump.sort(
            m.pumpSource,
            m.pumpPresortBatchSize,
            Collections.reverseOrder(m.comparator),
            getDefaultSerializer());
  }
  // Optional size counter record; 0 means "no counter kept".
  long counterRecid = !m.keepCounter ? 0L : engine.put(0L, Serializer.LONG);
  long rootRecidRef;
  if (m.pumpSource == null) {
    // Empty set: just allocate a fresh root.
    rootRecidRef = BTreeMap.createRootRef(engine, m.serializer, null, m.comparator);
  } else {
    // Bulk-load the tree from the (sorted) pump source; values are null for a set.
    rootRecidRef =
        Pump.buildTreeMap(
            m.pumpSource,
            engine,
            Fun.noTransformExtractor(),
            null,
            m.nodeSize,
            false,
            counterRecid,
            m.serializer,
            null,
            m.comparator);
  }
  NavigableSet<K> ret =
      new BTreeMap<K, Object>(
              engine,
              catPut(m.name + ".rootRecidRef", rootRecidRef),
              catPut(m.name + ".maxNodeSize", m.nodeSize),
              false,
              catPut(m.name + ".counterRecid", counterRecid),
              m.serializer,
              null,
              m.comparator)
          .keySet();
  catalog.put(m.name + ".type", "TreeSet");
  // Cache weakly so the instance can be reclaimed once unused.
  collections.put(m.name, new WeakReference<Object>(ret));
  return ret;
}
/** delete record/collection with given name */ public synchronized void delete(String name) { Object r = get(name); if (r instanceof Atomic.Boolean) { engine.delete(((Atomic.Boolean) r).recid, Serializer.BOOLEAN); } else if (r instanceof Atomic.Integer) { engine.delete(((Atomic.Integer) r).recid, Serializer.INTEGER); } else if (r instanceof Atomic.Long) { engine.delete(((Atomic.Long) r).recid, Serializer.LONG); } else if (r instanceof Atomic.String) { engine.delete(((Atomic.String) r).recid, Serializer.STRING_NOSIZE); } else if (r instanceof Atomic.Var) { engine.delete(((Atomic.Var) r).recid, ((Atomic.Var) r).serializer); } else if (r instanceof Queue) { // drain queue Queue q = (Queue) r; while (q.poll() != null) { // do nothing } } else if (r instanceof HTreeMap || r instanceof HTreeMap.KeySet) { HTreeMap m = (r instanceof HTreeMap) ? (HTreeMap) r : ((HTreeMap.KeySet) r).parent(); m.clear(); // delete segments for (long segmentRecid : m.segmentRecids) { engine.delete(segmentRecid, HTreeMap.DIR_SERIALIZER); } } else if (r instanceof BTreeMap || r instanceof BTreeMap.KeySet) { BTreeMap m = (r instanceof BTreeMap) ? (BTreeMap) r : (BTreeMap) ((BTreeMap.KeySet) r).m; // TODO on BTreeMap recursively delete all nodes m.clear(); if (m.counter != null) engine.delete(m.counter.recid, Serializer.LONG); } for (String n : catalog.keySet()) { if (!n.startsWith(name)) continue; catalog.remove(n); } namesInstanciated.remove(name); namesLookup.remove(r); }
/** get() on a hand-built root leaf must hit only the exact stored keys. */
@Test
public void simple_root_get() {
  BTreeMap m = new BTreeMap(engine, 32, true, false, null, null, null, null);
  BTreeMap.LeafNode rootLeaf =
      new BTreeMap.LeafNode(
          new Object[] {null, 10, 20, 30, null}, new Object[] {null, 10, 20, 30, null}, 0);
  m.rootRecid = engine.recordPut(rootLeaf, m.nodeSerializer);
  // Neighbors of each stored key must miss; the key itself must hit.
  assertEquals(null, m.get(1));
  assertEquals(null, m.get(9));
  assertEquals(10, m.get(10));
  assertEquals(null, m.get(11));
  assertEquals(null, m.get(19));
  assertEquals(20, m.get(20));
  assertEquals(null, m.get(21));
  assertEquals(null, m.get(29));
  assertEquals(30, m.get(30));
  assertEquals(null, m.get(31));
}
/**
 * Compact the storage by moving all live pages to new chunks.
 *
 * @return if anything was written
 */
public synchronized boolean compactRewriteFully() {
  checkOpen();
  if (lastChunk == null) {
    // nothing to do
    return false;
  }
  // Walk every page via a full-range cursor and re-put its data, which rewrites
  // the pages into fresh chunks.
  BTreeCursor<?, ?> cursor = (BTreeCursor<?, ?>) map.cursor(null);
  BTreePage lastPage = null;
  while (cursor.hasNext()) {
    cursor.next();
    BTreePage p = cursor.getPage();
    if (p == lastPage) {
      // Still on the same page; only re-write each page once.
      continue;
    }
    // NOTE(review): only the key/value at index 0 of the page are re-put — this only
    // rewrites the whole page if put() relocates the full page; confirm.
    Object k = p.getKey(0);
    Object v = p.getValue(0);
    map.put(k, v);
    lastPage = p;
  }
  commitAndSave();
  // TODO delete all previous chunks
  return true;
}
// Test-harness factory: builds an empty BTreeMap over engine `r` for the generic
// ConcurrentMap contract tests. Constructor argument meanings are inferred from the
// sibling tests (node size 6, values stored inside nodes, no counter) — TODO confirm
// against the BTreeMap constructor signature.
@Override
protected ConcurrentMap<Integer, String> makeEmptyMap() throws UnsupportedOperationException {
  return new BTreeMap(
      r,
      BTreeMap.createRootRef(
          r, BTreeKeySerializer.BASIC, Serializer.BASIC, BTreeMap.COMPARABLE_COMPARATOR, 0),
      6,
      false,
      0,
      BTreeKeySerializer.BASIC,
      Serializer.BASIC,
      BTreeMap.COMPARABLE_COMPARATOR,
      0,
      false);
}
/**
 * Create a named TreeMap, registering its configuration (serializers, comparator, node size,
 * value storage mode, counter) in the catalog.
 *
 * @param m the configured maker describing the map to create
 * @return the newly created map
 * @throws IllegalArgumentException if a collection with this name already exists
 */
protected synchronized <K, V> BTreeMap<K, V> createTreeMap(BTreeMapMaker m) {
  String name = m.name;
  checkNameNotExists(name);
  m.keySerializer = fillNulls(m.keySerializer);
  // Persist (or read back) serializers and comparator under the map's catalog entries.
  m.keySerializer =
      catPut(
          name + ".keySerializer",
          m.keySerializer,
          new BTreeKeySerializer.BasicKeySerializer(getDefaultSerializer()));
  m.valueSerializer = catPut(name + ".valueSerializer", m.valueSerializer, getDefaultSerializer());
  m.comparator = catPut(name + ".comparator", m.comparator, Utils.COMPARABLE_COMPARATOR);
  // Optional size counter record; 0 means "no counter kept".
  long counterRecid = !m.keepCounter ? 0L : engine.put(0L, Serializer.LONG);
  long rootRecidRef;
  if (m.pumpSource == null) {
    // Empty map: just allocate a fresh root.
    rootRecidRef =
        BTreeMap.createRootRef(engine, m.keySerializer, m.valueSerializer, m.comparator);
  } else {
    // Bulk-load the tree from the pump source.
    rootRecidRef =
        Pump.buildTreeMap(
            m.pumpSource,
            engine,
            m.pumpKeyExtractor,
            m.pumpValueExtractor,
            m.nodeSize,
            m.valuesStoredOutsideNodes,
            counterRecid,
            m.keySerializer,
            m.valueSerializer,
            m.comparator);
  }
  BTreeMap<K, V> ret =
      new BTreeMap<K, V>(
          engine,
          catPut(name + ".rootRecidRef", rootRecidRef),
          catPut(name + ".maxNodeSize", m.nodeSize),
          catPut(name + ".valuesOutsideNodes", m.valuesStoredOutsideNodes),
          catPut(name + ".counterRecid", counterRecid),
          m.keySerializer,
          m.valueSerializer,
          m.comparator);
  catalog.put(name + ".type", "TreeMap");
  // Cache weakly so the instance can be reclaimed once unused.
  collections.put(name, new WeakReference<Object>(ret));
  return ret;
}