Example #1
  /**
   * All constructors delegate to this constructor to set the htree reference and core metadata.
   *
   * @param htree The {@link HTree} to which the page belongs.
   * @param dirty Used to set the {@link PO#dirty} state. All nodes and leaves created by
   *     non-deserialization constructors begin their life cycle as <code>dirty := true</code>. All
   *     nodes or leaves de-serialized from the backing store begin their life cycle as clean (dirty
   *     := false). Since we read nodes and leaves into immutable objects, those objects will remain
   *     clean. Eventually a copy-on-write will create a mutable node or leaf from the immutable one,
   *     and that node or leaf will be dirty.
   * @param globalDepth The size of the address space (in bits) for each buddy hash table (bucket)
   *     on a directory (bucket) page. The global depth of a node is defined recursively as the
   *     local depth of that node within its parent. The global/local depth are not stored
   *     explicitly. Instead, the local depth is computed dynamically when the child is
   *     materialized, by counting the #of pointers to the child in the appropriate buddy hash
   *     table in the parent. This local depth value is passed into the constructor when the child
   *     is materialized and set as the global depth of the child.
   */
  protected AbstractPage(final HTree htree, final boolean dirty, final int globalDepth) {

    if (htree == null) throw new IllegalArgumentException();

    if (globalDepth < 0) throw new IllegalArgumentException();

    if (globalDepth > htree.addressBits) throw new IllegalArgumentException();

    this.htree = htree;

    this.globalDepth = globalDepth;

    // reference to self: reused to link parents and children.
    this.self = htree.newRef(this);

    if (!dirty) {

      /*
       * Nodes default to being dirty, so we explicitly mark this as
       * clean. This is ONLY done for the de-serialization constructors.
       */

      setDirty(false);
    }

    // Add to the hard reference queue.
    htree.touch(this);
  }
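
The global/local depth relationship described in the Javadoc above can be made concrete with a small sketch. This is a hypothetical helper, not part of the class shown, assuming the usual extendible-hashing invariant that a child with local depth d is referenced by 2^(g - d) pointers from a buddy hash table whose owning parent has global depth g.

  // Hypothetical sketch: derive a child's local depth from the #of pointers to it
  // within one buddy hash table of its parent (npointers is always a power of two).
  static int computeLocalDepth(final int parentGlobalDepth, final int npointers) {
    assert Integer.bitCount(npointers) == 1; // must be a power of two
    return parentGlobalDepth - Integer.numberOfTrailingZeros(npointers);
  }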
Example #2
  public synchronized void deleteCollection(String name) {
    try {
      long nameDirectory_recid = getRoot(NAME_DIRECTORY_ROOT);
      if (nameDirectory_recid == 0) throw new IOException("Collection not found");
      HTree<String, Long> dir = fetch(nameDirectory_recid);

      Long recid = dir.get(name);
      if (recid == null) throw new IOException("Collection not found");

      Object o = fetch(recid);
      // we cannot use the fetched instance o directly since it is not correctly initialized
      if (o instanceof LinkedList2) {
        LinkedList2 l = (LinkedList2) o;
        l.clear();
        delete(l.rootRecid);
      } else if (o instanceof BTree) {
        ((BTree) o).clear();
      } else if (o instanceof HTree) {
        HTree t = (HTree) o;
        t.clear();
        HTreeDirectory n = (HTreeDirectory) fetch(t.rootRecid, t.SERIALIZER);
        n.deleteAllChildren();
        delete(t.rootRecid);
      } else {
        throw new InternalError("unknown collection type: " + (o == null ? null : o.getClass()));
      }
      delete(recid);
      collections.remove(name);

      dir.remove(name);

    } catch (IOException e) {
      throw new IOError(e);
    }
  }
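
A minimal usage sketch for the method above, assuming a db instance of this class and a collection previously registered under the hypothetical name "users":

  // Hypothetical usage: drops the named collection together with its backing records.
  db.deleteCollection("users");
  // After deletion the name is no longer registered, so lookups return null.
  assert db.getHashMap("users") == null;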
Example #3
 /**
  * Obtain the record id of a named object. Returns 0 if the named object doesn't exist. Named
  * objects are used to store Map views and other well-known objects.
  */
 protected synchronized long getNamedObject(String name) throws IOException {
   long nameDirectory_recid = getRoot(NAME_DIRECTORY_ROOT);
   if (nameDirectory_recid == 0) {
     return 0;
   }
   HTree<String, Long> m = fetch(nameDirectory_recid);
   Long res = m.get(name);
   if (res == null) return 0;
   return res;
 }
Example #4
 /**
   * Set the record id of a named object. Named objects are used to store Map views and other
   * well-known objects.
  */
 protected synchronized void setNamedObject(String name, long recid) throws IOException {
   long nameDirectory_recid = getRoot(NAME_DIRECTORY_ROOT);
   HTree<String, Long> m = null;
   if (nameDirectory_recid == 0) {
      // does not exist yet, so create it
     m = new HTree<String, Long>(this, null, null, true);
     nameDirectory_recid = insert(m);
     setRoot(NAME_DIRECTORY_ROOT, nameDirectory_recid);
   } else {
     // fetch it
     m = fetch(nameDirectory_recid);
   }
   m.put(name, recid);
 }
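
Taken together with getNamedObject from Example #3, the method above forms a simple name-to-recid registry. A hedged round-trip sketch inside the same class; the record value and the name "myView" are hypothetical:

  // Hypothetical round trip: register a record id under a name, then resolve it
  // again; getNamedObject returns 0 for names that were never registered.
  long recid = insert(someObject);          // someObject is a placeholder value
  setNamedObject("myView", recid);
  assert getNamedObject("myView") == recid;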
Example #5
  /*
   * Test for this issue:
   * http://code.google.com/p/jdbm2/issues/detail?id=2
   */
  public void testHTreeClear() throws IOException {
    final DBAbstract db = newDBCache();
    final HTree<String, String> tree = (HTree) db.createHashMap("name");

    for (int i = 0; i < 1001; i++) {
      tree.put(String.valueOf(i), String.valueOf(i));
    }
    db.commit();
    System.out.println("finished adding");

    tree.clear();
    db.commit();
    System.out.println("finished clearing");
    assertTrue(tree.isEmpty());
  }
Example #6
  public synchronized <K, V> ConcurrentMap<K, V> getHashMap(String name) {
    Object o = getCollectionInstance(name);
    if (o != null) return (ConcurrentMap<K, V>) o;

    try {
      long recid = getNamedObject(name);
      if (recid == 0) return null;

      HTree tree = fetch(recid);
      tree.setPersistenceContext(this);
      if (!tree.hasValues()) {
        throw new ClassCastException("HashSet is not HashMap");
      }
      collections.put(name, new WeakReference<Object>(tree));
      return tree;
    } catch (IOException e) {
      throw new IOError(e);
    }
  }
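
A usage sketch, assuming a db instance of the class above and a map previously created via createHashMap("name") as in Example #5:

  // Hypothetical usage: re-open a named map; null means no such collection exists.
  ConcurrentMap<String, String> map = db.getHashMap("name");
  if (map != null) {
    map.put("key", "value");
  }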
Example #7
  public synchronized <K> Set<K> getHashSet(String name) {
    Object o = getCollectionInstance(name);
    if (o != null) return (Set<K>) o;

    try {
      long recid = getNamedObject(name);
      if (recid == 0) return null;

      HTree tree = fetch(recid);
      tree.setPersistenceContext(this);
      if (tree.hasValues()) {
        throw new ClassCastException("HashMap is not HashSet");
      }
      Set<K> ret = new HTreeSet(tree);
      collections.put(name, new WeakReference<Object>(ret));
      return ret;
    } catch (IOException e) {
      throw new IOError(e);
    }
  }
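
The set variant mirrors the map variant: a name bound to a value-bearing HTree (a HashMap) cannot be opened as a HashSet. A hedged sketch, with "tags" as a hypothetical collection name:

  // Hypothetical usage: null if "tags" is not registered; throws ClassCastException
  // if it was created as a HashMap rather than a HashSet.
  Set<String> tags = db.getHashSet("tags");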
Example #8
  public synchronized Map<String, Object> getCollections() {
    try {
      Map<String, Object> ret = new LinkedHashMap<String, Object>();
      long nameDirectory_recid = getRoot(NAME_DIRECTORY_ROOT);
      if (nameDirectory_recid == 0) return ret;
      HTree<String, Long> m = fetch(nameDirectory_recid);

      for (Map.Entry<String, Long> e : m.entrySet()) {
        Object o = fetch(e.getValue());
        if (o instanceof BTree) {
          if (((BTree) o).hasValues) o = getTreeMap(e.getKey());
          else o = getTreeSet(e.getKey());
        } else if (o instanceof HTree) {
          if (((HTree) o).hasValues) o = getHashMap(e.getKey());
          else o = getHashSet(e.getKey());
        }

        ret.put(e.getKey(), o);
      }
      return Collections.unmodifiableMap(ret);
    } catch (IOException e) {
      throw new IOError(e);
    }
  }
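
A short sketch of how the returned view might be consumed; db is assumed to be an instance of the class above:

  // Hypothetical usage: enumerate every named collection and its materialized form.
  for (Map.Entry<String, Object> e : db.getCollections().entrySet()) {
    System.out.println(e.getKey() + " -> " + e.getValue().getClass().getSimpleName());
  }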
Example #9
  /**
   * Return this node or leaf iff it is dirty (aka mutable) and otherwise return a copy of this node
   * or leaf. If a copy is made of the node, then a copy will also be made of each immutable parent
   * up to the first mutable parent or the root of the tree, whichever comes first. If the root is
   * copied, then the new root will be set on the {@link HTree}. This method MUST be invoked any
   * time a mutative operation is requested for the leaf.
   *
   * <p>Note: You cannot modify a node that has been written onto the store. Instead, you have to
   * clone the node causing it and all nodes up to the root to be dirty and transient. This method
   * handles that cloning process, but the caller MUST test whether or not the node was copied by
   * this method, MUST delegate the mutation operation to the copy iff a copy was made, and MUST be
   * aware that the copy exists and needs to be used in place of the immutable version of the node.
   *
   * @param triggeredByChildId The persistent identity of the child that triggered this event, if any.
   * @return Either this node or a copy of this node.
   */
  protected AbstractPage copyOnWrite(final long triggeredByChildId) {

    //        if (isPersistent()) {
    if (!isReadOnly()) {

      /*
       * Since a clone was not required, we use this as an opportunity to
       * touch the hard reference queue. This helps us to ensure that
       * nodes which have been touched recently will remain strongly
       * reachable.
       */

      htree.touch(this);

      return this;
    }

    if (log.isInfoEnabled()) {
      log.info("this=" + toShortString() + ", trigger=" + triggeredByChildId);
    }

    // cast to mutable implementation class.
    final HTree htree = (HTree) this.htree;

    // identity of the node that is being copied and deleted.
    final long oldId = this.identity;

    assert oldId != NULL;

    // parent of the node that is being cloned (null iff it is the root).
    DirectoryPage parent = this.getParentDirectory();

    // the new node (mutable copy of the old node).
    final AbstractPage newNode;

    if (isLeaf()) {

      newNode = new BucketPage((BucketPage) this);

      htree.getBtreeCounters().leavesCopyOnWrite++;

    } else {

      newNode = new DirectoryPage((DirectoryPage) this, triggeredByChildId);

      htree.getBtreeCounters().nodesCopyOnWrite++;
    }

    // delete this node now that it has been cloned.
    this.delete();

    if (htree.root == this) {

      assert parent == null;

      // Update the root node on the htree.
      if (log.isInfoEnabled()) log.info("Copy-on-write : replaced root node on htree.");

      final boolean wasDirty = htree.root.dirty;

      assert newNode != null;

      htree.root = (DirectoryPage) newNode;

      if (!wasDirty) {

        htree.fireDirtyEvent();
      }

    } else {

      /*
       * Recursive copy-on-write up the tree. This operation stops as
       * soon as we reach a parent node that is already dirty and
       * grounds out at the root in any case.
       */
      assert parent != null;

      if (!parent.isDirty()) {

        /*
         * Note: pass up the identity of the old child since we want
         * to avoid having its parent reference reset.
         */
        parent = (DirectoryPage) parent.copyOnWrite(oldId);
      }

      /*
       * Replace the reference to this child with the reference to the
       * new child. This makes the old child inaccessible via
       * navigation. It will be GCd once it falls off of the hard
       * reference queue.
       */
      parent.replaceChildRef(oldId, newNode);
    }

    return newNode;
  }
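
As the Javadoc stresses, callers must test whether a copy was made and apply the mutation to the page returned by copyOnWrite() rather than to the original reference. A hedged caller sketch; page, childAddr and the mutation itself are hypothetical:

    // Hypothetical caller pattern: copyOnWrite() returns either this page (already
    // mutable) or a dirty copy that must be used for all further access.
    AbstractPage target = page.copyOnWrite(childAddr);
    if (target != page) {
      page = target; // the original was persistent; switch to the mutable copy
    }
    // ... apply the mutation against 'page' ...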