Example #1
0
  /**
   * Creates a {@link Justification}, writes it on the store using {@link
   * RDFJoinNexus#newInsertBuffer(IMutableRelation)}, verifies that we can read it back from the
   * store, and then retracts the justified statement and verifies that the justification was also
   * retracted.
   */
  public void test_writeReadRetract() {

    final Properties properties = super.getProperties();

    // override the default axiom model.
    properties.setProperty(
        com.bigdata.rdf.store.AbstractTripleStore.Options.AXIOMS_CLASS, NoAxioms.class.getName());

    final AbstractTripleStore store = getStore(properties);

    try {

      if (!store.isJustify()) {

        log.warn("Test skipped - justifications not enabled");

        // Nothing to verify without the justifications index: skip the test.
        return;
      }

      /*
       * the explicit statement that is the support for the rule.
       */

      final IV U = store.addTerm(new URIImpl("http://www.bigdata.com/U"));
      final IV A = store.addTerm(new URIImpl("http://www.bigdata.com/A"));
      final IV Y = store.addTerm(new URIImpl("http://www.bigdata.com/Y"));

      store.addStatements(
          new SPO[] { //
            new SPO(U, A, Y, StatementEnum.Explicit) //
          }, //
          1);

      assertTrue(store.hasStatement(U, A, Y));
      assertEquals(1, store.getStatementCount());

      final InferenceEngine inf = store.getInferenceEngine();

      final Vocabulary vocab = store.getVocabulary();

      // the rule.
      final Rule rule = new RuleRdf01(store.getSPORelation().getNamespace(), vocab);

      final IJoinNexus joinNexus =
          store
              .newJoinNexusFactory(
                  RuleContextEnum.DatabaseAtOnceClosure,
                  ActionEnum.Insert,
                  IJoinNexus.ALL,
                  null /* filter */)
              .newInstance(store.getIndexManager());

      /*
       * The buffer that accepts solutions and causes them to be written
       * onto the statement indices and the justifications index.
       */
      final IBuffer<ISolution[]> insertBuffer = joinNexus.newInsertBuffer(store.getSPORelation());

      // the expected justification (setup and verified below).
      final Justification jst;

      // the expected entailment.
      final SPO expectedEntailment =
          new SPO( //
              A, vocab.get(RDF.TYPE), vocab.get(RDF.PROPERTY), StatementEnum.Inferred);

      {
        final IBindingSet bindingSet = joinNexus.newBindingSet(rule);

        /*
         * Note: rdfs1 is implemented using a distinct term scan. This
         * has the effect of leaving the variables that do not appear in
         * the head of the rule unbound. Therefore we DO NOT bind those
         * variables here in the test case and they will be represented
         * as ZERO (0L) in the justifications index and interpreted as
         * wildcards.
         */
        //                bindingSet.set(Var.var("u"), new Constant<IV>(U));
        bindingSet.set(Var.var("a"), new Constant<IV>(A));
        //                bindingSet.set(Var.var("y"), new Constant<IV>(Y));

        final ISolution solution = new Solution(joinNexus, rule, bindingSet);

        /*
         * Verify the justification that will be built from that
         * solution.
         */
        {
          jst = new Justification(solution);

          /*
           * Verify the bindings on the head of the rule as
           * represented by the justification.
           */
          assertEquals(expectedEntailment, jst.getHead());

          /*
           * Verify the bindings on the tail of the rule as
           * represented by the justification. Again, note that the
           * variables that do not appear in the head of the rule are
           * left unbound for rdfs1 as a side-effect of evaluation
           * using a distinct term scan.
           */
          final SPO[] expectedTail =
              new SPO[] { //
                new SPO(NULL, A, NULL, StatementEnum.Inferred) //
              };

          if (!Arrays.equals(expectedTail, jst.getTail())) {

            fail("Expected: " + Arrays.toString(expectedTail) + ", but actual: " + jst);
          }
        }

        // insert solution into the buffer.
        insertBuffer.add(new ISolution[] {solution});
      }

      // no justifications before hand.
      assertEquals(0L, store.getSPORelation().getJustificationIndex().rangeCount());

      // flush the buffer.
      assertEquals(1L, insertBuffer.flush());

      // one justification afterwards.
      assertEquals(1L, store.getSPORelation().getJustificationIndex().rangeCount());

      /*
       * verify read back from the index.
       */
      {
        final ITupleIterator<Justification> itr =
            store.getSPORelation().getJustificationIndex().rangeIterator();

        while (itr.hasNext()) {

          final ITuple<Justification> tuple = itr.next();

          // de-serialize the justification from the key.
          final Justification tmp = tuple.getObject();

          // verify the same.
          assertEquals(jst, tmp);

          // no more justifications in the index.
          assertFalse(itr.hasNext());
        }
      }

      /*
       * test iterator with a single justification.
       */
      {
        final FullyBufferedJustificationIterator itr =
            new FullyBufferedJustificationIterator(store, expectedEntailment);

        assertTrue(itr.hasNext());

        final Justification tmp = itr.next();

        assertEquals(jst, tmp);
      }

      // an empty focusStore.
      final TempTripleStore focusStore =
          new TempTripleStore(store.getIndexManager().getTempStore(), store.getProperties(), store);

      try {

        /*
         * The inference (A rdf:type rdf:property) is grounded by the
         * explicit statement (U A Y).
         */

        assertTrue(
            Justification.isGrounded(
                inf,
                focusStore,
                store,
                expectedEntailment,
                false /* testHead */,
                true /* testFocusStore */,
                new VisitedSPOSet(focusStore.getIndexManager())));

        // add the statement (U A Y) to the focusStore.
        focusStore.addStatements(
            new SPO[] { //
              new SPO(U, A, Y, StatementEnum.Explicit) //
            }, //
            1);

        /*
         * The inference is no longer grounded since we have declared
         * that we are also retracting its grounds.
         */
        assertFalse(
            Justification.isGrounded(
                inf,
                focusStore,
                store,
                expectedEntailment,
                false /* testHead */,
                true /* testFocusStore */,
                new VisitedSPOSet(focusStore.getIndexManager())));

      } finally {

        /*
         * Destroy the temp kb, but not the backing TemporaryStore. That
         * will be destroyed when we destroy the IndexManager associated
         * with the main store (below).
         */
        focusStore.destroy();
      }

      /*
       * remove the justified statements.
       */

      assertEquals(
          1L,
          store
              .getAccessPath(expectedEntailment.s, expectedEntailment.p, expectedEntailment.o)
              .removeAll());

      /*
       * verify that the justification for that statement is gone.
       */
      {
        final ITupleIterator<?> itr =
            store.getSPORelation().getJustificationIndex().rangeIterator();

        assertFalse(itr.hasNext());
      }

    } finally {

      store.__tearDownUnitTest();
    }
  }
  public void mapOverShards(final Bundle<F>[] bundles) {

    /*
     * Order the bundles by the fromKey associated with each asBound
     * predicate (Bundle#compareTo groups by IKeyOrder, then fromKey),
     * which maximizes the chance that consecutive bundles land in the
     * same index partition.
     */
    Arrays.sort(bundles);

    // The last index partition locator that was visited (if any).
    PartitionLocator lastLocator = null;
    // The key order (scale-out index) to which [lastLocator] belongs.
    IKeyOrder<?> lastKeyOrder = null;

    for (final Bundle<F> bundle : bundles) {

      /*
       * Fast path: the bundle's entire key range lies inside the last
       * index partition that we scanned AND that partition belongs to
       * the same scale-out index (we track the IKeyOrder of the last
       * PartitionLocator to enforce this). This is only possible when
       * the asBound predicate maps onto a single index partition, but
       * that is a very common case since we try to choose selective
       * indices for access paths.
       */
      final boolean fitsInLastPartition =
          lastLocator != null
              && lastKeyOrder == bundle.keyOrder // same s/o index
              && BytesUtil.rangeCheck(
                  bundle.fromKey,
                  lastLocator.getLeftSeparatorKey(),
                  lastLocator.getRightSeparatorKey())
              && BytesUtil.rangeCheck(
                  bundle.toKey,
                  lastLocator.getLeftSeparatorKey(),
                  lastLocator.getRightSeparatorKey());

      if (fitsInLastPartition) {

        // Reuse the last locator without another locator scan.
        op.getBuffer(lastLocator).add(new IBindingSet[] {bundle.bindingSet});

        continue;
      }

      /*
       * Slow path: run a locator scan over the index partitions spanned
       * by the asBound predicate's key range.
       */
      final Iterator<PartitionLocator> locators =
          op.locatorScan(bundle.keyOrder, bundle.fromKey, bundle.toKey);

      // Invalidate the cached locator; remember the key order being scanned.
      lastLocator = null;
      lastKeyOrder = bundle.keyOrder;

      while (locators.hasNext()) {

        // Track the most recently discovered locator for reuse above.
        final PartitionLocator locator = lastLocator = locators.next();

        if (log.isTraceEnabled())
          log.trace(
              "adding bindingSet to buffer"
                  + ": asBound="
                  + bundle.asBound
                  + ", partitionId="
                  + locator.getPartitionId()
                  + ", dataService="
                  + locator.getDataServiceUUID()
                  + ", bindingSet="
                  + bundle.bindingSet);

        // Route the binding set to the buffer for this index partition.
        op.getBuffer(locator).add(new IBindingSet[] {bundle.bindingSet});
      }
    }
  }