Example No. 1
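 // With AUTO_COMMIT (or no transaction at all) this delegates to the superclass;
 // inside a long running transaction it returns the staged version from the
 // working tree, so edits made in that transaction are visible to the caller.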
 @Override
 public RevTree getCurrentVersion() {
   final Transaction transaction = getTransaction();
   if (null == transaction || Transaction.AUTO_COMMIT.equals(transaction)) {
     return super.getCurrentVersion();
   }
   final Name name = getName();
   RevTree headVersion = repository.getWorkingTree().getStagedVersion(name);
   return headVersion;
 }
  public void removeFeatures(Filter filter) throws IOException {
    // this can be optimized: it's a matter of mass updating the last
    // revisions (and, before that, computing the modified envelope)
    Filter versionedFilter =
        (Filter) store.buildVersionedFilter(schema.getTypeName(), filter, new RevisionInfo());

    // deal with the transaction: are we running in auto commit or inside a long running one?
    Transaction t = getTransaction();
    boolean autoCommit = false;
    if (Transaction.AUTO_COMMIT.equals(t)) {
      t = new DefaultTransaction();
      autoCommit = true;
    }
    VersionedJdbcTransactionState state = store.wrapped.getVersionedJdbcTransactionState(t);

    // we need to mark the modified bounds and store their WGS84 version in the transaction state
    ReferencedEnvelope bounds =
        locking.getBounds(new DefaultQuery(schema.getTypeName(), versionedFilter));
    if (bounds != null) {
      if (bounds.getCoordinateReferenceSystem() == null) {
        bounds = new ReferencedEnvelope(bounds, getSchema().getCoordinateReferenceSystem());
      }
      try {
        ReferencedEnvelope wgsBounds = null;
        if (bounds.getCoordinateReferenceSystem() != null)
          wgsBounds = bounds.transform(DefaultGeographicCRS.WGS84, true);
        else wgsBounds = bounds;
        state.expandDirtyBounds(wgsBounds);
        state.setTypeNameDirty(schema.getTypeName());
      } catch (Exception e) {
        throw new DataSourceException(
            "Problems computing and storing the " + "bounds affected by this feature removal", e);
      }
    }

    // now we can run the update
    locking.modifyFeatures(
        locking.getSchema().getDescriptor("expired"),
        Long.valueOf(state.getRevision()),
        versionedFilter);

    // if it's auto commit, don't forget to actually commit
    if (autoCommit) {
      t.commit();
      t.close();
    }
    store.listenerManager.fireFeaturesRemoved(schema.getTypeName(), t, bounds, false);
  }
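
For context, here is a minimal caller-side sketch of driving a store like the one above under an explicit transaction, using only standard GeoTools calls (DefaultTransaction, setTransaction, removeFeatures and an id filter). The featureStore parameter and the "roads.1" feature id are hypothetical; the store is assumed to come from a versioned data store configured elsewhere.

  // Caller-side sketch (hypothetical names): remove one feature under an explicit
  // transaction so the versioned store records the change against that transaction.
  public void removeOneFeature(FeatureStore<SimpleFeatureType, SimpleFeature> featureStore)
      throws IOException {
    FilterFactory ff = CommonFactoryFinder.getFilterFactory(null);
    Filter idFilter = ff.id(Collections.singleton(ff.featureId("roads.1"))); // hypothetical fid
    Transaction t = new DefaultTransaction("remove-example");
    try {
      featureStore.setTransaction(t);
      featureStore.removeFeatures(idFilter);
      t.commit();
    } catch (IOException e) {
      t.rollback();
      throw e;
    } finally {
      t.close();
    }
  }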
Example No. 3
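 // Rejects AUTO_COMMIT up front: versioned feature types require an explicit transaction.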
 private void checkTransaction() {
   if (Transaction.AUTO_COMMIT.equals(getTransaction())) {
     throw new UnsupportedOperationException(
         "AUTO_COMMIT is not supported for versioned Feature Types");
   }
 }
  public void rollback(String toVersion, Filter filter, String[] userIds) throws IOException {
    // TODO: build an optimized version of this that can do the same work with a couple
    // of queries assuming the filter is fully encodable

    Transaction t = getTransaction();
    boolean autoCommit = false;
    if (Transaction.AUTO_COMMIT.equals(t)) {
      t = new DefaultTransaction();
      autoCommit = true;
    }

    // Gather features modified after toVersion
    ModifiedFeatureIds mfids =
        store.getModifiedFeatureFIDs(schema.getTypeName(), toVersion, null, filter, userIds, t);
    FilterFactory ff = CommonFactoryFinder.getFilterFactory(null);

    // grab the state; we need to mark as dirty all the features we are going to modify/re-insert
    VersionedJdbcTransactionState state = store.wrapped.getVersionedJdbcTransactionState(t);

    // remove all features that have been created and not deleted
    Set fidsToRemove = new HashSet(mfids.getCreated());
    fidsToRemove.removeAll(mfids.getDeleted());
    if (!fidsToRemove.isEmpty()) {
      removeFeatures(store.buildFidFilter(fidsToRemove));
      state.setTypeNameDirty(getSchema().getTypeName());
    }

    // reinstate all features that were there before toVersion and that
    // have been deleted after it. Notice this is an insertion, so to preserve
    // the fids I have to use low level writers where I can set all attributes manually
    // (we work on the assumption the wrapped data store maps all attributes of the primary
    // key in the feature itself)
    Set fidsToRecreate = new HashSet(mfids.getDeleted());
    fidsToRecreate.removeAll(mfids.getCreated());
    if (!fidsToRecreate.isEmpty()) {
      state.setTypeNameDirty(getSchema().getTypeName());
      state.setFidsDirty(getSchema().getTypeName(), fidsToRecreate);

      long revision = store.wrapped.getVersionedJdbcTransactionState(t).getRevision();
      Filter recreateFilter =
          store.buildVersionedFilter(
              schema.getTypeName(), store.buildFidFilter(fidsToRecreate), mfids.fromRevision);
      FeatureReader<SimpleFeatureType, SimpleFeature> fr = null;
      FeatureWriter<SimpleFeatureType, SimpleFeature> fw = null;
      try {
        DefaultQuery q = new DefaultQuery(schema.getTypeName(), recreateFilter);
        fr = store.wrapped.getFeatureReader(q, t);
        fw = store.wrapped.getFeatureWriterAppend(schema.getTypeName(), t);
        while (fr.hasNext()) {
          SimpleFeature original = fr.next();
          SimpleFeature restored = fw.next();
          for (int i = 0; i < original.getFeatureType().getAttributeCount(); i++) {
            restored.setAttribute(i, original.getAttribute(i));
          }
          restored.setAttribute("revision", Long.valueOf(revision));
          restored.setAttribute("expired", Long.valueOf(Long.MAX_VALUE));
          fw.write();
        }
      } catch (IllegalAttributeException iae) {
        throw new DataSourceException(
            "Unexpected error occurred while " + "restoring deleted featues", iae);
      } finally {
        if (fr != null) fr.close();
        if (fw != null) fw.close();
      }
    }

    // Now onto the modified features: the ones that were there before toVersion and are still there.
    // Since we cannot get a sorted writer we have to do a kind of inner loop scan
    // (note, a parallel scan of similarly sorted reader and writer would be more
    // efficient, but writer sorting is not available...)
    // Here it's possible to work against the external API, though it would be more
    // efficient (but more complex) to work against the wrapped one.
    if (!mfids.getModified().isEmpty()) {
      state.setTypeNameDirty(getSchema().getTypeName());
      state.setFidsDirty(getSchema().getTypeName(), mfids.getModified());

      Filter modifiedIdFilter = store.buildFidFilter(mfids.getModified());
      Filter mifCurrent =
          store.buildVersionedFilter(schema.getTypeName(), modifiedIdFilter, new RevisionInfo());
      FeatureReader<SimpleFeatureType, SimpleFeature> fr = null;
      FeatureWriter<SimpleFeatureType, SimpleFeature> fw = null;
      try {
        fw = store.getFeatureWriter(schema.getTypeName(), mifCurrent, t);
        while (fw.hasNext()) {
          SimpleFeature current = fw.next();
          Filter currIdFilter = ff.id(Collections.singleton(ff.featureId(current.getID())));
          Filter cidToVersion =
              store.buildVersionedFilter(schema.getTypeName(), currIdFilter, mfids.fromRevision);
          DefaultQuery q = new DefaultQuery(schema.getTypeName(), cidToVersion);
          q.setVersion(mfids.fromRevision.toString());
          fr = store.getFeatureReader(q, t);
          SimpleFeature original = fr.next();
          for (int i = 0; i < original.getFeatureType().getAttributeCount(); i++) {
            current.setAttribute(i, original.getAttribute(i));
          }
          fr.close();
          fw.write();
        }
      } catch (IllegalAttributeException iae) {
        throw new DataSourceException(
            "Unexpected error occurred while " + "restoring deleted featues", iae);
      } finally {
        if (fr != null) fr.close();
        if (fw != null) fw.close();
      }
    }

    // if it's auto commit, don't forget to actually commit
    if (autoCommit) {
      t.commit();
      t.close();
    }
  }
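
A minimal caller-side sketch of invoking a rollback like the one above. It assumes the store is exposed through a versioning feature store type declaring the rollback(String, Filter, String[]) method shown (named VersioningFeatureStore here); the "4" target revision and the variable names are hypothetical, Filter.INCLUDE selects every feature, and the null user array means no user filtering.

  // Caller-side sketch (hypothetical names): roll all features back to revision "4"
  // inside an explicit transaction, then commit the result.
  public void rollbackEverything(VersioningFeatureStore features) throws IOException {
    Transaction t = new DefaultTransaction("rollback-example");
    try {
      features.setTransaction(t);
      features.rollback("4", Filter.INCLUDE, null);
      t.commit();
    } catch (IOException e) {
      t.rollback();
      throw e;
    } finally {
      t.close();
    }
  }

The explicit transaction mirrors the pattern in the methods above, where AUTO_COMMIT is either wrapped in a throwaway DefaultTransaction or rejected outright by checkTransaction().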