/**
 * Returns the features matched by the query, read from the versioned feature collection view so
 * that versioning columns are exposed alongside the regular attributes.
 *
 * @param query the query; its type name, when set, must match this source's schema
 * @return the matching features, retyped so the returned schema carries this source's type name
 * @throws IOException if the query targets a different feature type, or data access fails
 */
public FeatureCollection<SimpleFeatureType, SimpleFeature> getVersionedFeatures(Query query)
      throws IOException {
    final SimpleFeatureType ft = getSchema();

    // check the feature type is the right one
    final String typeName = ft.getTypeName();
    if (query.getTypeName() != null && !query.getTypeName().equals(typeName))
      throw new IOException("Incompatible type, this class can access only " + typeName);

    // make sure the versioned feature collection view is around (created lazily)
    if (!Arrays.asList(store.wrapped.getTypeNames()).contains(store.getVFCViewName(typeName)))
      store.createVersionedFeatureCollectionView(typeName);

    // we have to hit the view, so retarget the query and have the store version-enable it
    DefaultQuery vq = new DefaultQuery(query);
    vq.setTypeName(VersionedPostgisDataStore.getVFCViewName(typeName));
    vq = store.buildVersionedQuery(vq);
    FeatureCollection<SimpleFeatureType, SimpleFeature> fc =
        store
            .wrapped
            .getFeatureSource(VersionedPostgisDataStore.getVFCViewName(typeName))
            .getFeatures(vq);

    // build a renamed feature type with the same attributes as the feature collection
    // (reuse the cached schema instead of calling fc.getSchema() twice, as before)
    final SimpleFeatureType fcSchema = fc.getSchema();
    SimpleFeatureTypeBuilder builder = new SimpleFeatureTypeBuilder();
    builder.init(ft);
    builder.setAttributes(fcSchema.getAttributeDescriptors());
    SimpleFeatureType renamedFt = builder.buildFeatureType();
    return new ReTypingFeatureCollection(fc, renamedFt);
  }
  /**
   * Builds a difference reader describing how the features matching the filter changed between the
   * two specified versions.
   *
   * @param fromVersion the version to diff from (parsed by {@link RevisionInfo})
   * @param toVersion the version to diff to (parsed by {@link RevisionInfo})
   * @param filter limits the features whose differences are reported; null means all features
   * @param userIds when non null/empty, only changes made by these users are considered
   * @return a reader over the per-feature differences between the two versions
   * @throws IOException if gathering the modified feature ids fails
   */
  public FeatureDiffReaderImpl getDifferences(
      String fromVersion, String toVersion, Filter filter, String[] userIds) throws IOException {
    if (filter == null) filter = Filter.INCLUDE;

    RevisionInfo r1 = new RevisionInfo(fromVersion);
    RevisionInfo r2 = new RevisionInfo(toVersion);

    // gather modified ids
    ModifiedFeatureIds mfids =
        store.getModifiedFeatureFIDs(
            schema.getTypeName(), fromVersion, toVersion, filter, userIds, getTransaction());

    // the reader builds the per-revision filters itself; it only needs the fid mapper
    // (removed an unused FilterFactory lookup that used to sit here)
    VersionedFIDMapper mapper = (VersionedFIDMapper) store.getFIDMapper(schema.getTypeName());

    return new FeatureDiffReaderImpl(store, getTransaction(), schema, r1, r2, mapper, mfids);
  }
  /**
   * Removes the features matching the filter. In a versioned store rows are never physically
   * deleted: removal is performed by setting the "expired" attribute of the current revisions to
   * the transaction's revision number.
   *
   * <p>Before the update, the affected bounds (reprojected to WGS84 when a CRS is available) are
   * stored in the transaction state so commit messages can record the dirtied area.
   *
   * @param filter selects the features to expire
   * @throws IOException if bounds computation, the update, or the commit fails
   */
  public void removeFeatures(Filter filter) throws IOException {
    // this we can optimize, it's a matter of mass updating the last
    // revisions (and before that, we have to compute the modified envelope)
    Filter versionedFilter =
        (Filter) store.buildVersionedFilter(schema.getTypeName(), filter, new RevisionInfo());

    // deal with the transaction, are we playing with auto commit or long running?
    Transaction t = getTransaction();
    boolean autoCommit = false;
    if (Transaction.AUTO_COMMIT.equals(t)) {
      t = new DefaultTransaction();
      autoCommit = true;
    }

    ReferencedEnvelope bounds = null;
    try {
      VersionedJdbcTransactionState state = store.wrapped.getVersionedJdbcTransactionState(t);

      // we need to mark the modified bounds and store their wgs84 version into the transaction
      bounds = locking.getBounds(new DefaultQuery(schema.getTypeName(), versionedFilter));
      if (bounds != null) {
        if (bounds.getCoordinateReferenceSystem() == null) {
          bounds = new ReferencedEnvelope(bounds, getSchema().getCoordinateReferenceSystem());
        }
        try {
          // reproject to a common CRS so dirty areas from different types can be merged
          ReferencedEnvelope wgsBounds = null;
          if (bounds.getCoordinateReferenceSystem() != null)
            wgsBounds = bounds.transform(DefaultGeographicCRS.WGS84, true);
          else wgsBounds = bounds;
          state.expandDirtyBounds(wgsBounds);
          state.setTypeNameDirty(schema.getTypeName());
        } catch (Exception e) {
          throw new DataSourceException(
              "Problems computing and storing the " + "bounds affected by this feature removal", e);
        }
      }

      // now we can run the update: expire the current revisions instead of deleting rows
      locking.modifyFeatures(
          locking.getSchema().getDescriptor("expired"),
          Long.valueOf(state.getRevision()),
          versionedFilter);

      // if it's auto commit, don't forget to actually commit
      if (autoCommit) {
        t.commit();
      }
    } finally {
      // always close the transaction we created ourselves, even on failure (close rolls
      // back uncommitted work); the previous version leaked it on exceptions
      if (autoCommit) {
        t.close();
      }
    }
    store.listenerManager.fireFeaturesRemoved(schema.getTypeName(), t, bounds, false);
  }
  /**
   * Returns the change log between two versions: one feature per changeset (a row in the
   * changesets table) that touched at least one of the features matched by the filter in the
   * given version interval.
   *
   * @param fromVersion start of the version interval (parsed by {@link RevisionInfo})
   * @param toVersion end of the version interval; if it precedes fromVersion the two are swapped
   *     and the log is returned in ascending rather than descending revision order
   * @param filter limits the features whose changes are logged; null means all features
   * @param userIds when non null/empty, only changes made by these users are considered
   * @param maxRows maximum number of log rows to return; values &lt;= 0 mean no limit
   * @throws IOException if gathering the modified fids or reading the revisions fails
   */
  public FeatureCollection<SimpleFeatureType, SimpleFeature> getLog(
      String fromVersion, String toVersion, Filter filter, String[] userIds, int maxRows)
      throws IOException {
    if (filter == null) filter = Filter.INCLUDE;
    RevisionInfo r1 = new RevisionInfo(fromVersion);
    RevisionInfo r2 = new RevisionInfo(toVersion);

    // normalize the interval so that r1 <= r2, remembering the swap to flip the sort order later
    boolean swapped = false;
    if (r1.revision > r2.revision) {
      // swap them
      RevisionInfo tmpr = r1;
      r1 = r2;
      r2 = tmpr;
      String tmps = toVersion;
      toVersion = fromVersion;
      fromVersion = tmps;
      swapped = true;
    }

    // We implement this exactly as described. Happily, it seems Postgis does not have
    // sql lentgh limitations. Yet, if would be a lot better if we could encode this
    // as a single sql query with subqueries... (but not all filters are encodable...)
    ModifiedFeatureIds mfids =
        store.getModifiedFeatureFIDs(
            schema.getTypeName(), fromVersion, toVersion, filter, userIds, getTransaction());
    // union of every fid touched in the interval, whatever the kind of change
    Set ids = new HashSet(mfids.getCreated());
    ids.addAll(mfids.getDeleted());
    ids.addAll(mfids.getModified());

    // grab the eventually modified revisions from mfids
    r1 = mfids.fromRevision;
    r2 = mfids.toRevision;

    // no changes?
    if (ids.isEmpty()) return new EmptyFeatureCollection(schema);

    // Create a filter that sounds like:
    // (revision > r1 and revision <= r2) or (expired > r1 and expired <= r2) and fid in
    // (fidlist)
    FilterFactory ff = CommonFactoryFinder.getFilterFactory(null);
    Filter fidFilter = store.buildFidFilter(ids);
    Filter transformedFidFilter = store.transformFidFilter(schema.getTypeName(), fidFilter);
    Filter revGrR1 = ff.greater(ff.property("revision"), ff.literal(r1.revision));
    Filter revLeR2 = ff.lessOrEqual(ff.property("revision"), ff.literal(r2.revision));
    Filter expGrR1 = ff.greater(ff.property("expired"), ff.literal(r1.revision));
    Filter expLeR2 = ff.lessOrEqual(ff.property("expired"), ff.literal(r2.revision));
    Filter versionFilter =
        ff.and(transformedFidFilter, ff.or(ff.and(revGrR1, revLeR2), ff.and(expGrR1, expLeR2)));

    // We just want the revision and expired, build a query against the real feature type
    DefaultQuery q =
        new DefaultQuery(schema.getTypeName(), versionFilter, new String[] {"revision", "expired"});
    FeatureReader<SimpleFeatureType, SimpleFeature> fr = null;
    SortedSet revisions = new TreeSet();
    try {
      fr = store.wrapped.getFeatureReader(q, getTransaction());
      while (fr.hasNext()) {
        SimpleFeature f = fr.next();
        // attribute 0 is "revision", attribute 1 is "expired" (see the property list above)
        Long revision = (Long) f.getAttribute(0);
        if (revision.longValue() > r1.revision) revisions.add(revision);
        // an expired value of Long.MAX_VALUE marks a still-live row, so it is skipped
        Long expired = (Long) f.getAttribute(1);
        if (expired.longValue() != Long.MAX_VALUE && expired.longValue() > r1.revision)
          revisions.add(expired);
      }
    } catch (Exception e) {
      throw new DataSourceException("Error reading modified revisions from datastore", e);
    } finally {
      if (fr != null) fr.close();
    }

    // now, we have a list of revisions between a min and a max
    // let's try to build a fid filter with revisions from the biggest to the smallest
    Set revisionIdSet = new HashSet();
    for (Iterator it = revisions.iterator(); it.hasNext(); ) {
      Long rev = (Long) it.next();
      // changeset fids are built as "<changesets table name>.<revision>"
      revisionIdSet.add(
          ff.featureId(VersionedPostgisDataStore.TBL_CHANGESETS + "." + rev.toString()));
    }
    if (revisionIdSet.isEmpty()) return new EmptyFeatureCollection(schema);
    Filter revisionFilter = ff.id(revisionIdSet);

    // return the changelog
    // TODO: sort on revision descending. Unfortunately, to do so we have to fix fid mappers,
    // so that auto-increment can return revision among the attributes, and at the same
    // time simply allow not include fid attributes in the insert queries (or provide a
    // "default"
    // value for them).
    FeatureSource<SimpleFeatureType, SimpleFeature> changesets =
        (FeatureSource<SimpleFeatureType, SimpleFeature>)
            store.getFeatureSource(VersionedPostgisDataStore.TBL_CHANGESETS);
    DefaultQuery sq = new DefaultQuery();
    sq.setFilter(revisionFilter);
    // newest first normally; oldest first when the requested interval was reversed
    final SortOrder order = swapped ? SortOrder.ASCENDING : SortOrder.DESCENDING;
    sq.setSortBy(new SortBy[] {ff.sort("revision", order)});
    if (maxRows > 0) sq.setMaxFeatures(maxRows);
    return changesets.getFeatures(sq);
  }
  /**
   * Rolls the features matching the filter back to the state they had at toVersion, in three
   * phases: (1) expire features created after toVersion, (2) re-insert features deleted after
   * toVersion preserving their fids, (3) overwrite still-existing modified features with their
   * attribute values as of toVersion.
   *
   * @param toVersion the version to roll back to
   * @param filter limits the features to roll back; semantics of null presumably match
   *     getModifiedFeatureFIDs — TODO confirm
   * @param userIds when non null/empty, only changes made by these users are rolled back
   * @throws IOException if any of the reads, writes, or the final commit fails
   */
  public void rollback(String toVersion, Filter filter, String[] userIds) throws IOException {
    // TODO: build an optimized version of this that can do the same work with a couple
    // of queries assuming the filter is fully encodable

    // deal with the transaction: wrap auto-commit work in a local transaction so all three
    // phases commit atomically at the end
    // NOTE(review): if an exception is thrown below, the locally created transaction is never
    // closed — consider a try/finally
    Transaction t = getTransaction();
    boolean autoCommit = false;
    if (Transaction.AUTO_COMMIT.equals(t)) {
      t = new DefaultTransaction();
      autoCommit = true;
    }

    // Gather feature modified after toVersion
    ModifiedFeatureIds mfids =
        store.getModifiedFeatureFIDs(schema.getTypeName(), toVersion, null, filter, userIds, t);
    FilterFactory ff = CommonFactoryFinder.getFilterFactory(null);

    // grab the state, we need to mark as dirty all the features we are going to modify/re-insert
    VersionedJdbcTransactionState state = store.wrapped.getVersionedJdbcTransactionState(t);

    // remove all features that have been created and not deleted
    Set fidsToRemove = new HashSet(mfids.getCreated());
    fidsToRemove.removeAll(mfids.getDeleted());
    if (!fidsToRemove.isEmpty()) {
      removeFeatures(store.buildFidFilter(fidsToRemove));
      state.setTypeNameDirty(getSchema().getTypeName());
    }

    // reinstate all features that were there before toVersion and that
    // have been deleted after it. Notice this is an insertion, so to preserve
    // the fids I have to use low level writers where I can set all attributes manually
    // (we work on the assumption the wrapped data store maps all attributes of the primary
    // key in the feature itself)
    Set fidsToRecreate = new HashSet(mfids.getDeleted());
    fidsToRecreate.removeAll(mfids.getCreated());
    if (!fidsToRecreate.isEmpty()) {
      state.setTypeNameDirty(getSchema().getTypeName());
      state.setFidsDirty(getSchema().getTypeName(), fidsToRecreate);

      long revision = store.wrapped.getVersionedJdbcTransactionState(t).getRevision();
      Filter recreateFilter =
          store.buildVersionedFilter(
              schema.getTypeName(), store.buildFidFilter(fidsToRecreate), mfids.fromRevision);
      FeatureReader<SimpleFeatureType, SimpleFeature> fr = null;
      FeatureWriter<SimpleFeatureType, SimpleFeature> fw = null;
      try {
        DefaultQuery q = new DefaultQuery(schema.getTypeName(), recreateFilter);
        fr = store.wrapped.getFeatureReader(q, t);
        fw = store.wrapped.getFeatureWriterAppend(schema.getTypeName(), t);
        while (fr.hasNext()) {
          SimpleFeature original = fr.next();
          SimpleFeature restored = fw.next();
          // copy every attribute over by index, then stamp the versioning columns by name
          for (int i = 0; i < original.getFeatureType().getAttributeCount(); i++) {
            restored.setAttribute(i, original.getAttribute(i));
          }
          restored.setAttribute("revision", new Long(revision));
          restored.setAttribute("expired", new Long(Long.MAX_VALUE));
          fw.write();
        }
      } catch (IllegalAttributeException iae) {
        throw new DataSourceException(
            "Unexpected error occurred while " + "restoring deleted featues", iae);
      } finally {
        if (fr != null) fr.close();
        if (fw != null) fw.close();
      }
    }

    // Now onto the modified features, that were there, and still are there.
    // Since we cannot get a sorted writer we have to do a kind of inner loop scan
    // (note, a parellel scan of similarly sorted reader and writer would be more
    // efficient, but writer sorting is not there...)
    // Here it's possible to work against the external API, thought it would be more
    // efficient (but more complex) to work against the wrapped one.
    if (!mfids.getModified().isEmpty()) {
      state.setTypeNameDirty(getSchema().getTypeName());
      state.setFidsDirty(getSchema().getTypeName(), mfids.getModified());

      Filter modifiedIdFilter = store.buildFidFilter(mfids.getModified());
      Filter mifCurrent =
          store.buildVersionedFilter(schema.getTypeName(), modifiedIdFilter, new RevisionInfo());
      FeatureReader<SimpleFeatureType, SimpleFeature> fr = null;
      FeatureWriter<SimpleFeatureType, SimpleFeature> fw = null;
      try {
        fw = store.getFeatureWriter(schema.getTypeName(), mifCurrent, t);
        while (fw.hasNext()) {
          SimpleFeature current = fw.next();
          // for each current feature, fetch its state as of the rollback revision
          Filter currIdFilter = ff.id(Collections.singleton(ff.featureId(current.getID())));
          Filter cidToVersion =
              store.buildVersionedFilter(schema.getTypeName(), currIdFilter, mfids.fromRevision);
          DefaultQuery q = new DefaultQuery(schema.getTypeName(), cidToVersion);
          q.setVersion(mfids.fromRevision.toString());
          fr = store.getFeatureReader(q, t);
          // NOTE(review): fr.next() assumes the historical revision always exists — confirm
          SimpleFeature original = fr.next();
          for (int i = 0; i < original.getFeatureType().getAttributeCount(); i++) {
            current.setAttribute(i, original.getAttribute(i));
          }
          fr.close();
          fw.write();
        }
      } catch (IllegalAttributeException iae) {
        throw new DataSourceException(
            "Unexpected error occurred while " + "restoring deleted featues", iae);
      } finally {
        if (fr != null) fr.close();
        if (fw != null) fw.close();
      }
    }

    // if it's auto commit, don't forget to actually commit
    if (autoCommit) {
      t.commit();
      t.close();
    }
  }
 /**
  * Counts the features matched by the query at the revision the query refers to.
  *
  * @param query the query to count against; it is retyped and version-enabled first
  * @return the number of matching features
  * @throws IOException if the underlying count fails
  */
 public int getCount(Query query) throws IOException {
   // retype the query to this feature type, let the store rewrite it so it only hits the
   // requested revision, and delegate the actual counting to the wrapped locking source
   return locking.getCount(store.buildVersionedQuery(getTypedQuery(query)));
 }
 /**
  * Computes the bounds of the features matched by the query at the revision it refers to.
  *
  * @param query the query to compute bounds for; it is retyped and version-enabled first
  * @return the envelope of the matching features
  * @throws IOException if the underlying bounds computation fails
  */
 public ReferencedEnvelope getBounds(Query query) throws IOException {
   // retype the query to this feature type, let the store rewrite it so it only hits the
   // requested revision, and delegate the bounds computation to the wrapped locking source
   return locking.getBounds(store.buildVersionedQuery(getTypedQuery(query)));
 }