/**
  * Scan each required path in each of the workspaces.
  *
  * <p>For every workspace/path combination the registered {@link IndexingCallback} instances are
  * notified before and after the scan. Failures from the callbacks or from the scan itself are
  * logged (via {@link #logIndexingError}) and do not abort the remaining work; {@code
  * afterIndexing()} is always invoked from a {@code finally} block, even when the scan fails.
  *
  * @param operation the scanning operation that is to be called for each workspace & path
  *     combination; may not be null
  */
 public void onEachPathInWorkspace(ScanOperation operation) {
   for (Map.Entry<String, PathToScan> entry : pathsToScanByWorkspace.entries()) {
     String workspaceName = entry.getKey();
     PathToScan pathToScan = entry.getValue();
     try {
       // Notify the callbacks that indexing of this path is about to begin ...
       for (IndexingCallback callback : pathToScan) {
         try {
           callback.beforeIndexing();
         } catch (RuntimeException e) {
           logIndexingError(e, pathToScan, workspaceName);
         }
       }
       operation.scan(workspaceName, pathToScan.path());
     } catch (RuntimeException e) {
       logIndexingError(e, pathToScan, workspaceName);
     } finally {
       // Always notify the callbacks that indexing has completed ...
       for (IndexingCallback callback : pathToScan) {
         try {
           callback.afterIndexing();
         } catch (RuntimeException e) {
           logIndexingError(e, pathToScan, workspaceName);
         }
       }
     }
   }
 }

 /**
  * Log a scan or callback failure without propagating it, so the remaining workspace paths are
  * still processed. (Extracted: this identical logging call previously appeared three times.)
  *
  * @param e the failure; may not be null
  * @param pathToScan the path being scanned when the failure occurred; may not be null
  * @param workspaceName the name of the workspace being scanned; may not be null
  */
 private void logIndexingError(RuntimeException e, PathToScan pathToScan, String workspaceName) {
   Logger.getLogger(getClass())
       .error(e, JcrI18n.errorIndexing, pathToScan.path(), workspaceName, e.getMessage());
 }
예제 #2
0
  /**
   * Register the node types defined by the resource at the given URL. Content beginning with an
   * XML declaration is treated as Jackrabbit XML format; anything else is treated as CND format.
   *
   * @param url the location of the node type definitions; its stream is read fully
   * @param allowUpdate whether existing node type definitions may be updated
   * @return the iterator over the registered node types
   * @throws IOException if the content cannot be read
   * @throws RepositoryException if the CND content contains errors or registration fails
   */
  @Override
  public NodeTypeIterator registerNodeTypes(URL url, boolean allowUpdate)
      throws IOException, RepositoryException {
    // NOTE(review): assumes IoUtil.read closes the stream it is handed — confirm
    final String content = IoUtil.read(url.openStream());

    if (content.startsWith("<?xml")) {
      // This is Jackrabbit XML format ...
      InputSource xmlSource = new InputSource(new StringReader(content));
      return registerNodeTypes(importFromXml(xmlSource), allowUpdate);
    }

    // Assume this is CND format; parse it and collect any problems ...
    CndImporter cndImporter = new CndImporter(context(), true);
    Problems importProblems = new SimpleProblems();
    cndImporter.importFrom(content, importProblems, url.toExternalForm());

    // Check for (and report) any problems ...
    if (importProblems.hasProblems()) {
      String summary = messageFrom(importProblems);
      if (importProblems.hasErrors()) {
        // Errors are fatal ...
        throw new RepositoryException(
            JcrI18n.errorsParsingNodeTypeDefinitions.text(url.toExternalForm(), summary));
      }
      // Otherwise there are only warnings, so log them and continue ...
      Logger.getLogger(getClass())
          .warn(JcrI18n.warningsParsingNodeTypeDefinitions, url.toExternalForm(), summary);
    }

    // Register the node types ...
    return registerNodeTypes(cndImporter.getNodeTypeDefinitions(), allowUpdate);
  }
    /**
     * Create the view of the given index definitions, organized so that the indexes applicable to
     * a node type (or any of its subtypes) can be found by node type name and then by provider
     * name.
     *
     * @param context the execution context, used to create {@link Name} objects
     * @param defns the index definitions; may be empty but may not be null
     * @param nodeTypes the registered node types, used to expand each index's node type into the
     *     set of that type plus all of its subtypes
     */
    protected Indexes(
        ExecutionContext context, Collection<IndexDefinition> defns, NodeTypes nodeTypes) {
      // Identify the subtypes for each node type, and do this before we build any views ...
      if (!defns.isEmpty()) {
        Map<Name, Collection<String>> subtypesByName = new HashMap<>();
        for (JcrNodeType nodeType : nodeTypes.getAllNodeTypes()) {
          // For each of the supertypes, record this node type as one of its subtypes ...
          for (JcrNodeType supertype : nodeType.getTypeAndSupertypes()) {
            Collection<String> types = subtypesByName.get(supertype.getInternalName());
            if (types == null) {
              types = new LinkedList<>();
              subtypesByName.put(supertype.getInternalName(), types);
            }
            types.add(nodeType.getName());
          }
        }

        // Now process all of the indexes ...
        NameFactory names = context.getValueFactories().getNameFactory();
        Set<Name> nodeTypeNames = new HashSet<>();
        for (IndexDefinition defn : defns) {

          // Determine all of the node types that are subtypes of any columns
          nodeTypeNames.clear();
          Name nodeTypeName = names.create(defn.getNodeTypeName());

          // Skip (but warn about) index definitions that reference an unknown node type ...
          if (!subtypesByName.containsKey(nodeTypeName)) {
            Logger.getLogger(getClass())
                .warn(
                    JcrI18n.errorIndexing,
                    "not creating index "
                        + defn.getName()
                        + " because of unknown nodeType "
                        + nodeTypeName.getString());
            continue;
          }

          indexByName.put(defn.getName(), defn);

          // Now find out all of the node types that are or subtype the named node types ...
          for (String typeAndSubtype : subtypesByName.get(nodeTypeName)) {
            Map<String, Collection<IndexDefinition>> byProvider =
                indexesByProviderByNodeTypeName.get(typeAndSubtype);
            if (byProvider == null) {
              byProvider = new HashMap<>();
              indexesByProviderByNodeTypeName.put(typeAndSubtype, byProvider);
            }
            Collection<IndexDefinition> indexes = byProvider.get(defn.getProviderName());
            if (indexes == null) {
              indexes = new LinkedList<>();
              // FIX: store the new collection under the provider name — the same key used by the
              // lookup above. It was previously stored under 'typeAndSubtype', so the collection
              // could never be found again and each definition created a fresh, orphaned list.
              byProvider.put(defn.getProviderName(), indexes);
            }
            indexes.add(defn);
          }
        }
      }
    }
예제 #4
0
  /**
   * Create and install the ModeShape engine service in the JBoss service container, register a
   * JNDI binding for the engine, and record every installed controller.
   *
   * @param context the current operation context; used to obtain the service target
   * @param operation the operation being executed (not referenced in this method — TODO confirm
   *     it is needed for the signature)
   * @param model the model node from which the engine is built (via {@code buildModeShapeEngine})
   * @param newControllers the list that receives each installed {@link ServiceController} so the
   *     caller can track them; may not be null
   */
  private void initializeModeShapeEngine(
      final OperationContext context,
      final ModelNode operation,
      ModelNode model,
      final List<ServiceController<?>> newControllers) {
    ServiceTarget target = context.getServiceTarget();

    // Listener used to react when the service container terminates ...
    final JBossLifeCycleListener shutdownListener = new JBossLifeCycleListener();

    engine = buildModeShapeEngine(model);

    // Engine service
    ServiceBuilder<JcrEngine> engineBuilder =
        target.addService(ModeShapeServiceNames.ENGINE, engine);
    engineBuilder.setInitialMode(ServiceController.Mode.ACTIVE);
    ServiceController<JcrEngine> controller = engineBuilder.install();
    // Hook container termination so the engine is shut down with the server ...
    controller.getServiceContainer().addTerminateListener(shutdownListener);
    newControllers.add(controller);

    // JNDI Binding
    final ReferenceFactoryService<JcrEngine> referenceFactoryService =
        new ReferenceFactoryService<JcrEngine>();
    final ServiceName referenceFactoryServiceName =
        ModeShapeServiceNames.ENGINE.append("reference-factory"); // $NON-NLS-1$
    final ServiceBuilder<?> referenceBuilder =
        target.addService(referenceFactoryServiceName, referenceFactoryService);
    // The reference factory requires the engine service to be available ...
    referenceBuilder.addDependency(
        ModeShapeServiceNames.ENGINE, JcrEngine.class, referenceFactoryService.getInjector());
    referenceBuilder.setInitialMode(ServiceController.Mode.ACTIVE);

    final ContextNames.BindInfo bindInfo =
        ContextNames.bindInfoFor(ModeShapeJndiNames.JNDI_BASE_NAME);
    final BinderService binderService = new BinderService(bindInfo.getBindName());
    final ServiceBuilder<?> binderBuilder =
        target.addService(bindInfo.getBinderServiceName(), binderService);
    // The binder needs the engine instance (injected as a managed reference) ...
    binderBuilder.addDependency(
        ModeShapeServiceNames.ENGINE,
        JcrEngine.class,
        new ManagedReferenceInjector<JcrEngine>(binderService.getManagedObjectInjector()));
    // ... and the parent JNDI naming store to bind into.
    binderBuilder.addDependency(
        bindInfo.getParentContextServiceName(),
        ServiceBasedNamingStore.class,
        binderService.getNamingStoreInjector());
    binderBuilder.setInitialMode(ServiceController.Mode.ACTIVE);

    Logger.getLogger(getClass())
        .debug("Binding ModeShape to JNDI name '{0}'", bindInfo.getAbsoluteJndiName());

    newControllers.add(referenceBuilder.install());
    newControllers.add(binderBuilder.install());
  }
예제 #5
0
  /**
   * Obtain a new temporary directory that can be used by a transient binary store. Note that none
   * of the directories are actually created at this time, but are instead created (if needed)
   * during {@link #initializeStorage(File)}.
   *
   * @return the new directory; never null
   */
  private static File newTempDirectory() {
    final String tmpDirPath = System.getProperty(JAVA_IO_TMPDIR);
    if (tmpDirPath == null) {
      throw new SystemFailureException(
          JcrI18n.tempDirectorySystemPropertyMustBeSet.text(JAVA_IO_TMPDIR));
    }
    final File parentDir = new File(tmpDirPath);
    final boolean runningInJBoss = System.getProperty(JBOSS_SERVER_DATA_DIR) != null;
    if (!runningInJBoss) {
      // Outside JBoss AS (where the binaries directory is always specified explicitly) the
      // location is not otherwise visible, so log where the temporary directory is ...
      Logger.getLogger(TransientBinaryStore.class)
          .info(JcrI18n.tempDirectoryLocation, parentDir.getAbsolutePath());
    }

    // Create a temporary directory in the "java.io.tmpdir" directory ...
    return new File(parentDir, "modeshape-binary-store");
  }
예제 #6
0
 /**
  * Ensures that the directory used by this binary store exists and can be both read and written
  * to.
  *
  * @throws BinaryStoreException if the directory cannot be written to, read, or (if needed)
  *     created
  */
 @Override
 protected void initializeStorage(File directory) throws BinaryStoreException {
   if (!directory.exists()) {
     Logger.getLogger(getClass())
         .debug(
             "Creating temporary directory for transient binary store: {0}",
             directory.getAbsolutePath());
     // FIX: check the result of mkdirs(); a silent failure previously surfaced later as a
     // misleading "unable to read" error. The extra exists() check tolerates another thread or
     // process creating the directory concurrently (mkdirs returns false in that case too).
     if (!directory.mkdirs() && !directory.exists()) {
       throw new BinaryStoreException(
           JcrI18n.unableToWriteTemporaryDirectory.text(
               directory.getAbsolutePath(), JAVA_IO_TMPDIR));
     }
   }
   if (!directory.canRead()) {
     throw new BinaryStoreException(
         JcrI18n.unableToReadTemporaryDirectory.text(directory.getAbsolutePath(), JAVA_IO_TMPDIR));
   }
   if (!directory.canWrite()) {
     throw new BinaryStoreException(
         JcrI18n.unableToWriteTemporaryDirectory.text(
             directory.getAbsolutePath(), JAVA_IO_TMPDIR));
   }
 }
/** Records nodes and unresolved references. */
@NotThreadSafe
public class ReferenceResolver {

  static final Logger LOGGER = Logger.getLogger(ReferenceResolver.class);

  // key = data type UUID (string form), value = data type name.
  // NOTE(review): despite the field name, the values are type names, not URLs — confirm callers.
  public static final Map<String, String> STANDARD_DATA_TYPE_URLS_BY_UUID;
  // key = data type href/URL, value = data type name
  public static final Map<String, String> STANDARD_DATA_TYPE_URLS_TO_NAMES;
  // key = data type name, value = data type UUID (string form)
  public static final Map<String, String> STANDARD_DATA_TYPE_UUIDS_BY_NAMES;

  static {
    final Map<String, String> dataTypes = new HashMap<String, String>();
    // Really old models have simple data types hrefs that contain UUIDs ...
    String sdtUrl = "http://www.metamatrix.com/metamodels/SimpleDatatypes-instance#mmuuid:";
    dataTypes.put(sdtUrl + "4ca2ae00-3a95-1e20-921b-eeee28353879", "NMTOKEN");
    dataTypes.put(sdtUrl + "4df43700-3b13-1e20-921b-eeee28353879", "normalizedString");
    dataTypes.put(sdtUrl + "3425cb80-d844-1e20-9027-be6d2c3b8b3a", "token");
    dataTypes.put(sdtUrl + "d4d980c0-e623-1e20-8c26-a038c6ed7576", "language");
    dataTypes.put(sdtUrl + "e66c4600-e65b-1e20-8c26-a038c6ed7576", "Name");
    dataTypes.put(sdtUrl + "ac00e000-e676-1e20-8c26-a038c6ed7576", "NCName");
    dataTypes.put(sdtUrl + "4b0f8500-e6a6-1e20-8c26-a038c6ed7576", "NMTOKENS");
    dataTypes.put(sdtUrl + "dd33ff40-e6df-1e20-8c26-a038c6ed7576", "IDREF");
    dataTypes.put(sdtUrl + "88b13dc0-e702-1e20-8c26-a038c6ed7576", "ID");
    dataTypes.put(sdtUrl + "9fece300-e71a-1e20-8c26-a038c6ed7576", "ENTITY");
    dataTypes.put(sdtUrl + "3c99f780-e72d-1e20-8c26-a038c6ed7576", "IDREFS");
    dataTypes.put(sdtUrl + "20360100-e742-1e20-8c26-a038c6ed7576", "ENTITIES");
    dataTypes.put(sdtUrl + "45da3500-e78f-1e20-8c26-a038c6ed7576", "integer");
    dataTypes.put(sdtUrl + "cbdd6e40-b9d2-1e21-8c26-a038c6ed7576", "nonPositiveInteger");
    dataTypes.put(sdtUrl + "0e081200-b8a4-1e21-b812-969c8fc8b016", "nonNegativeInteger");
    dataTypes.put(sdtUrl + "86d29280-b8d3-1e21-b812-969c8fc8b016", "negativeInteger");
    dataTypes.put(sdtUrl + "8cdee840-b900-1e21-b812-969c8fc8b016", "long");
    dataTypes.put(sdtUrl + "33add3c0-b98d-1e21-b812-969c8fc8b016", "int");
    dataTypes.put(sdtUrl + "5bbcf140-b9ae-1e21-b812-969c8fc8b016", "short");
    dataTypes.put(sdtUrl + "26dc1cc0-b9c8-1e21-b812-969c8fc8b016", "byte");
    dataTypes.put(sdtUrl + "1cbbd380-b9ea-1e21-b812-969c8fc8b016", "positiveInteger");
    dataTypes.put(sdtUrl + "54b98780-ba14-1e21-b812-969c8fc8b016", "unsignedLong");
    dataTypes.put(sdtUrl + "badcbd80-ba63-1e21-b812-969c8fc8b016", "unsignedInt");
    dataTypes.put(sdtUrl + "327093c0-ba88-1e21-b812-969c8fc8b016", "unsignedShort");
    dataTypes.put(sdtUrl + "cff745c0-baa2-1e21-b812-969c8fc8b016", "unsignedByte");
    dataTypes.put(sdtUrl + "bf6c34c0-c442-1e24-9b01-c8207cd53eb7", "string");
    dataTypes.put(sdtUrl + "dc476100-c483-1e24-9b01-c8207cd53eb7", "boolean");
    dataTypes.put(sdtUrl + "569dfa00-c456-1e24-9b01-c8207cd53eb7", "decimal");
    dataTypes.put(sdtUrl + "d86b0d00-c48a-1e24-9b01-c8207cd53eb7", "float");
    dataTypes.put(sdtUrl + "1f18b140-c4a3-1e24-9b01-c8207cd53eb7", "double");
    dataTypes.put(sdtUrl + "3b892180-c4a7-1e24-9b01-c8207cd53eb7", "time");
    dataTypes.put(sdtUrl + "65dcde00-c4ab-1e24-9b01-c8207cd53eb7", "date");
    dataTypes.put(sdtUrl + "62472700-a064-1e26-9b08-d6079ebe1f0d", "char");
    dataTypes.put(sdtUrl + "822b9a40-a066-1e26-9b08-d6079ebe1f0d", "biginteger");
    dataTypes.put(sdtUrl + "f2249740-a078-1e26-9b08-d6079ebe1f0d", "bigdecimal");
    dataTypes.put(sdtUrl + "6d9809c0-a07e-1e26-9b08-d6079ebe1f0d", "timestamp");
    dataTypes.put(sdtUrl + "051a0640-b4e8-1e26-9f33-b76fd9d5fa79", "object");
    dataTypes.put(sdtUrl + "559646c0-4941-1ece-b22b-f49159d22ad3", "clob");
    dataTypes.put(sdtUrl + "5a793100-1836-1ed0-ba0f-f2334f5fbf95", "blob");
    dataTypes.put(sdtUrl + "43f5274e-55e1-1f87-ba1c-eea49143eb32", "XMLLiteral");
    dataTypes.put(sdtUrl + "28d98540-b3e7-1e2a-9a03-beb8638ffd21", "duration");
    dataTypes.put(sdtUrl + "5c69dec0-b3ea-1e2a-9a03-beb8638ffd21", "dateTime");
    dataTypes.put(sdtUrl + "17d08040-b3ed-1e2a-9a03-beb8638ffd21", "gYearMonth");
    dataTypes.put(sdtUrl + "b02c7600-b3f2-1e2a-9a03-beb8638ffd21", "gYear");
    dataTypes.put(sdtUrl + "6e604140-b3f5-1e2a-9a03-beb8638ffd21", "gMonthDay");
    dataTypes.put(sdtUrl + "860b7dc0-b3f8-1e2a-9a03-beb8638ffd21", "gDay");
    dataTypes.put(sdtUrl + "187f5580-b3fb-1e2a-9a03-beb8638ffd21", "gMonth");
    dataTypes.put(sdtUrl + "6247ec80-e8a4-1e2a-b433-fb67ea35c07e", "anyURI");
    dataTypes.put(sdtUrl + "eeb5d780-e8c3-1e2a-b433-fb67ea35c07e", "QName");
    dataTypes.put(sdtUrl + "3dcaf900-e8dc-1e2a-b433-fb67ea35c07e", "NOTATION");
    dataTypes.put(sdtUrl + "d9998500-ebba-1e2a-9319-8eaa9b2276c7", "hexBinary");
    dataTypes.put(sdtUrl + "b4c99380-ebc6-1e2a-9319-8eaa9b2276c7", "base64Binary");

    // Populate the name-to-UUID mapping ...
    final Map<String, String> dataTypesByUuid = new HashMap<String, String>();
    final Map<String, String> dataTypeUuidsByName = new HashMap<String, String>();
    for (final Map.Entry<String, String> entry : dataTypes.entrySet()) {
      final String url = entry.getKey();
      final String name = entry.getValue();
      String uuidString = url.substring(sdtUrl.length());
      try {
        // Normalize through UUID.fromString so the canonical string form is used as the key ...
        final String uuid = UUID.fromString(uuidString).toString();
        dataTypesByUuid.put(uuid, name);
        dataTypeUuidsByName.put(name, uuid);
      } catch (final IllegalArgumentException e) {
        LOGGER.error(e, TeiidI18n.uuidNotValid, uuidString);
      }
    }

    // Newer models have simple data types hrefs that contain names ...
    final String xsdUrl = "http://www.w3.org/2001/XMLSchema#";
    for (final String value : new HashSet<String>(dataTypes.values())) {
      dataTypes.put(xsdUrl + value, value);
    }
    sdtUrl = "http://www.metamatrix.com/metamodels/SimpleDatatypes-instance#";
    for (final String value : new HashSet<String>(dataTypes.values())) {
      dataTypes.put(sdtUrl + value, value);
    }

    STANDARD_DATA_TYPE_URLS_TO_NAMES = Collections.unmodifiableMap(dataTypes);
    STANDARD_DATA_TYPE_URLS_BY_UUID = Collections.unmodifiableMap(dataTypesByUuid);
    STANDARD_DATA_TYPE_UUIDS_BY_NAMES = Collections.unmodifiableMap(dataTypeUuidsByName);
  }

  // key = uuid, value = UnresolvedReference
  private final Map<String, UnresolvedReference> unresolved =
      new HashMap<String, UnresolvedReference>();

  // key = uuid, value = Node
  private final Map<String, Node> uuidToNode = new HashMap<String, Node>();

  // key = uuid, value = XmiElement
  private final Map<String, XmiElement> uuidToXmiElement = new HashMap<String, XmiElement>();

  /**
   * @param xmiUuid the UUID of the model object whose node has not been created (cannot be <code>
   *     null</code>)
   * @return the unresolved reference (never <code>null</code>)
   * @throws Exception if a node for the specified UUID already exists
   */
  public UnresolvedReference addUnresolvedReference(String xmiUuid) throws Exception {
    CheckArg.isNotEmpty(xmiUuid, "xmiUuid");

    xmiUuid = resolveInternalReference(xmiUuid);

    // A reference is only "unresolved" while no node exists for it ...
    if (this.uuidToNode.containsKey(xmiUuid)) {
      throw new Exception(TeiidI18n.illegalUnresolvedReference.text(xmiUuid));
    }

    // see if already unresolved
    UnresolvedReference unresolved = this.unresolved.get(xmiUuid);

    // create unresolved if necessary
    if (unresolved == null) {
      unresolved = new UnresolvedReference(xmiUuid);
      this.unresolved.put(xmiUuid, unresolved);
      LOGGER.debug("added '{0}' to the list of unresolved references", xmiUuid);
    }

    return unresolved;
  }

  /**
   * @param xmiUuid the UUID of the node being requested (cannot be <code>null</code> or empty)
   * @return the node or <code>null</code> if not found
   */
  Node getNode(final String xmiUuid) {
    CheckArg.isNotEmpty(xmiUuid, "xmiUuid");
    return this.uuidToNode.get(xmiUuid);
  }

  /** @return the unresolved references (never <code>null</code>) */
  public Map<String, UnresolvedReference> getUnresolved() {
    return this.unresolved;
  }

  /** @return a map of the registered XMI elements keyed by UUID (never <code>null</code>) */
  Map<String, XmiElement> getUuidMappings() {
    return this.uuidToXmiElement;
  }

  /**
   * @param value the value being checked to see if it is a reference (cannot be <code>null</code>
   *     or empty)
   * @return <code>true</code> if value is a reference
   */
  public boolean isReference(final String value) {
    CheckArg.isNotEmpty(value, "value");
    return (value.startsWith(CoreLexicon.ModelId.MM_HREF_PREFIX));
  }

  /**
   * @param xmiUuid the UUID associated with the node being registered (cannot be <code>null</code>
   *     or empty)
   * @param node the node being registered (cannot be <code>null</code>)
   */
  public void record(String xmiUuid, final Node node) {
    CheckArg.isNotEmpty(xmiUuid, "xmiUuid");
    CheckArg.isNotNull(node, "node");

    if (xmiUuid.startsWith(CoreLexicon.ModelId.MM_UUID_PREFIX)) {
      // NOTE(review): the '+ 1' skips one character after the prefix (presumably a ':'
      // delimiter) — confirm MM_UUID_PREFIX does not already include the delimiter.
      xmiUuid = xmiUuid.substring(CoreLexicon.ModelId.MM_UUID_PREFIX.length() + 1);
    }

    this.uuidToNode.put(xmiUuid, node);
  }

  /**
   * @param xmiUuid the UUID associated with the XMI element being registered (cannot be <code>null
   *     </code> or empty)
   * @param xmiElement the XMI element being registered (cannot be <code>null</code>)
   */
  void record(final String xmiUuid, final XmiElement xmiElement) {
    CheckArg.isNotEmpty(xmiUuid, "xmiUuid");
    CheckArg.isNotNull(xmiElement, "xmiElement");
    this.uuidToXmiElement.put(xmiUuid, xmiElement);
  }

  /**
   * @param unresolved the unresolved reference being marked as resolved (cannot be <code>null
   *     </code>)
   */
  public void resolved(final UnresolvedReference unresolved) {
    CheckArg.isNotNull(unresolved, "unresolved");
    final UnresolvedReference resolved = this.unresolved.remove(unresolved.getUuid());
    assert (unresolved == resolved);
    LOGGER.debug("UUID '{0}' has been resolved", unresolved.getUuid());
  }

  /**
   * @param proposedUuid the value whose UUID prefix is being removed (cannot be <code>null</code>
   *     or empty)
   * @return the UUID or <code>null</code> if the proposedUuid is not a UUID
   */
  public String resolveInternalReference(final String proposedUuid) {
    CheckArg.isNotNull(proposedUuid, "proposedUuid");
    String mmuuid = null;
    final int index = proposedUuid.indexOf(CoreLexicon.ModelId.MM_HREF_PREFIX);

    if (index != -1) {
      // It's a local reference ...
      try {
        mmuuid =
            UUID.fromString(
                    proposedUuid.substring(index + CoreLexicon.ModelId.MM_HREF_PREFIX.length()))
                .toString();
      } catch (final IllegalArgumentException e) {
        // ignore — the text after the prefix was not a valid UUID, so return null
      }
    } else {
      try {
        mmuuid = UUID.fromString(proposedUuid).toString();
      } catch (final IllegalArgumentException e) {
        // ignore — not a UUID at all, so return null
      }
    }

    return mmuuid;
  }

  /** A named property (single- or multi-valued) whose owning node has not yet been resolved. */
  final class UnresolvedProperty {

    private final boolean multi;
    private final String name;
    private final List<String> values;

    protected UnresolvedProperty(final String name, final String value, final boolean multi) {
      this.name = name;
      this.values = new ArrayList<String>();
      this.values.add(value);
      this.multi = multi;
    }

    // Additional values are only accepted for multi-valued properties; otherwise ignored.
    protected void addValue(final String newValue) {
      if (this.multi) {
        this.values.add(newValue);
      }
    }

    public String getName() {
      return this.name;
    }

    /**
     * @return the single value (or <code>null</code> if there is none)
     * @throws IllegalArgumentException if this property is multi-valued
     */
    public String getValue() {
      if (this.multi) {
        throw new IllegalArgumentException();
      }

      return (this.values.isEmpty() ? null : this.values.get(0));
    }

    /**
     * @return the values of this multi-valued property
     * @throws IllegalArgumentException if this property is single-valued
     */
    public List<String> getValues() {
      if (this.multi) {
        return this.values;
      }

      throw new IllegalArgumentException();
    }

    public boolean isMulti() {
      return this.multi;
    }
  }

  /**
   * A referenced UUID that did not have a node associated with it at the time the reference was
   * found.
   */
  class UnresolvedReference {

    private final Set<String> mixins = new HashSet<String>(2);

    /**
     * Once resolved, the node specified node properties will be set with the values. The key is the
     * name of the property and the value is a collection of values to set.
     */
    private final Map<String, UnresolvedProperty> properties =
        new HashMap<String, ReferenceResolver.UnresolvedProperty>();

    /**
     * The unresolved node is the node whose name will be used to set a referencer property.
     *
     * <p>The key is the name of the referencer node property that will be set with the resolved
     * node name. The value is a collection of referencer node UUIDs.
     */
    private final Multimap<String, String> refNames = ArrayListMultimap.create();

    /**
     * The unresolved reference is the node that is the reference.
     *
     * <p>Once resolved, the specified referencer property will be set with the weak reference of
     * the resolved node. The key is the referencer property and the value is a collection of
     * referencer UUIDs.
     */
    private final Multimap<String, String> refRefs = ArrayListMultimap.create();

    /**
     * The unresolved reference is the node whose reference property needs to be set.
     *
     * <p>Once resolved, a weak reference value will be created from each of the referenced node
     * UUID values and the specified property will be set. The key is the name of the referencer
     * property. The value is a collection of referenced UUIDs.
     */
    private final Multimap<String, String> refs = ArrayListMultimap.create();

    private final String uuid;

    /**
     * <strong>Should only be called by the reference resolver.</strong>
     *
     * @param uuid the UUID of the unresolved reference (cannot be <code>null</code> or empty)
     */
    UnresolvedReference(final String uuid) {
      CheckArg.isNotEmpty(uuid, "uuid");
      this.uuid = uuid;
    }

    /**
     * @param newMixin the mixin to add to the unresolved reference (cannot be <code>null</code> or
     *     empty)
     * @return <code>true</code> if the mixin was successfully added
     */
    public boolean addMixin(final String newMixin) {
      CheckArg.isNotEmpty(newMixin, "newMixin");
      final boolean added = this.mixins.add(newMixin);

      if (added) {
        LOGGER.debug("added mixin '{0}' to the unresolved reference '{1}'", newMixin, this.uuid);
      }

      return added;
    }

    /**
     * @param propertyName the property name (cannot be <code>null</code> or empty)
     * @param propertyValue the property value (can be <code>null</code> or empty)
     * @param multiValued <code>true</code> if property is multi-valued
     */
    public void addProperty(
        final String propertyName, final String propertyValue, final boolean multiValued) {
      CheckArg.isNotEmpty(propertyName, "propertyName");

      if (!StringUtil.isBlank(propertyValue)) {
        if (multiValued) {
          UnresolvedProperty unresolvedProperty = this.properties.get(propertyName);

          if (unresolvedProperty == null) {
            unresolvedProperty = new UnresolvedProperty(propertyName, propertyValue, true);
            // FIX: register the newly-created property; previously it was never stored, so the
            // first value of every multi-valued property was silently lost.
            this.properties.put(propertyName, unresolvedProperty);
          } else {
            unresolvedProperty.addValue(propertyValue);
          }
          LOGGER.debug(
              "added multi-valued property '{0}' with value '{1}' to the unresolved reference '{2}'",
              propertyName, propertyValue, this.uuid);
        } else {
          this.properties.put(
              propertyName, new UnresolvedProperty(propertyName, propertyValue, false));
          LOGGER.debug(
              "added property '{0}' with value '{1}' to the unresolved reference '{2}'",
              propertyName, propertyValue, this.uuid);
        }
      }
    }

    /**
     * @param propertyName the name of the referencer property to set once the reference is resolved
     *     (cannot be <code>null</code> or empty)
     * @param referencedUuid the UUID of the referenced node (cannot be <code>null</code> or empty)
     */
    public void addReference(final String propertyName, final String referencedUuid) {
      CheckArg.isNotEmpty(propertyName, "propertyName");
      CheckArg.isNotEmpty(referencedUuid, "referencerUuid");
      this.refs.put(propertyName, referencedUuid);
    }

    /**
     * @param referencerUuid the UUID of the referencer whose node property will be set with the
     *     weak reference of the resolved node (cannot be <code>null</code> or empty)
     * @param referencerPropertyName the name of the referencer property to set with the weak
     *     reference (cannot be <code>null</code> or empty)
     */
    public void addReferencerReference(
        final String referencerUuid, final String referencerPropertyName) {
      CheckArg.isNotEmpty(referencerUuid, "referencerUuid");
      CheckArg.isNotEmpty(referencerPropertyName, "referencerPropertyName");
      this.refRefs.put(referencerPropertyName, referencerUuid);
    }

    /**
     * @param referencerUuid the UUID of the node whose property needs to be set with the name of
     *     the resolved node (cannot be <code>null</code> or empty)
     * @param referencerPropertyName the name of the referencer property being set (cannot be <code>
     *     null</code> or empty)
     */
    public void addResolvedName(final String referencerUuid, final String referencerPropertyName) {
      CheckArg.isNotEmpty(referencerUuid, "referencerUuid");
      CheckArg.isNotEmpty(referencerPropertyName, "referencerPropertyName");
      this.refNames.put(referencerPropertyName, referencerUuid);
    }

    /** @return the mixins to add to the resolved node (never <code>null</code>) */
    protected Set<String> getMixins() {
      return this.mixins;
    }

    /**
     * Key is property name. Value is a collection of unresolved properties.
     *
     * @return the unresolved properties that have to be added to the resolved node (never <code>
     *     null</code>)
     */
    protected Map<String, UnresolvedProperty> getProperties() {
      return this.properties;
    }

    /**
     * Key is property name that needs to be set with the resolved node's name. Value is the UUID of
     * the node whose property is being set.
     *
     * @return the UUIDs of the nodes that need to be set with the resolved node name (never <code>
     *     null</code>)
     */
    protected Multimap<String, String> getReferenceNames() {
      return this.refNames;
    }

    /**
     * The unresolved reference is the node that the weak reference will be created from and set on
     * the referencer node.
     *
     * @return the referencers that need to have weak references set on (never <code>null</code>)
     */
    protected Multimap<String, String> getReferencerReferences() {
      return this.refRefs;
    }

    /**
     * Key is property name. Value is a collection of one or more UUID values. If multiple values
     * then property is multi-valued.
     *
     * @return the references that need to have weak references created (never <code>null</code>)
     */
    protected Multimap<String, String> getReferences() {
      return this.refs;
    }

    /** @return the UUID (never <code>null</code> or empty) */
    public String getUuid() {
      return this.uuid;
    }
  }
}
/**
 * The AbstractRepositoryDelegate provides the common logic for the implementation of the {@link
 * RepositoryDelegate}
 */
public abstract class AbstractRepositoryDelegate implements RepositoryDelegate {

  protected final Logger logger = Logger.getLogger(getClass());

  private Repository repository = null;
  private Set<String> repositoryNames = null;
  private ConnectionInfo connInfo = null;
  // Set once in the constructor; made final for immutability.
  private final String url;
  private final Properties propertiesInfo;

  /**
   * @param url the JDBC url for this delegate; passed to {@link #createConnectionInfo}
   * @param info extra connection properties; passed to {@link #createConnectionInfo}
   */
  public AbstractRepositoryDelegate(String url, Properties info) {
    this.url = url;
    this.propertiesInfo = info;
  }

  /**
   * Returns a {@link ConnectionInfo} object which represents the information of a specific
   * connection, from a given url format and some {@link Properties}
   *
   * @param url a {@code non-null} string which represents a jdbc url
   * @param info a {@code non-null} {@link Properties} instance which may contain extra information
   *     needed by the connection
   * @return {@link ConnectionInfo} instance, never {@code null}
   */
  abstract ConnectionInfo createConnectionInfo(final String url, final Properties info);

  /**
   * Implementor is responsible for creating the repository.
   *
   * @throws SQLException
   */
  abstract void retrieveRepository() throws SQLException;

  /** Lazily create (and initialize) the connection info; synchronized for thread safety. */
  @Override
  public synchronized ConnectionInfo getConnectionInfo() {
    if (this.connInfo == null) {
      this.connInfo = createConnectionInfo(url, propertiesInfo);
      this.connInfo.init();
    }
    return connInfo;
  }

  @Override
  public void closeStatement() {}

  @SuppressWarnings("unused")
  @Override
  public void commit() throws RepositoryException {}

  @Override
  public void close() {}

  @SuppressWarnings("unused")
  @Override
  public void rollback() throws RepositoryException {}

  /**
   * Create a new {@link JcrConnection}, retrieving the repository first if it has not yet been
   * loaded.
   *
   * @param info the driver information for the connection
   * @throws SQLException if the repository cannot be retrieved
   */
  @Override
  public Connection createConnection(DriverInfo info) throws SQLException {
    // FIX: corrected typo in the debug message ("RepositoryDelegte").
    logger.debug("Creating connection for RepositoryDelegate");
    // NOTE(review): unlike getConnectionInfo(), this lazy initialization is not synchronized;
    // concurrent callers could invoke retrieveRepository() more than once — confirm usage is
    // single-threaded before adding locking.
    if (this.repository == null) {
      retrieveRepository();
    }
    return new JcrConnection(this, info);
  }

  public Repository getRepository() {
    return this.repository;
  }

  protected void setRepository(Repository repository) {
    this.repository = repository;
  }

  public String getRepositoryName() {
    return getConnectionInfo().getRepositoryName();
  }

  protected void setRepositoryName(String repositoryName) {
    this.getConnectionInfo().setRepositoryName(repositoryName);
  }

  @Override
  public Set<String> getRepositoryNames() {
    return this.repositoryNames;
  }

  protected void setRepositoryNames(Set<String> repositoryNames) {
    this.repositoryNames = repositoryNames;
  }

  @Override
  public boolean isWrapperFor(Class<?> iface) {
    return iface.isInstance(this);
  }

  /**
   * Cast this delegate to the requested interface.
   *
   * @throws SQLException if this delegate does not implement {@code iface}
   */
  @Override
  public <T> T unwrap(Class<T> iface) throws SQLException {
    if (!isWrapperFor(iface)) {
      throw new SQLException(
          JdbcLocalI18n.classDoesNotImplementInterface.text(
              RepositoryDelegate.class.getSimpleName(), iface.getName()));
    }

    return iface.cast(this);
  }
}
/**
 * The {@link RepositoryIndexManager} is the maintainer of index definitions for the entire
 * repository at run-time. The repository index manager maintains an immutable view of all index
 * definitions.
 */
@ThreadSafe
class RepositoryIndexManager implements IndexManager, NodeTypes.Listener {

  /**
   * Names of properties that are known to have non-unique values when used in a single-valued
   * index.
   */
  // NOTE(review): java.util.Collections has no varargs unmodifiableSet(...) overload — this
  // presumably resolves to a project utility class also named 'Collections'; confirm the import.
  private static final Set<Name> NON_UNIQUE_PROPERTY_NAMES =
      Collections.unmodifiableSet(
          JcrLexicon.PRIMARY_TYPE,
          JcrLexicon.MIXIN_TYPES,
          JcrLexicon.PATH,
          ModeShapeLexicon.DEPTH,
          ModeShapeLexicon.LOCALNAME);
  /**
   * Names of properties that are known to have non-enumerated values when used in a single-valued
   * index.
   */
  private static final Set<Name> NON_ENUMERATED_PROPERTY_NAMES =
      Collections.unmodifiableSet(
          JcrLexicon.PRIMARY_TYPE,
          JcrLexicon.MIXIN_TYPES,
          JcrLexicon.PATH,
          ModeShapeLexicon.DEPTH,
          ModeShapeLexicon.LOCALNAME);

  // The running repository state and its originating configuration ...
  private final JcrRepository.RunningState repository;
  private final RepositoryConfiguration config;
  private final ExecutionContext context;
  // Name of the hidden 'system' workspace in which index definitions are stored ...
  private final String systemWorkspaceName;
  // Absolute path '/jcr:system/mode:indexes' under which definitions are persisted ...
  private final Path indexesPath;
  // The provider components declared in the configuration ...
  private final Collection<Component> components;
  // Registered providers keyed by provider name; safe for concurrent access ...
  private final ConcurrentMap<String, IndexProvider> providers = new ConcurrentHashMap<>();
  // Set to true once initialize() has completed ...
  private final AtomicBoolean initialized = new AtomicBoolean(false);
  // Composite writer over all registered providers; rebuilt whenever providers change ...
  private volatile IndexWriter indexWriter;

  private final Logger logger = Logger.getLogger(getClass());
  // Immutable snapshot of the current index definitions; swapped atomically on change ...
  private volatile RepositoryIndexes indexes = RepositoryIndexes.NO_INDEXES;

  RepositoryIndexManager(JcrRepository.RunningState repository, RepositoryConfiguration config) {
    this.repository = repository;
    this.config = config;
    this.context = repository.context();
    this.systemWorkspaceName = this.repository.repositoryCache().getSystemWorkspaceName();

    PathFactory pathFactory = this.context.getValueFactories().getPathFactory();
    this.indexesPath = pathFactory.createAbsolutePath(JcrLexicon.SYSTEM, ModeShapeLexicon.INDEXES);

    // Set up the index providers ...
    this.components = config.getIndexProviders();
    for (Component component : components) {
      try {
        IndexProvider provider =
            component.createInstance(ScanningQueryEngine.class.getClassLoader());
        register(provider);
      } catch (Throwable t) {
        if (t.getCause() != null) {
          t = t.getCause();
        }
        this.repository.error(
            t,
            JcrI18n.unableToInitializeIndexProvider,
            component,
            repository.name(),
            t.getMessage());
      }
    }
  }

  /**
   * Initialize this manager by calling {@link IndexProvider#initialize()} on each of the
   * currently-registered providers.
   *
   * <p>Idempotent: once initialization has completed, subsequent calls return null immediately.
   * Providers that fail to initialize are reported and removed from the registry.
   *
   * @return the information about the portions of the repository that need to be scanned to
   *     (re)build indexes; null if no scanning is required
   */
  protected synchronized ScanningTasks initialize() {
    if (initialized.get()) {
      // nothing to do ...
      return null;
    }

    // Initialize each of the providers, removing any that are not properly initialized ...
    for (Iterator<Map.Entry<String, IndexProvider>> providerIter = providers.entrySet().iterator();
        providerIter.hasNext(); ) {
      IndexProvider provider = providerIter.next().getValue();
      try {
        doInitialize(provider);
      } catch (Throwable t) {
        // Prefer reporting the underlying cause when one is present ...
        if (t.getCause() != null) {
          t = t.getCause();
        }
        repository.error(
            t,
            JcrI18n.unableToInitializeIndexProvider,
            provider.getName(),
            repository.name(),
            t.getMessage());
        // Drop the broken provider so it is never used for indexing or querying ...
        providerIter.remove();
      }
    }
    // Re-read the index definitions in case there were disabled index definitions that used the
    // now-available provider ...
    RepositoryIndexes indexes = readIndexDefinitions();

    // Notify the providers of all the index definitions (which we'll treat as "new" since we're
    // just starting up) ...
    ScanningTasks feedback = new ScanningTasks();
    for (Iterator<Map.Entry<String, IndexProvider>> providerIter = providers.entrySet().iterator();
        providerIter.hasNext(); ) {
      IndexProvider provider = providerIter.next().getValue();
      if (provider == null) continue;
      final String providerName = provider.getName();

      // Collect this provider's definitions as "changed" so it (re)creates its indexes ...
      IndexChanges changes = new IndexChanges();
      for (IndexDefinition indexDefn : indexes.getIndexDefinitions().values()) {
        if (!providerName.equals(indexDefn.getProviderName())) continue;
        changes.change(indexDefn);
      }
      // Even if there are no definitions, we still want to notify each of the providers ...
      try {
        provider.notify(
            changes,
            repository.changeBus(),
            repository.nodeTypeManager(),
            repository.repositoryCache().getWorkspaceNames(),
            feedback.forProvider(providerName));
      } catch (RuntimeException e) {
        logger.error(
            e,
            JcrI18n.errorNotifyingProviderOfIndexChanges,
            providerName,
            repository.name(),
            e.getMessage());
      }
    }

    // Refresh the index writer ...
    refreshIndexWriter();
    initialized.set(true);
    return feedback;
  }

  /**
   * Fan a node-type change out to every registered provider.
   *
   * @param updatedNodeTypes the new node types; may not be null
   */
  @Override
  public void notify(NodeTypes updatedNodeTypes) {
    for (IndexProvider indexProvider : providers.values()) {
      indexProvider.notify(updatedNodeTypes);
    }
  }

  /**
   * Import the index definitions declared in the repository configuration, registering them (with
   * updates allowed) and re-indexing the {@code /jcr:system} content that predates them.
   *
   * @throws RepositoryException if the definitions cannot be registered
   */
  synchronized void importIndexDefinitions() throws RepositoryException {
    RepositoryConfiguration.Indexes indexes = config.getIndexes();
    if (indexes.isEmpty()) return;
    List<IndexDefinition> defns = new ArrayList<>();
    for (String indexName : indexes.getIndexNames()) {
      IndexDefinition defn = indexes.getIndex(indexName);
      if (defn != null) defns.add(defn);
    }
    if (!defns.isEmpty()) {
      IndexDefinition[] array = defns.toArray(new IndexDefinition[defns.size()]);
      registerIndexes(array, true);
      // Wait while the indexes get created; the delay scales with the number of definitions ...
      try {
        Thread.sleep(500L + array.length * 50L);
      } catch (InterruptedException e) {
        // Narrowed from 'catch (Exception)': sleep only throws InterruptedException here, and the
        // thread's interrupt status must be restored before propagating the failure ...
        Thread.currentThread().interrupt();
        throw new SystemFailureException(e);
      }
      // We have to index the '/jcr:system' content, since it was created before these indexes were
      // registered ...
      repository.queryManager().reindexSystemContent();
    }
  }

  /** Rebuild the composite {@link IndexWriter} over the currently-registered providers. */
  protected void refreshIndexWriter() {
    indexWriter = CompositeIndexWriter.create(providers.values());
  }

  /**
   * Initialize the supplied provider.
   *
   * <p>Injects the execution context and environment reflectively (the fields are not publicly
   * settable), calls {@link IndexProvider#initialize()}, then invokes the non-public
   * {@code postInitialize} hook via reflection.
   *
   * @param provider the provider; may not be null
   * @throws RepositoryException if there is a problem initializing the provider
   */
  protected void doInitialize(IndexProvider provider) throws RepositoryException {

    // Set the execution context instance ...
    Reflection.setValue(provider, "context", repository.context());

    // Set the environment
    Reflection.setValue(provider, "environment", repository.environment());

    provider.initialize();

    // If successful, call the 'postInitialize' method reflectively (due to inability to call
    // directly) ...
    Method postInitialize = Reflection.findMethod(IndexProvider.class, "postInitialize");
    Reflection.invokeAccessibly(provider, postInitialize, new Object[] {});

    if (logger.isDebugEnabled()) {
      logger.debug(
          "Successfully initialized index provider '{0}' in repository '{1}'",
          provider.getName(), repository.name());
    }
  }

  /**
   * Shut down every registered provider. Each provider is shut down independently so that one
   * failure (which is logged) does not prevent the others from stopping.
   */
  void shutdown() {
    for (IndexProvider indexProvider : providers.values()) {
      try {
        indexProvider.shutdown();
      } catch (RepositoryException e) {
        logger.error(
            e,
            JcrI18n.errorShuttingDownIndexProvider,
            repository.name(),
            indexProvider.getName(),
            e.getMessage());
      }
    }
  }

  /**
   * Get the query index writer that will delegate to all registered providers.
   *
   * <p>This returns the composite writer last built by {@link #refreshIndexWriter()}.
   *
   * @return the query index writer instance; never null
   */
  IndexWriter getIndexWriter() {
    return indexWriter;
  }

  /**
   * Get the query index writer that will delegate to only those registered providers with the given
   * names.
   *
   * @param providerNames the names of the providers that require indexing
   * @return a query index writer instance; never null
   */
  IndexWriter getIndexWriterForProviders(Set<String> providerNames) {
    // Select only the providers whose names were requested ...
    List<IndexProvider> selected = new LinkedList<>();
    for (IndexProvider candidate : providers.values()) {
      if (providerNames.contains(candidate.getName())) {
        selected.add(candidate);
      }
    }
    return CompositeIndexWriter.create(selected);
  }

  /**
   * Register a new index provider.
   *
   * <p>If the manager is already initialized, the provider is initialized immediately; otherwise it
   * is initialized later with the others in {@link #initialize()}. The provider is only published
   * into {@link #providers} after it is fully set up, so readers never see a half-initialized
   * provider.
   *
   * @param provider the provider; may not be null
   * @throws IndexProviderExistsException if a provider with the same name is already registered
   * @throws RepositoryException if the provider cannot be initialized
   */
  @Override
  public synchronized void register(IndexProvider provider) throws RepositoryException {
    if (providers.containsKey(provider.getName())) {
      throw new IndexProviderExistsException(
          JcrI18n.indexProviderAlreadyExists.text(provider.getName(), repository.name()));
    }

    // Set the repository name field ...
    Reflection.setValue(provider, "repositoryName", repository.name());

    // Set the logger instance
    Reflection.setValue(provider, "logger", ExtensionLogger.getLogger(provider.getClass()));

    if (initialized.get()) {
      // This manager is already initialized, so we have to initialize the new provider ...
      doInitialize(provider);
    }

    // Do this last so that it doesn't show up in the list of providers before it's properly
    // initialized ...
    IndexProvider existing = providers.putIfAbsent(provider.getName(), provider);
    if (existing != null) {
      // Defensive double-check in case a provider with this name appeared concurrently ...
      throw new IndexProviderExistsException(
          JcrI18n.indexProviderAlreadyExists.text(provider.getName(), repository.name()));
    }

    // Re-read the index definitions in case there were disabled index definitions that used the
    // now-available provider ...
    readIndexDefinitions();

    // Refresh the index writer ...
    refreshIndexWriter();
  }

  /**
   * Remove the named provider from this repository, shutting it down when the manager has already
   * been initialized.
   *
   * @param providerName the name of the provider to remove
   * @throws NoSuchProviderException if no provider with that name is registered
   * @throws RepositoryException if the provider fails to shut down
   */
  @Override
  public void unregister(String providerName) throws RepositoryException {
    IndexProvider removed = providers.remove(providerName);
    if (removed == null) {
      throw new NoSuchProviderException(
          JcrI18n.indexProviderDoesNotExist.text(providerName, repository.name()));
    }
    if (initialized.get()) {
      removed.shutdown();
    }

    // Re-read the index definitions, since some may now be disabled without this provider ...
    readIndexDefinitions();

    // Rebuild the composite writer without the removed provider ...
    refreshIndexWriter();
  }

  /**
   * Look up the status of the named index in the given provider and workspace.
   *
   * @param providerName the provider name; may not be null
   * @param indexName the index name; may not be null
   * @param workspaceName the workspace name; may not be null
   * @return the index's status, or {@link IndexStatus#NON_EXISTENT} when the provider or the index
   *     is unknown
   */
  @Override
  public IndexStatus getIndexStatus(String providerName, String indexName, String workspaceName) {
    CheckArg.isNotNull(providerName, "providerName");
    CheckArg.isNotNull(indexName, "indexName");
    CheckArg.isNotNull(workspaceName, "workspaceName");

    IndexProvider provider = getProvider(providerName);
    if (provider != null) {
      ManagedIndex managed = provider.getManagedIndex(indexName, workspaceName);
      if (managed != null) {
        return managed.getStatus();
      }
    }
    return IndexStatus.NON_EXISTENT;
  }

  /**
   * Collect the managed indexes in the given provider and workspace that currently have the given
   * status.
   *
   * @param providerName the provider name; may not be null
   * @param workspaceName the workspace name; may not be null
   * @param status the status to match
   * @return the matching indexes; never null but possibly empty (including when the provider is
   *     unknown)
   */
  @Override
  public List<ManagedIndex> getIndexes(
      String providerName, String workspaceName, final IndexStatus status) {
    CheckArg.isNotNull(providerName, "providerName");
    CheckArg.isNotNull(workspaceName, "workspaceName");

    final List<ManagedIndex> matches = new ArrayList<>();
    IndexProvider provider = getProvider(providerName);
    if (provider != null) {
      provider.onEachIndexInWorkspace(
          workspaceName,
          new IndexProvider.ManagedIndexOperation() {
            @Override
            public void apply(String wsName, ManagedIndex index, IndexDefinition defn) {
              if (index.getStatus().equals(status)) {
                matches.add(index);
              }
            }
          });
    }
    return matches;
  }

  /**
   * Collect the names of the indexes in the given provider and workspace that currently have the
   * given status.
   *
   * @param providerName the provider name; may not be null
   * @param workspaceName the workspace name; may not be null
   * @param status the status to match
   * @return the matching index names; never null but possibly empty (including when the provider is
   *     unknown)
   */
  @Override
  public List<String> getIndexNames(
      String providerName, String workspaceName, final IndexStatus status) {
    CheckArg.isNotNull(providerName, "providerName");
    CheckArg.isNotNull(workspaceName, "workspaceName");

    final List<String> names = new ArrayList<>();
    IndexProvider provider = getProvider(providerName);
    if (provider != null) {
      provider.onEachIndexInWorkspace(
          workspaceName,
          new IndexProvider.ManagedIndexOperation() {
            @Override
            public void apply(String wsName, ManagedIndex index, IndexDefinition defn) {
              if (index.getStatus().equals(status)) {
                names.add(defn.getName());
              }
            }
          });
    }
    return names;
  }

  /**
   * Get an immutable snapshot of the names of the currently-registered providers.
   *
   * @return the provider names; never null
   */
  @Override
  public Set<String> getProviderNames() {
    return Collections.unmodifiableSet(new HashSet<>(providers.keySet()));
  }

  /**
   * Get a snapshot copy of the currently-registered providers.
   *
   * @return a new list containing the providers; never null
   */
  protected Iterable<IndexProvider> getProviders() {
    return new ArrayList<>(providers.values());
  }

  /**
   * Get the registered provider with the given name.
   *
   * @param name the provider name
   * @return the provider, or null if none is registered under that name
   */
  @Override
  public IndexProvider getProvider(String name) {
    return providers.get(name);
  }

  /**
   * Get the current snapshot of index definitions, keyed by index name.
   *
   * @return the definitions; never null
   */
  @Override
  public Map<String, IndexDefinition> getIndexDefinitions() {
    return indexes.getIndexDefinitions();
  }

  /** @return a new, empty index-column-definition template; never null */
  @Override
  public IndexColumnDefinitionTemplate createIndexColumnDefinitionTemplate() {
    return new RepositoryIndexColumnDefinitionTemplate();
  }

  /** @return a new, empty index-definition template; never null */
  @Override
  public IndexDefinitionTemplate createIndexDefinitionTemplate() {
    return new RepositoryIndexDefinitionTemplate();
  }

  /**
   * Register a single index definition; convenience wrapper around
   * {@link #registerIndexes(IndexDefinition[], boolean)}.
   *
   * @param indexDefinition the definition; may not be null
   * @param allowUpdate true if an existing definition with the same name may be replaced
   * @throws InvalidIndexDefinitionException if the definition fails validation
   * @throws IndexExistsException if the definition exists and {@code allowUpdate} is false
   * @throws RepositoryException if there is a problem registering the definition
   */
  @Override
  public void registerIndex(IndexDefinition indexDefinition, boolean allowUpdate)
      throws InvalidIndexDefinitionException, IndexExistsException, RepositoryException {
    registerIndexes(new IndexDefinition[] {indexDefinition}, allowUpdate);
  }

  /**
   * Validate the supplied index definitions and, when all are valid, persist them to the
   * repository's system area and refresh the immutable snapshot.
   *
   * @param indexDefinitions the definitions to register; may not be null
   * @param allowUpdate true if existing definitions with the same names may be replaced
   * @throws InvalidIndexDefinitionException if any definition fails validation (all validation
   *     errors are collected before throwing)
   * @throws IndexExistsException if a definition already exists and {@code allowUpdate} is false
   */
  @Override
  public void registerIndexes(IndexDefinition[] indexDefinitions, boolean allowUpdate)
      throws InvalidIndexDefinitionException, IndexExistsException {
    CheckArg.isNotNull(indexDefinitions, "indexDefinitions");

    // Before we do anything, validate each of the index definitions and throw an exception ...
    RepositoryNodeTypeManager nodeTypeManager = repository.nodeTypeManager();
    List<IndexDefinition> validated = new ArrayList<>(indexDefinitions.length);
    Problems problems = new SimpleProblems();
    for (IndexDefinition defn : indexDefinitions) {
      String name = defn.getName();
      String providerName = defn.getProviderName();

      if (name == null) {
        problems.addError(JcrI18n.indexMustHaveName, defn, repository.name());
        continue;
      }
      if (indexes.getIndexDefinitions().containsKey(name) && !allowUpdate) {
        // Throw this one immediately ...
        String msg = JcrI18n.indexAlreadyExists.text(defn.getName(), repository.name());
        throw new IndexExistsException(msg);
      }
      if (providerName == null) {
        problems.addError(JcrI18n.indexMustHaveProviderName, defn.getName(), repository.name());
        continue;
      }
      if (defn.hasSingleColumn()) {
        IndexColumnDefinition columnDefn = defn.getColumnDefinition(0);
        Name propName =
            context.getValueFactories().getNameFactory().create(columnDefn.getPropertyName());
        switch (defn.getKind()) {
          case UNIQUE_VALUE:
            // Reject unique indexes over properties that are known to repeat ...
            if (NON_UNIQUE_PROPERTY_NAMES.contains(propName)) {
              problems.addError(
                  JcrI18n.unableToCreateUniqueIndexForColumn,
                  defn.getName(),
                  columnDefn.getPropertyName());
            }
            break;
          case ENUMERATED_VALUE:
            // Reject enumerated indexes over properties with unbounded value sets ...
            if (NON_ENUMERATED_PROPERTY_NAMES.contains(propName)) {
              problems.addError(
                  JcrI18n.unableToCreateEnumeratedIndexForColumn,
                  defn.getName(),
                  columnDefn.getPropertyName());
            }
            break;
          case VALUE:
          case NODE_TYPE:
          case TEXT:
            break;
        }
      } else {
        // Multiple columns ...
        if (defn.getKind() == IndexKind.NODE_TYPE) {
          // must be single-column indexes
          problems.addError(JcrI18n.nodeTypeIndexMustHaveOneColumn, defn.getName());
        }
      }
      IndexProvider provider = providers.get(providerName);
      if (provider == null) {
        problems.addError(JcrI18n.indexProviderDoesNotExist, defn.getName(), repository.name());
      } else {
        // Perform some default validations that should be applied to all providers...
        provider.validateDefaultColumnTypes(context, defn, problems);

        // Then have the provider perform any custom validations
        provider.validateProposedIndex(context, defn, nodeTypeManager, problems);

        // Create an instance of our own definition implementation ...
        defn = RepositoryIndexDefinition.createFrom(defn, true);

        validated.add(defn);
      }
    }
    if (problems.hasErrors()) {
      String msg = JcrI18n.invalidIndexDefinitions.text(repository.name(), problems);
      throw new InvalidIndexDefinitionException(new JcrProblems(problems), msg);
    }

    SessionCache systemCache = repository.createSystemSession(context, false);
    SystemContent system = new SystemContent(systemCache);
    for (IndexDefinition defn : validated) {
      String providerName = defn.getProviderName();

      // Determine if the index should be enabled ...
      defn = RepositoryIndexDefinition.createFrom(defn, providers.containsKey(providerName));

      // Write the definition to the system area ...
      system.store(defn, allowUpdate);
    }
    // Save the changes ...
    systemCache.save();

    // Refresh the immutable snapshot ...
    this.indexes = readIndexDefinitions();
  }

  /**
   * Remove the named index definitions from the system area and refresh the snapshot.
   *
   * @param indexNames the names of the indexes to remove; null or empty is a no-op
   * @throws NoSuchIndexException if any named index is not currently defined
   * @throws RepositoryException if the definitions cannot be removed
   */
  @Override
  public void unregisterIndexes(String... indexNames)
      throws NoSuchIndexException, RepositoryException {
    if (indexNames == null || indexNames.length == 0) return;

    // Remove each definition from the system area, failing fast on an unknown name ...
    SessionCache systemCache = repository.createSystemSession(context, false);
    SystemContent system = new SystemContent(systemCache);
    for (String name : indexNames) {
      IndexDefinition definition = indexes.getIndexDefinitions().get(name);
      if (definition == null) {
        throw new NoSuchIndexException(JcrI18n.indexDoesNotExist.text(name, repository.name()));
      }
      system.remove(definition);
    }
    system.save();

    // Refresh the immutable snapshot ...
    this.indexes = readIndexDefinitions();
  }

  /**
   * Create a copy of this manager bound to the given running state (used when the repository is
   * restarted with the same configuration).
   *
   * @param repository the new running state; may not be null
   * @return a new manager instance; never null
   */
  RepositoryIndexManager with(JcrRepository.RunningState repository) {
    return new RepositoryIndexManager(repository, config);
  }

  /** @return the string value factory from this manager's execution context; never null */
  protected final ValueFactory<String> strings() {
    return this.context.getValueFactories().getStringFactory();
  }

  /**
   * Get an immutable snapshot of the index definitions. This can be used by the query engine to
   * determine which indexes might be usable when querying a specific selector (node type).
   *
   * @return a snapshot of the index definitions at this moment; never null
   */
  public RepositoryIndexes getIndexes() {
    return indexes;
  }

  /**
   * Process a change set, refreshing the cached index definitions and notifying the affected
   * providers when the changes touch the stored definitions or the set of workspaces.
   *
   * @param changeSet the changes to process; may not be null
   * @return the scanning tasks required to (re)build indexes, or null when nothing is affected
   */
  protected ScanningTasks notify(ChangeSet changeSet) {
    if (changeSet.getWorkspaceName() == null) {
      // This is a change to the workspaces or repository metadata ...

      // Refresh the index definitions ...
      RepositoryIndexes indexes = readIndexDefinitions();
      ScanningTasks feedback = new ScanningTasks();
      if (!indexes.getIndexDefinitions().isEmpty()) {
        // Build up the names of the added and removed workspace names ...
        Set<String> addedWorkspaces = new HashSet<>();
        Set<String> removedWorkspaces = new HashSet<>();
        for (Change change : changeSet) {
          if (change instanceof WorkspaceAdded) {
            WorkspaceAdded added = (WorkspaceAdded) change;
            addedWorkspaces.add(added.getWorkspaceName());
          } else if (change instanceof WorkspaceRemoved) {
            WorkspaceRemoved removed = (WorkspaceRemoved) change;
            removedWorkspaces.add(removed.getWorkspaceName());
          }
        }
        if (!addedWorkspaces.isEmpty() || !removedWorkspaces.isEmpty()) {
          // Figure out which providers need to be called, and which definitions go with those
          // providers ...
          Map<String, List<IndexDefinition>> defnsByProvider = new HashMap<>();
          for (IndexDefinition defn : indexes.getIndexDefinitions().values()) {
            String providerName = defn.getProviderName();
            List<IndexDefinition> defns = defnsByProvider.get(providerName);
            if (defns == null) {
              defns = new ArrayList<>();
              defnsByProvider.put(providerName, defns);
            }
            defns.add(defn);
          }
          // Then for each provider ...
          for (Map.Entry<String, List<IndexDefinition>> entry : defnsByProvider.entrySet()) {
            String providerName = entry.getKey();
            WorkspaceIndexChanges changes =
                new WorkspaceIndexChanges(entry.getValue(), addedWorkspaces, removedWorkspaces);
            IndexProvider provider = providers.get(providerName);
            if (provider == null) continue;
            provider.notify(
                changes,
                repository.changeBus(),
                repository.nodeTypeManager(),
                repository.repositoryCache().getWorkspaceNames(),
                feedback.forProvider(providerName));
          }
        }
      }
      return feedback;
    }
    if (!systemWorkspaceName.equals(changeSet.getWorkspaceName())) {
      // The change does not affect the 'system' workspace, so skip it ...
      return null;
    }

    // It is simple to listen to all local and remote changes. Therefore, any changes made locally
    // to the index definitions
    // will be propagated through the cached representation via this listener.
    AtomicReference<Map<Name, IndexChangeInfo>> changesByProviderName = new AtomicReference<>();
    for (Change change : changeSet) {
      if (change instanceof NodeAdded) {
        NodeAdded added = (NodeAdded) change;
        Path addedPath = added.getPath();
        if (indexesPath.isAncestorOf(addedPath)) {
          // Get the name of the affected provider ...
          Name providerName = addedPath.getSegment(2).getName();
          if (addedPath.size() > 3) {
            // Adding an index (or column definition), but all we care about is the name of the
            // index
            Name indexName = addedPath.getSegment(3).getName();
            changeInfoForProvider(changesByProviderName, providerName).changed(indexName);
          }
        }
      } else if (change instanceof NodeRemoved) {
        NodeRemoved removed = (NodeRemoved) change;
        Path removedPath = removed.getPath();
        if (indexesPath.isAncestorOf(removedPath)) {
          // Get the name of the affected provider ...
          Name providerName = removedPath.getSegment(2).getName();
          if (removedPath.size() > 4) {
            // It's a column definition being removed, so the index is changed ...
            Name indexName = removedPath.getSegment(3).getName();
            // BUG FIX: removing a column modifies its index, it does not delete it. This branch
            // previously called removed(indexName) — contradicting the comment above and causing
            // the whole index to be dropped from the provider ...
            changeInfoForProvider(changesByProviderName, providerName).changed(indexName);
          } else if (removedPath.size() > 3) {
            // Removing an index (or column definition), but all we care about is the name of the
            // index
            Name indexName = removedPath.getSegment(3).getName();
            changeInfoForProvider(changesByProviderName, providerName).removed(indexName);
          } else if (removedPath.size() == 3) {
            // The whole provider was removed ...
            changeInfoForProvider(changesByProviderName, providerName).removedAll();
          }
        }
      } else if (change instanceof PropertyChanged) {
        PropertyChanged propChanged = (PropertyChanged) change;
        Path changedPath = propChanged.getPathToNode();
        if (indexesPath.isAncestorOf(changedPath)) {
          if (changedPath.size() > 3) {
            // Adding an index (or column definition), but all we care about is the name of the
            // index
            Name providerName = changedPath.getSegment(2).getName();
            Name indexName = changedPath.getSegment(3).getName();
            changeInfoForProvider(changesByProviderName, providerName).changed(indexName);
          }
        }
      } // we don't care about node moves (don't happen) or property added/removed (handled by node
      // add/remove)
    }

    if (changesByProviderName.get() == null || changesByProviderName.get().isEmpty()) {
      // No changes to the indexes ...
      return null;
    }
    // Refresh the index definitions ...
    RepositoryIndexes indexes = readIndexDefinitions();

    // And notify the affected providers ...
    StringFactory strings = context.getValueFactories().getStringFactory();
    ScanningTasks feedback = new ScanningTasks();
    for (Map.Entry<Name, IndexChangeInfo> entry : changesByProviderName.get().entrySet()) {
      String providerName = strings.create(entry.getKey());
      IndexProvider provider = providers.get(providerName);
      if (provider == null) continue;

      IndexChanges changes = new IndexChanges();
      IndexChangeInfo info = entry.getValue();
      if (info.removedAll) {
        // Get all of the definitions for this provider ...
        for (IndexDefinition defn : indexes.getIndexDefinitions().values()) {
          if (defn.getProviderName().equals(providerName)) changes.remove(defn.getName());
        }
      }
      // Others might have been added or changed after the existing ones were removed ...
      for (Name name : info.removedIndexes) {
        changes.remove(strings.create(name));
      }
      for (Name name : info.changedIndexes) {
        IndexDefinition defn = indexes.getIndexDefinitions().get(strings.create(name));
        if (defn != null) changes.change(defn);
      }
      // Notify the provider ...
      try {
        provider.notify(
            changes,
            repository.changeBus(),
            repository.nodeTypeManager(),
            repository.repositoryCache().getWorkspaceNames(),
            feedback.forProvider(providerName));
      } catch (RuntimeException e) {
        logger.error(
            e,
            JcrI18n.errorNotifyingProviderOfIndexChanges,
            providerName,
            repository.name(),
            e.getMessage());
      }
    }

    // Finally swap the snapshot of indexes ...
    this.indexes = indexes;
    return feedback;
  }

  /** @return true if at least one index provider is currently registered */
  protected boolean hasProviders() {
    return !providers.isEmpty();
  }

  /**
   * Get (lazily creating) the {@link IndexChangeInfo} accumulator for the named provider.
   *
   * @param changesByProviderName holder for the lazily-created map of accumulators; not null
   * @param providerName the provider name; may not be null
   * @return the accumulator for that provider; never null
   */
  protected static IndexChangeInfo changeInfoForProvider(
      AtomicReference<Map<Name, IndexChangeInfo>> changesByProviderName, Name providerName) {
    // Create the map itself on first use ...
    Map<Name, IndexChangeInfo> byName = changesByProviderName.get();
    if (byName == null) {
      byName = new HashMap<>();
      changesByProviderName.set(byName);
    }
    // Then create the per-provider entry on first use ...
    IndexChangeInfo result = byName.get(providerName);
    if (result == null) {
      result = new IndexChangeInfo();
      byName.put(providerName, result);
    }
    return result;
  }

  /**
   * Mutable accumulator of index-name changes for a single provider. The 'changed' and 'removed'
   * sets are kept disjoint: recording one state for an index clears the other.
   */
  protected static final class IndexChangeInfo {
    // Names of indexes that were added or modified ...
    protected final Set<Name> changedIndexes = new HashSet<>();
    // Names of indexes that were deleted ...
    protected final Set<Name> removedIndexes = new HashSet<>();
    // True when the provider's entire subtree of definitions was removed ...
    protected boolean removedAll = false;

    /** Record that the named index was added or modified. */
    public void changed(Name indexName) {
      changedIndexes.add(indexName);
      removedIndexes.remove(indexName);
    }

    /** Record that the named index was removed. */
    public void removed(Name indexName) {
      removedIndexes.add(indexName);
      changedIndexes.remove(indexName);
    }

    /** Record that every index of the provider was removed; clears the per-index sets. */
    public void removedAll() {
      removedAll = true;
      removedIndexes.clear();
      changedIndexes.clear();
    }
  }

  /** Simple {@link IndexDefinitionChanges} accumulator of removed and updated definitions. */
  protected static final class IndexChanges implements IndexDefinitionChanges {
    private final Set<String> removed = new HashSet<>();
    private final Map<String, IndexDefinition> updated = new HashMap<>();

    /** Record that the named index definition was removed. */
    protected void remove(String name) {
      removed.add(name);
    }

    /** Record that the given index definition was added or changed. */
    protected void change(IndexDefinition indexDefn) {
      updated.put(indexDefn.getName(), indexDefn);
    }

    @Override
    public Set<String> getRemovedIndexDefinitions() {
      return removed;
    }

    @Override
    public Map<String, IndexDefinition> getUpdatedIndexDefinitions() {
      return updated;
    }
  }

  /**
   * {@link WorkspaceChanges} implementation describing, for a set of index definitions, which
   * workspaces were added and removed.
   */
  protected static final class WorkspaceIndexChanges implements WorkspaceChanges {
    private final List<IndexDefinition> defns;
    private final Set<String> added;
    private final Set<String> removed;

    protected WorkspaceIndexChanges(
        List<IndexDefinition> defns, Set<String> addedWorkspaces, Set<String> removedWorkspaces) {
      this.defns = defns;
      this.added = addedWorkspaces;
      this.removed = removedWorkspaces;
    }

    @Override
    public Collection<IndexDefinition> getIndexDefinitions() {
      return defns;
    }

    @Override
    public Set<String> getAddedWorkspaces() {
      return added;
    }

    @Override
    public Set<String> getRemovedWorkspaces() {
      return removed;
    }
  }

  /**
   * Re-read all index definitions from the system area, rebuild the immutable snapshot, and store
   * it in {@link #indexes}.
   *
   * <p>A {@link WorkspaceNotFoundException} is deliberately ignored because it can occur during
   * shutdown; in that case (and on any other error, which is logged) the previous snapshot is
   * returned unchanged.
   *
   * @return the current snapshot of index definitions; never null
   */
  protected RepositoryIndexes readIndexDefinitions() {
    // There were at least some changes ...
    NodeTypes nodeTypes = repository.nodeTypeManager().getNodeTypes();
    try {
      // Read the affected index definitions ...
      SessionCache systemCache = repository.createSystemSession(context, false);
      SystemContent system = new SystemContent(systemCache);
      Collection<IndexDefinition> indexDefns = system.readAllIndexDefinitions(providers.keySet());
      this.indexes = new Indexes(context, indexDefns, nodeTypes);
      return this.indexes;
    } catch (WorkspaceNotFoundException e) {
      // This happens occasionally when shutting down ...
    } catch (Throwable e) {
      logger.error(e, JcrI18n.errorRefreshingIndexDefinitions, repository.name());
    }
    return indexes;
  }

  /**
   * An immutable view of the indexes defined for the repository.
   *
   * @author Randall Hauch ([email protected])
   */
  @Immutable
  public static final class Indexes extends RepositoryIndexes {
    // All definitions keyed by index name ...
    private final Map<String, IndexDefinition> indexByName = new HashMap<>();
    // Definitions keyed first by node type (including subtypes), then by provider name ...
    private final Map<String, Map<String, Collection<IndexDefinition>>>
        indexesByProviderByNodeTypeName = new HashMap<>();

    protected Indexes(
        ExecutionContext context, Collection<IndexDefinition> defns, NodeTypes nodeTypes) {
      // Identify the subtypes for each node type, and do this before we build any views ...
      if (!defns.isEmpty()) {
        Map<Name, Collection<String>> subtypesByName = new HashMap<>();
        for (JcrNodeType nodeType : nodeTypes.getAllNodeTypes()) {
          // For each of the supertypes ...
          for (JcrNodeType supertype : nodeType.getTypeAndSupertypes()) {
            Collection<String> types = subtypesByName.get(supertype.getInternalName());
            if (types == null) {
              types = new LinkedList<>();
              subtypesByName.put(supertype.getInternalName(), types);
            }
            types.add(nodeType.getName());
          }
        }

        // Now process all of the indexes (the unused 'nodeTypeNames' scratch set was removed) ...
        NameFactory names = context.getValueFactories().getNameFactory();
        for (IndexDefinition defn : defns) {
          Name nodeTypeName = names.create(defn.getNodeTypeName());

          // Skip (but warn about) definitions whose node type is unknown to the repository ...
          if (!subtypesByName.containsKey(nodeTypeName)) {
            Logger.getLogger(getClass())
                .warn(
                    JcrI18n.errorIndexing,
                    "not creating index "
                        + defn.getName()
                        + " because of unknown nodeType "
                        + nodeTypeName.getString());
            continue;
          }

          indexByName.put(defn.getName(), defn);

          // Now find out all of the node types that are or subtype the named node types ...
          for (String typeAndSubtype : subtypesByName.get(nodeTypeName)) {
            Map<String, Collection<IndexDefinition>> byProvider =
                indexesByProviderByNodeTypeName.get(typeAndSubtype);
            if (byProvider == null) {
              byProvider = new HashMap<>();
              indexesByProviderByNodeTypeName.put(typeAndSubtype, byProvider);
            }
            Collection<IndexDefinition> indexes = byProvider.get(defn.getProviderName());
            if (indexes == null) {
              indexes = new LinkedList<>();
              // BUG FIX: the new collection was previously stored under 'typeAndSubtype', so the
              // lookup above (keyed by provider name) could never find it and indexesFor(...)
              // silently missed definitions. Store it under the provider name instead ...
              byProvider.put(defn.getProviderName(), indexes);
            }
            indexes.add(defn);
          }
        }
      }
    }

    @Override
    public boolean hasIndexDefinitions() {
      return !indexByName.isEmpty();
    }

    @Override
    public Map<String, IndexDefinition> getIndexDefinitions() {
      return java.util.Collections.unmodifiableMap(indexByName);
    }

    @Override
    public Iterable<IndexDefinition> indexesFor(String nodeTypeName, String providerName) {
      Map<String, Collection<IndexDefinition>> defnsByProvider =
          indexesByProviderByNodeTypeName.get(nodeTypeName);
      if (defnsByProvider == null) return null;
      return defnsByProvider.get(providerName);
    }
  }

  static interface ScanOperation {
    public void scan(String workspace, Path path);
  }

  /**
   * An immutable set of provider names and non-overlapping workspace-path pairs.
   *
   * @author Randall Hauch ([email protected])
   */
  @Immutable
  static class ScanningRequest {

    protected static final ScanningRequest EMPTY = new ScanningRequest();

    private final Set<String> providerNames;
    private final Multimap<String, PathToScan> pathsToScanByWorkspace;

    /** Create an empty request with no providers and no workspace-path pairs. */
    protected ScanningRequest() {
      this.providerNames = java.util.Collections.emptySet();
      this.pathsToScanByWorkspace = ArrayListMultimap.create();
    }

    /**
     * Create a request over the supplied workspace-path pairs and provider names.
     *
     * @param pathsToScanByWorkspace the paths to scan, keyed by workspace name; may not be null
     * @param providerNames the names of the index providers to include; may not be null
     */
    protected ScanningRequest(
        Multimap<String, PathToScan> pathsToScanByWorkspace, Set<String> providerNames) {
      assert pathsToScanByWorkspace != null;
      assert providerNames != null;
      this.providerNames = Collections.unmodifiableSet(providerNames);
      this.pathsToScanByWorkspace = pathsToScanByWorkspace;
    }

    /**
     * Determine if this has no providers or workspace-path pairs.
     *
     * <p>Only the provider names are checked; instances are constructed so that workspace-path
     * pairs are present only when at least one provider is.
     *
     * @return true if this request is empty, or false otherwise
     */
    public boolean isEmpty() {
      return providerNames.isEmpty();
    }

    /**
     * Scan each required path in each of the workspaces.
     *
     * @param operation the scanning operation that is to be called for each workspace & path
     *     combination; may not be null
     */
    public void onEachPathInWorkspace(ScanOperation operation) {
      for (Map.Entry<String, PathToScan> entry : pathsToScanByWorkspace.entries()) {
        String workspaceName = entry.getKey();
        PathToScan pathToScan = entry.getValue();
        try {
          // Give every registered callback a chance to prepare; one callback failing must not
          // prevent the others (or the scan itself) from running ...
          for (IndexingCallback callback : pathToScan) {
            try {
              callback.beforeIndexing();
            } catch (RuntimeException e) {
              logScanError(e, pathToScan, workspaceName);
            }
          }
          operation.scan(workspaceName, pathToScan.path());
        } catch (RuntimeException e) {
          logScanError(e, pathToScan, workspaceName);
        } finally {
          // Always notify the callbacks that indexing has completed, even after a failure ...
          for (IndexingCallback callback : pathToScan) {
            try {
              callback.afterIndexing();
            } catch (RuntimeException e) {
              logScanError(e, pathToScan, workspaceName);
            }
          }
        }
      }
    }

    /**
     * Log a failure raised while scanning a path or while invoking one of its indexing callbacks.
     * Extracted into a helper so the identical logging statement is written only once.
     *
     * @param e the failure; may not be null
     * @param pathToScan the path being scanned when the failure occurred; may not be null
     * @param workspaceName the name of the workspace being scanned; may not be null
     */
    private void logScanError(RuntimeException e, PathToScan pathToScan, String workspaceName) {
      Logger.getLogger(getClass())
          .error(e, JcrI18n.errorIndexing, pathToScan.path(), workspaceName, e.getMessage());
    }

    /**
     * Get the set of provider names that are to be included in the scanning.
     *
     * @return the provider names; never null but possibly empty if {@link #isEmpty()} returns true
     */
    public Set<String> providerNames() {
      return providerNames;
    }
  }

  /**
   * A path that is to be scanned together with the callbacks to be notified before and after
   * indexing. Equality and hashing are based solely on the path, so instances can be merged by
   * path via {@link #addCallbacks(PathToScan)}.
   */
  private static class PathToScan implements Iterable<IndexingCallback> {
    private final Path path;
    // CopyOnWriteArraySet: iteration (during scanning) must be safe against concurrent additions.
    private final Set<IndexingCallback> callbacks = new CopyOnWriteArraySet<>();

    protected PathToScan(Path path, IndexingCallback callback) {
      this.path = path;
      if (callback != null) this.callbacks.add(callback);
    }

    /**
     * Add all of the callbacks registered on another instance to this one.
     *
     * @param other the other instance; may not be null
     */
    public void addCallbacks(PathToScan other) {
      callbacks.addAll(other.callbacks);
    }

    /** @return the path to be scanned; never null */
    public Path path() {
      return path;
    }

    @Override
    public int hashCode() {
      return path.hashCode();
    }

    @Override
    public boolean equals(Object obj) {
      // FIX: compare this path against the OTHER instance's path. The previous implementation
      // evaluated 'path.equals(obj)' - comparing a Path to a PathToScan - which violated the
      // equals contract and returned false even for two instances with equal paths.
      if (obj == this) return true;
      if (!(obj instanceof PathToScan)) return false;
      return path.equals(((PathToScan) obj).path);
    }

    @Override
    public Iterator<IndexingCallback> iterator() {
      return callbacks.iterator();
    }
  }

  /**
   * Threadsafe utility class for maintaining the list of providers and workspace-path pairs that
   * need to be scanned. Instances can be safely combined using {@link #add(ScanningTasks)}, and
   * immutable snapshots of the information can be obtained via {@link #drain()} (which atomically
   * empties the providers and workspace-path pairs into the immutable {@link ScanningRequest}).
   *
   * @author Randall Hauch ([email protected])
   */
  @ThreadSafe
  static class ScanningTasks {
    // Names of the providers that have requested scanning; guarded by 'this' (synchronized
    // methods).
    private final Set<String> providerNames = new HashSet<>();
    // Paths to scan, keyed by workspace name. Not final: drain() hands off the whole multimap
    // and replaces it with a fresh instance.
    private Multimap<String, PathToScan> pathsByWorkspaceName = ArrayListMultimap.create();

    /**
     * Add all of the provider names and workspace-path pairs from the supplied scanning task.
     *
     * @param other the other scanning task; may be null
     * @return true if there is at least one workspace-path pair and provider, or false if there are
     *     none
     */
    public synchronized boolean add(ScanningTasks other) {
      if (other != null) {
        this.providerNames.addAll(other.providerNames);
        // Route each pair through add(String, PathToScan) so overlapping paths are coalesced ...
        for (Map.Entry<String, PathToScan> entry : other.pathsByWorkspaceName.entries()) {
          add(entry.getKey(), entry.getValue());
        }
      }
      return !this.providerNames.isEmpty();
    }

    /**
     * Atomically drain all of the provider names and workspace-path pairs from this object and
     * return them in an immutable {@link ScanningRequest}.
     *
     * @return the immutable set of provider names and workspace-path pairs; never null
     */
    public synchronized ScanningRequest drain() {
      if (this.providerNames.isEmpty()) return ScanningRequest.EMPTY;

      // Copy the provider names, but hand off the multimap itself and replace it with a fresh
      // empty instance so this object can keep accumulating new requests ...
      Set<String> providerNames = new HashSet<>(this.providerNames);
      Multimap<String, PathToScan> pathsToScanByWorkspace = this.pathsByWorkspaceName;
      this.pathsByWorkspaceName = ArrayListMultimap.create();
      this.providerNames.clear();
      return new ScanningRequest(pathsToScanByWorkspace, providerNames);
    }

    /**
     * Record that the named provider needs the given path in the named workspace to be scanned.
     *
     * @param providerName the name of the index provider; may not be null
     * @param workspaceName the name of the workspace; may not be null
     * @param path the path to be scanned; may not be null
     * @param callback the callback to be notified before and after indexing; may be null
     */
    protected synchronized void add(
        String providerName, String workspaceName, Path path, IndexingCallback callback) {
      assert providerName != null;
      assert workspaceName != null;
      assert path != null;
      providerNames.add(providerName);
      add(workspaceName, path, callback);
    }

    // Add a path for the workspace while keeping the stored paths non-overlapping: a new
    // ancestor replaces any stored descendants, and a new descendant of a stored path is
    // dropped. In both cases the callbacks of the merged entries are combined.
    private void add(String workspaceName, PathToScan pathToScan) {
      Collection<PathToScan> pathsToScan = pathsByWorkspaceName.get(workspaceName);
      if (pathsToScan.isEmpty()) {
        // Guava's multimap view: adding to the (empty) view collection inserts the mapping ...
        pathsToScan.add(pathToScan);
      } else {
        Iterator<PathToScan> iter = pathsToScan.iterator();
        boolean add = true;
        final Path path = pathToScan.path();
        while (iter.hasNext()) {
          PathToScan existing = iter.next();
          Path existingPath = existing.path();
          if (path.isAtOrAbove(existingPath)) {
            // Remove the existing path, which is at or below the new one (the new path will be
            // added below and covers it) ...
            iter.remove();
            // But add all of the callbacks ...
            pathToScan.addCallbacks(existing);
          } else if (path.isDescendantOf(existingPath)) {
            // The new path is a descendant of an existing path, so it is already covered and
            // nothing needs to be stored ...
            add = false;
            // But add all of the callbacks ...
            existing.addCallbacks(pathToScan);
            break;
          }
        }
        if (add) pathsByWorkspaceName.put(workspaceName, pathToScan);
      }
    }

    private void add(String workspaceName, Path path, IndexingCallback callback) {
      add(workspaceName, new PathToScan(path, callback));
    }

    /**
     * Obtain an {@link IndexFeedback} instance that can be used to gather feedback from the named
     * provider.
     *
     * @param providerName the name of the index provider; may not be null
     * @return the custom IndexFeedback instance; never null
     */
    protected IndexFeedback forProvider(final String providerName) {
      assert providerName != null;
      return new IndexFeedback() {

        @Override
        public void scan(String workspaceName, IndexingCallback callback) {
          // No path supplied: scan the whole workspace starting at the root ...
          add(providerName, workspaceName, Path.ROOT_PATH, callback);
        }

        @Override
        public void scan(String workspaceName, IndexingCallback callback, Path path) {
          add(providerName, workspaceName, path, callback);
        }
      };
    }
  }
}
/**
 * Implementation of a {@link ChangeBus} which can run in a cluster, via JGroups. This bus wraps
 * around another bus, to which it delegates all "local" processing of events.
 *
 * @author Horia Chiorean
 */
@ThreadSafe
public final class ClusteredRepositoryChangeBus implements ChangeBus {

  protected static final Logger LOGGER = Logger.getLogger(ClusteredRepositoryChangeBus.class);

  /** The wrapped standalone bus to which standard bus operations are delegated */
  protected final ChangeBus delegate;

  /** The listener for channel changes. */
  private final Listener listener = new Listener();

  /**
   * The component that will receive the JGroups messages and broadcast them to this bus' observers.
   */
  private final Receiver receiver = new Receiver();

  /** Flag that dictates whether this bus has connected to the cluster. */
  protected final AtomicBoolean isOpen = new AtomicBoolean(false);

  /**
   * Flag that dictates whether there are multiple participants in the cluster; if not, then the
   * changes are propagated only to the local observers.
   */
  protected final AtomicBoolean multipleAddressesInCluster = new AtomicBoolean(false);

  /** The clustering configuration */
  protected final RepositoryConfiguration.Clustering clusteringConfiguration;

  /**
   * The JGroups channel to which all {@link #notify(ChangeSet) change notifications} will be sent
   * and from which all changes will be received and sent to the observers.
   *
   * <p>It is important that the order of the {@link ChangeSet} instances are maintained across the
   * cluster, and JGroups will do this for us as long as we push all local changes into the channel
   * and receive all local/remote changes from the channel.
   */
  private Channel channel;

  /**
   * Create a clustered change bus that wraps and delegates to the supplied local bus.
   *
   * @param clusteringConfiguration the clustering configuration; may not be null and must be
   *     enabled
   * @param delegate the local bus that processes the events; may not be null
   */
  public ClusteredRepositoryChangeBus(
      RepositoryConfiguration.Clustering clusteringConfiguration, ChangeBus delegate) {
    CheckArg.isNotNull(clusteringConfiguration, "clusteringConfiguration");
    CheckArg.isNotNull(delegate, "delegate");

    this.clusteringConfiguration = clusteringConfiguration;
    assert clusteringConfiguration.isEnabled();
    this.delegate = delegate;
  }

  @Override
  public synchronized void start() throws Exception {
    String clusterName = clusteringConfiguration.getClusterName();
    if (clusterName == null) {
      throw new IllegalStateException(BusI18n.clusterNameRequired.text());
    }
    if (channel != null) {
      // Disconnect from any previous channel ...
      channel.removeChannelListener(listener);
      channel.setReceiver(null);
    }
    // Create the new channel by calling the delegate method ...
    channel = newChannel();
    // Add a listener through which we'll know what's going on within the cluster ...
    channel.addChannelListener(listener);

    // Set the receiver through which we'll receive all of the changes ...
    channel.setReceiver(receiver);

    // Now connect to the cluster ...
    channel.connect(clusterName);

    // start the delegate
    delegate.start();
  }

  /**
   * Create the JGroups channel, either taking it directly from the configuration (and its
   * environment) or by instantiating the configured {@link ChannelProvider} implementation.
   *
   * @return the channel; never null
   * @throws Exception if the provider class cannot be loaded, instantiated, or is of the wrong
   *     type
   */
  private Channel newChannel() throws Exception {
    // Try to get the channel directly from the configuration (and its environment) ...
    Channel channel = clusteringConfiguration.getChannel();
    if (channel != null) {
      return channel;
    }

    String lookupClassName = clusteringConfiguration.getChannelProviderClassName();
    assert lookupClassName != null;

    Class<?> lookupClass = Class.forName(lookupClassName);
    if (!ChannelProvider.class.isAssignableFrom(lookupClass)) {
      throw new IllegalArgumentException(
          "Invalid channel lookup class configured. Expected a subclass of org.modeshape.jcr.clustering.ChannelProvider. Actual class:"
              + lookupClass);
    }
    return ((ChannelProvider) lookupClass.newInstance()).getChannel(clusteringConfiguration);
  }

  @Override
  public boolean hasObservers() {
    return delegate.hasObservers();
  }

  /**
   * Return whether this bus has been {@link #start() started} and not yet {@link #shutdown() shut
   * down}.
   *
   * @return true if {@link #start()} has been called but {@link #shutdown()} has not, or false
   *     otherwise
   */
  public boolean isStarted() {
    return channel != null;
  }

  @Override
  public synchronized void shutdown() {
    if (channel != null) {
      // Mark this as not accepting any more ...
      isOpen.set(false);
      try {
        // Disconnect from the channel and close it ...
        channel.removeChannelListener(listener);
        channel.setReceiver(null);
        channel.close();
      } finally {
        channel = null;
        // Now that we're not receiving any more messages, shut down the delegate
        delegate.shutdown();
      }
    }
  }

  @Override
  public void notify(ChangeSet changeSet) {
    if (changeSet == null) {
      return; // do nothing
    }
    if (!isOpen.get()) {
      // The channel is not open ...
      return;
    }
    if (!multipleAddressesInCluster.get()) {
      // We are in clustered mode, but there is only one participant in the cluster (us).
      // So short-circuit the cluster and just notify the local observers ...
      if (hasObservers()) {
        delegate.notify(changeSet);
        logReceivedOperation(changeSet);
      }
      return;
    }

    // There are multiple participants in the cluster, so send all changes out to JGroups,
    // letting JGroups do the ordering of messages...
    try {
      logSendOperation(changeSet);
      byte[] data = serialize(changeSet);
      Message message = new Message(null, null, data);
      channel.send(message);
    } catch (IllegalStateException e) {
      // The channel was not usable for sending; warn but do not propagate ...
      LOGGER.warn(
          BusI18n.unableToNotifyChanges,
          clusteringConfiguration.getClusterName(),
          changeSet.size(),
          changeSet.getWorkspaceName(),
          changeSet.getUserId(),
          changeSet.getProcessKey(),
          changeSet.getTimestamp());
    } catch (Exception e) {
      // Something went wrong here (this should not happen) ...
      String msg =
          BusI18n.errorSerializingChanges.text(
              clusteringConfiguration.getClusterName(),
              changeSet.size(),
              changeSet.getWorkspaceName(),
              changeSet.getUserId(),
              changeSet.getProcessKey(),
              changeSet.getTimestamp(),
              changeSet);
      throw new SystemFailureException(msg, e);
    }
  }

  /** Trace-log an outgoing change set, when trace logging is enabled. */
  protected final void logSendOperation(ChangeSet changeSet) {
    if (LOGGER.isTraceEnabled()) {
      LOGGER.trace(
          "Sending to cluster '{0}' {1} changes on workspace {2} made by {3} from process '{4}' at {5}",
          clusteringConfiguration.getClusterName(),
          changeSet.size(),
          changeSet.getWorkspaceName(),
          changeSet.getUserData(),
          changeSet.getProcessKey(),
          changeSet.getTimestamp());
    }
  }

  /** Trace-log a received (or locally short-circuited) change set, when trace logging is enabled. */
  protected final void logReceivedOperation(ChangeSet changeSet) {
    if (LOGGER.isTraceEnabled()) {
      LOGGER.trace(
          "Received on cluster '{0}' {1} changes on workspace {2} made by {3} from process '{4}' at {5}",
          clusteringConfiguration.getClusterName(),
          changeSet.size(),
          changeSet.getWorkspaceName(),
          changeSet.getUserId(),
          changeSet.getProcessKey(),
          changeSet.getTimestamp());
    }
  }

  @Override
  public boolean register(ChangeSetListener observer) {
    return delegate.register(observer);
  }

  @Override
  public boolean unregister(ChangeSetListener observer) {
    return delegate.unregister(observer);
  }

  /**
   * Serialize a change set into bytes suitable for sending over the JGroups channel.
   *
   * <p>FIX: the {@link ObjectOutputStream} is now managed with try-with-resources so it is always
   * closed (and flushed) even when {@code writeObject} throws; previously a failure left the
   * stream unclosed.
   *
   * @param changes the change set; may not be null
   * @return the serialized form; never null
   * @throws Exception if serialization fails
   */
  protected byte[] serialize(ChangeSet changes) throws Exception {
    ByteArrayOutputStream output = new ByteArrayOutputStream();
    try (ObjectOutputStream stream = new ObjectOutputStream(output)) {
      stream.writeObject(changes);
    }
    return output.toByteArray();
  }

  /**
   * Deserialize a change set previously written by {@link #serialize(ChangeSet)}, resolving
   * classes against this class' class loader.
   *
   * <p>FIX: the stream is now managed with try-with-resources so it is closed even when {@code
   * readObject} throws.
   *
   * @param data the serialized form; may not be null
   * @return the change set; never null
   * @throws Exception if deserialization fails
   */
  protected ChangeSet deserialize(byte[] data) throws Exception {
    try (ObjectInputStreamWithClassLoader input =
        new ObjectInputStreamWithClassLoader(
            new ByteArrayInputStream(data), getClass().getClassLoader())) {
      return (ChangeSet) input.readObject();
    }
  }

  /** Receives JGroups messages and republishes the contained change sets to the local bus. */
  protected final class Receiver extends ReceiverAdapter {

    @Override
    public void block() {
      isOpen.set(false);
    }

    @Override
    public void receive(final Message message) {
      if (!hasObservers()) {
        return;
      }
      // We have at least one
      try {
        // Deserialize the changes ...
        ChangeSet changes = deserialize(message.getBuffer());
        // and broadcast them
        delegate.notify(changes);
        logReceivedOperation(changes);
      } catch (Exception e) {
        // Something went wrong here (this should not happen) ...
        String msg =
            BusI18n.errorDeserializingChanges.text(clusteringConfiguration.getClusterName());
        throw new SystemFailureException(msg, e);
      }
    }

    @Override
    public void suspect(Address suspectedMbr) {
      LOGGER.error(
          BusI18n.memberOfClusterIsSuspect, clusteringConfiguration.getClusterName(), suspectedMbr);
    }

    @Override
    public void viewAccepted(View newView) {
      LOGGER.trace(
          "Members of '{0}' cluster have changed: {1}",
          clusteringConfiguration.getClusterName(), newView);
      // Track whether changes must be routed through JGroups or can stay local-only ...
      if (newView.getMembers().size() > 1) {
        if (multipleAddressesInCluster.compareAndSet(false, true)) {
          LOGGER.debug(
              "There are now multiple members of cluster '{0}'; changes will be propagated throughout the cluster",
              clusteringConfiguration.getClusterName());
        }
      } else {
        if (multipleAddressesInCluster.compareAndSet(true, false)) {
          LOGGER.debug(
              "There is only one member of cluster '{0}'; changes will be propagated locally only",
              clusteringConfiguration.getClusterName());
        }
      }
    }
  }

  /** Tracks channel connect/disconnect/close events to maintain the {@link #isOpen} flag. */
  protected final class Listener implements ChannelListener {
    @Override
    public void channelClosed(Channel channel) {
      isOpen.set(false);
    }

    @Override
    public void channelConnected(Channel channel) {
      isOpen.set(true);
    }

    @Override
    public void channelDisconnected(Channel channel) {
      isOpen.set(false);
    }
  }

  /**
   * ObjectInputStream extension that allows a different class loader to be used when resolving
   * types.
   */
  protected final class ObjectInputStreamWithClassLoader extends ObjectInputStream {

    private final ClassLoader cl;

    public ObjectInputStreamWithClassLoader(InputStream in, ClassLoader cl) throws IOException {
      super(in);
      this.cl = cl;
    }

    @Override
    protected Class<?> resolveClass(ObjectStreamClass desc)
        throws IOException, ClassNotFoundException {
      if (cl == null) {
        return super.resolveClass(desc);
      }
      try {
        // Prefer the supplied loader; fall back to default resolution when it cannot find the
        // class ...
        return Class.forName(desc.getName(), false, cl);
      } catch (ClassNotFoundException ex) {
        return super.resolveClass(desc);
      }
    }
  }
}
/**
 * The {@link RepositoryNodeTypeManager} is the maintainer of node type information for the entire
 * repository at run-time. The repository manager maintains a list of all node types and the ability
 * to retrieve node types by {@link Name}.
 *
 * <p>The JCR 1.0 and 2.0 specifications both require that node type information be shared across
 * all sessions within a repository and that the {@link javax.jcr.nodetype.NodeTypeManager} perform
 * operations based on the string versions of {@link Name}s based on the permanent
 * (workspace-scoped) and transient (session-scoped) namespace mappings. ModeShape achieves this by
 * maintaining a single master repository of all node type information (the {@link
 * RepositoryNodeTypeManager}) and per-session wrappers ( {@link JcrNodeTypeManager}) for this
 * master repository that perform {@link String} to {@link Name} translation based on the {@link
 * javax.jcr.Session}'s transient mappings and then delegating node type lookups to the repository
 * manager.
 */
@ThreadSafe
class RepositoryNodeTypeManager implements ChangeSetListener, NodeTypes.Supplier {

  // The repository's running state; supplies the execution context, system sessions, and query
  // manager used throughout this class.
  private final JcrRepository.RunningState repository;
  // Execution context for value factories and the namespace registry.
  private final ExecutionContext context;
  // Name of the system workspace in which node type definitions are persisted.
  private final String systemWorkspaceName;
  // Absolute path (built from JcrLexicon.SYSTEM and JcrLexicon.NODE_TYPES) under which node
  // types are stored in the system workspace.
  private final Path nodeTypesPath;
  // Factory for creating Name instances from string node type names.
  private final NameFactory nameFactory;
  private final Logger logger = Logger.getLogger(getClass());

  // Write lock held while registering/unregistering node types; readers use the volatile
  // nodeTypesCache field directly.
  private final ReadWriteLock nodeTypesLock = new ReentrantReadWriteLock();

  @GuardedBy("nodeTypesLock")
  private volatile NodeTypes nodeTypesCache;

  // Parser for the query used by isNodeTypeInUse(...).
  private final QueryParser queryParser;
  private final boolean includeColumnsForInheritedProperties;
  private final boolean includePseudoColumnsInSelectStar;
  // Lazily built by getRepositorySchemata(); reset to null when node types or namespaces change.
  private volatile NodeTypeSchemata schemata;

  /**
   * Create the repository-wide node type manager.
   *
   * @param repository the repository's running state; may not be null
   * @param includeColumnsForInheritedProperties whether query schemata should include columns for
   *     inherited properties
   * @param includePseudoColumnsInSelectStar whether query schemata should include pseudo-columns
   *     in "SELECT *" results
   */
  RepositoryNodeTypeManager(
      JcrRepository.RunningState repository,
      boolean includeColumnsForInheritedProperties,
      boolean includePseudoColumnsInSelectStar) {
    // Capture the repository state and the factories derived from its execution context ...
    this.repository = repository;
    this.context = repository.context();
    this.nameFactory = this.context.getValueFactories().getNameFactory();
    this.systemWorkspaceName = this.repository.repositoryCache().getSystemWorkspaceName();

    // Compute where node types are stored within the system workspace ...
    PathFactory paths = this.context.getValueFactories().getPathFactory();
    this.nodeTypesPath = paths.createAbsolutePath(JcrLexicon.SYSTEM, JcrLexicon.NODE_TYPES);

    // Start with an empty cache of node types ...
    this.nodeTypesCache = new NodeTypes(this.context);

    this.includeColumnsForInheritedProperties = includeColumnsForInheritedProperties;
    this.includePseudoColumnsInSelectStar = includePseudoColumnsInSelectStar;
    this.queryParser = new BasicSqlQueryParser();
  }

  /**
   * Create a replacement manager bound to the supplied running state, carrying over this manager's
   * node type cache so the new instance does not need to re-read system storage.
   *
   * @param repository the new running state; must refer to the same system workspace
   * @param includeColumnsForInheritedProperties whether query schemata should include columns for
   *     inherited properties
   * @param includePseudoColumnsInSelectStar whether query schemata should include pseudo-columns
   *     in "SELECT *" results
   * @return the new manager; never null
   */
  RepositoryNodeTypeManager with(
      JcrRepository.RunningState repository,
      boolean includeColumnsForInheritedProperties,
      boolean includePseudoColumnsInSelectStar) {
    // Sanity-check that the new running state points at the same system storage ...
    assert this.systemWorkspaceName.equals(repository.repositoryCache().getSystemWorkspaceName());
    PathFactory paths = repository.context().getValueFactories().getPathFactory();
    Path expectedPath = paths.createAbsolutePath(JcrLexicon.SYSTEM, JcrLexicon.NODE_TYPES);
    assert this.nodeTypesPath.equals(expectedPath);

    RepositoryNodeTypeManager result =
        new RepositoryNodeTypeManager(
            repository, includeColumnsForInheritedProperties, includePseudoColumnsInSelectStar);
    // Seed the new manager's cache with this manager's node types ...
    // (If we didn't do this, we'd have to refresh from the system storage)
    result.nodeTypesCache = result.nodeTypesCache.with(this.nodeTypesCache.getAllNodeTypes());
    return result;
  }

  /** @return the string value factory of this manager's execution context; never null */
  protected final ValueFactory<String> strings() {
    return context.getValueFactories().getStringFactory();
  }

  /**
   * Get the immutable snapshot of all currently registered node types.
   *
   * @return the current node type cache; never null
   */
  @Override
  public NodeTypes getNodeTypes() {
    return this.nodeTypesCache;
  }

  /**
   * Allows the collection of node types to be unregistered if they are not referenced by other node
   * types as supertypes, default primary types of child nodes, or required primary types of child
   * nodes.
   *
   * @param nodeTypeNames the names of the node types to be unregistered
   * @param failIfNodeTypesAreUsed true if this method should fail to unregister the named node
   *     types if any of the node types are still in use by nodes, or false if this method should
   *     not perform such a check
   * @throws NoSuchNodeTypeException if any of the node type names do not correspond to a registered
   *     node type
   * @throws InvalidNodeTypeDefinitionException if any of the node types with the given names cannot
   *     be unregistered because they are the supertype, one of the required primary types, or a
   *     default primary type of a node type that is not being unregistered.
   * @throws RepositoryException if any other error occurs
   */
  void unregisterNodeType(Collection<Name> nodeTypeNames, boolean failIfNodeTypesAreUsed)
      throws NoSuchNodeTypeException, InvalidNodeTypeDefinitionException, RepositoryException {
    CheckArg.isNotNull(nodeTypeNames, "nodeTypeNames");
    if (nodeTypeNames.isEmpty()) return;

    if (failIfNodeTypesAreUsed) {
      long start = System.nanoTime();
      // Search the content graph to make sure that this type isn't being used
      for (Name nodeTypeName : nodeTypeNames) {
        if (isNodeTypeInUse(nodeTypeName)) {
          String name = nodeTypeName.getString(context.getNamespaceRegistry());
          throw new InvalidNodeTypeDefinitionException(
              JcrI18n.cannotUnregisterInUseType.text(name));
        }
      }
      long time =
          TimeUnit.MILLISECONDS.convert(Math.abs(System.nanoTime() - start), TimeUnit.NANOSECONDS);
      logger.debug(
          "{0} milliseconds to check if any of these node types are unused before unregistering them: {1}",
          time, nodeTypeNames);
    }

    List<JcrNodeType> removedNodeTypes = new ArrayList<>(nodeTypeNames.size());
    /*
     * Grab an exclusive lock on this data to keep other nodes from being added/saved while the unregistration checks are occurring.
     * FIX: acquire the lock BEFORE the try block. Previously lock() was called inside the try;
     * had it thrown, the finally block would have called unlock() on a lock this thread never
     * held, raising IllegalMonitorStateException and masking the original failure.
     */
    nodeTypesLock.writeLock().lock();
    try {
      final NodeTypes nodeTypes = this.nodeTypesCache;

      for (Name nodeTypeName : nodeTypeNames) {
        /*
         * Check that the type names are valid
         */
        if (nodeTypeName == null) {
          throw new NoSuchNodeTypeException(JcrI18n.invalidNodeTypeName.text());
        }
        String name = nodeTypeName.getString(context.getNamespaceRegistry());

        JcrNodeType foundNodeType = nodeTypes.getNodeType(nodeTypeName);
        if (foundNodeType == null) {
          throw new NoSuchNodeTypeException(JcrI18n.noSuchNodeType.text(name));
        }
        removedNodeTypes.add(foundNodeType);

        /*
         * Check that no other node definitions have dependencies on any of the named types
         */
        for (JcrNodeType nodeType : nodeTypes.getAllNodeTypes()) {
          // If this node is also being unregistered, don't run checks against it
          if (nodeTypeNames.contains(nodeType.getInternalName())) {
            continue;
          }

          for (JcrNodeType supertype : nodeType.supertypes()) {
            if (nodeTypeName.equals(supertype.getInternalName())) {
              throw new InvalidNodeTypeDefinitionException(
                  JcrI18n.cannotUnregisterSupertype.text(name, supertype.getName()));
            }
          }

          for (JcrNodeDefinition childNode : nodeType.childNodeDefinitions()) {
            NodeType defaultPrimaryType = childNode.getDefaultPrimaryType();
            if (defaultPrimaryType != null && name.equals(defaultPrimaryType.getName())) {
              throw new InvalidNodeTypeDefinitionException(
                  JcrI18n.cannotUnregisterDefaultPrimaryType.text(
                      name, nodeType.getName(), childNode.getName()));
            }
            if (childNode.requiredPrimaryTypeNameSet().contains(nodeTypeName)) {
              throw new InvalidNodeTypeDefinitionException(
                  JcrI18n.cannotUnregisterRequiredPrimaryType.text(
                      name, nodeType.getName(), childNode.getName()));
            }
          }
        }
      }

      // Create the new cache ...
      NodeTypes newNodeTypes = nodeTypes.without(removedNodeTypes);

      // Remove the node types from persistent storage ...
      SessionCache system = repository.createSystemSession(context, false);
      SystemContent systemContent = new SystemContent(system);
      systemContent.unregisterNodeTypes(
          removedNodeTypes.toArray(new JcrNodeType[removedNodeTypes.size()]));
      systemContent.save();

      // Now change the cache ...
      this.nodeTypesCache = newNodeTypes;
      this.schemata = null;
    } finally {
      nodeTypesLock.writeLock().unlock();
    }
  }

  /**
   * Get (lazily building, if necessary) the schemata for the current node types. Rebuilding is
   * idempotent, so concurrent callers may each build an instance without locking.
   *
   * @return the schemata; never null
   */
  NodeTypeSchemata getRepositorySchemata() {
    // Try reading first, since this will work most of the time ...
    NodeTypeSchemata result = schemata;
    if (result != null) return result;
    // Build a new instance; this is idempotent, so it's okay not to lock ...
    result =
        new NodeTypeSchemata(
            context,
            nodeTypesCache,
            includeColumnsForInheritedProperties,
            includePseudoColumnsInSelectStar);
    schemata = result;
    // FIX: return the local reference rather than re-reading the volatile field, which another
    // thread could have reset to null (via signalNamespaceChanges or unregisterNodeType)
    // between our write and the return - previously that race could return null.
    return result;
  }

  /** Invalidate the cached schemata; it is lazily rebuilt on the next access. */
  void signalNamespaceChanges() {
    schemata = null;
  }

  /**
   * Check if the named node type is in use in any workspace in the repository
   *
   * @param nodeTypeName the name of the node type to check
   * @return true if at least one node is using that type; false otherwise (also returns true when
   *     the check itself fails, erring on the side of caution)
   * @throws InvalidQueryException if there is an error searching for uses of the named node type
   */
  boolean isNodeTypeInUse(Name nodeTypeName) throws InvalidQueryException {

    // Build a query that returns at most one node that uses the named type ...
    String nodeTypeString = nodeTypeName.getString(context.getNamespaceRegistry());
    String expression = "SELECT * from [" + nodeTypeString + "] LIMIT 1";
    TypeSystem typeSystem = context.getValueFactories().getTypeSystem();
    // Parsing must be done now ...
    QueryCommand command = queryParser.parseQuery(expression, typeSystem);
    assert command != null : "Could not parse " + expression;

    Schemata schemata = getRepositorySchemata();

    // Now query the entire repository for any nodes that use this node type ...
    RepositoryCache repoCache = repository.repositoryCache();
    RepositoryQueryManager queryManager = repository.queryManager();
    Set<String> workspaceNames = repoCache.getWorkspaceNames();
    // No overridden node caches: query the workspaces' actual content ...
    Map<String, NodeCache> overridden = null;
    NodeTypes nodeTypes = repository.nodeTypeManager().getNodeTypes();
    RepositoryIndexes indexDefns = repository.queryManager().getIndexes();
    CancellableQuery query =
        queryManager.query(
            context,
            repoCache,
            workspaceNames,
            overridden,
            command,
            schemata,
            indexDefns,
            nodeTypes,
            null,
            null);
    try {
      QueryResults result = query.execute();
      if (result.isEmpty()) return false;
      if (result.getRowCount() < 0) {
        // The row count is unknown, so we have to try to read the first row ...
        NodeSequence seq = result.getRows();
        Batch batch = seq.nextBatch();
        while (batch != null) {
          if (batch.hasNext()) return true;
          // A batch may (rarely) be empty even when more rows exist, so try the next
          // batch ...
          batch = seq.nextBatch();
        }
        return false;
      }
      return result.getRowCount() > 0;
    } catch (RepositoryException e) {
      // Log the failure and conservatively report the type as in-use ...
      logger.error(e, JcrI18n.errorCheckingNodeTypeUsage, nodeTypeName, e.getLocalizedMessage());
      return true;
    }
  }

  /**
   * Registers a new node type or updates an existing node type using the specified definition and
   * returns the resulting {@code NodeType} object. Registration fails if a node type with the same
   * name is already registered (this overload always delegates with {@code
   * failIfNodeTypeExists == true}).
   *
   * <p>For details, see {@link #registerNodeTypes(Iterable)}.
   *
   * @param ntd the {@code NodeTypeDefinition} to register
   * @return the newly registered (or updated) {@code NodeType}
   * @throws InvalidNodeTypeDefinitionException if the {@code NodeTypeDefinition} is invalid
   * @throws NodeTypeExistsException if the {@code NodeTypeDefinition} specifies a node type name
   *     that is already registered
   * @throws RepositoryException if another error occurs
   */
  JcrNodeType registerNodeType(NodeTypeDefinition ntd)
      throws InvalidNodeTypeDefinitionException, NodeTypeExistsException, RepositoryException {

    return registerNodeType(ntd, true);
  }

  /**
   * Registers a new node type or updates an existing node type using the specified definition and
   * returns the resulting {@code NodeType} object.
   *
   * <p>For details, see {@link #registerNodeTypes(Iterable)}.
   *
   * @param ntd the {@code NodeTypeDefinition} to register
   * @param failIfNodeTypeExists indicates whether the registration should proceed if there is
   *     already a type with the same name; {@code true} indicates that the registration should fail
   *     with an error if a node type with the same name already exists
   * @return the newly registered (or updated) {@code NodeType}
   * @throws InvalidNodeTypeDefinitionException if the {@code NodeTypeDefinition} is invalid
   * @throws NodeTypeExistsException if {@code failIfNodeTypeExists} is true and the {@code
   *     NodeTypeDefinition} specifies a node type name that is already registered
   * @throws RepositoryException if another error occurs
   */
  JcrNodeType registerNodeType(NodeTypeDefinition ntd, boolean failIfNodeTypeExists)
      throws InvalidNodeTypeDefinitionException, NodeTypeExistsException, RepositoryException {
    assert ntd != null;
    // Register the single definition as a batch of one, persisting the result ...
    List<JcrNodeType> registered =
        registerNodeTypes(Collections.singletonList(ntd), failIfNodeTypeExists, false, true);
    if (registered.isEmpty()) return null;
    return registered.get(0);
  }

  /**
   * Registers or updates the specified {@code Collection} of {@link NodeTypeDefinition} objects.
   *
   * <p>This method is used to register or update a set of node types with mutual dependencies.
   *
   * <p>The effect of this method is &quot;all or nothing&quot;; if an error occurs, no node types
   * are registered or updated.
   *
   * <p><b>ModeShape Implementation Notes</b>
   *
   * <p>ModeShape currently supports registration of batches of types with some constraints.
   * ModeShape will allow types to be registered if they meet the following criteria:
   *
   * <ol>
   *   <li>The batch must consist of {@code NodeTypeDefinitionTemplate node type definition
   *       templates} created through the user's JCR session.
   *   <li>Existing types cannot be modified in-place - They must be unregistered and re-registered
   *   <li>Types must have a non-null, non-empty name
   *   <li>If a primary item name is specified for the node type, it must match the name of a
   *       property OR a child node, not both
   *   <li>Each type must have a valid set of supertypes - that is, the type's supertypes must meet
   *       the following criteria:
   *       <ol>
   *         <li>The type must have at least one supertype (unless the type is {@code nt:base}).
   *         <li>No two supertypes {@code t1} and {@code t2} can each declare a property ({@code p1}
   *             and {@code p2}) with the same name and cardinality ({@code p1.isMultiple() ==
   *             p2.isMultiple()}). Note that this does prohibit {@code t1} and {@code t2} from
   *             having a common supertype (or super-supertype, etc.) that declares such a property.
   *         <li>No two supertypes {@code t1} and {@code t2} can each declare a child node ({@code
   *             n1} and {@code n2}) with the same name and SNS status ({@code
   *             n1.allowsSameNameSiblings() == n2.allowsSameNameSiblings()}). Note that this does
   *             prohibit {@code t1} and {@code t2} from having a common supertype (or
   *             super-supertype, etc.) that declares such a child node.
   *       </ol>
   *   <li>Each type must have a valid set of properties - that is, the type's properties must meet
   *       the following criteria:
   *       <ol>
   *         <li>Residual property definitions cannot be mandatory
   *         <li>If the property is auto-created, it must specify a default value
   *         <li>If the property is single-valued, it can only specify a single default value
   *         <li>If the property overrides an existing property definition from a supertype, the new
   *             definition must be mandatory if the old definition was mandatory
   *         <li>The property cannot override an existing property definition from a supertype if
   *             the ancestor definition is protected
   *         <li>If the property overrides an existing property definition from a supertype that
   *             specifies value constraints, the new definition must have the same value
   *             constraints as the old definition. <i>This requirement may be relaxed in a future
   *             version of ModeShape.</i>
   *         <li>If the property overrides an existing property definition from a supertype, the new
   *             definition must have the same required type as the old definition or a required
   *             type that can ALWAYS be cast to the required type of the ancestor (see section
   *             3.6.4 of the JCR 2.0 specification)
   *       </ol>
   *       Note that an empty set of properties would meet the above criteria.
   *   <li>The type must have a valid set of child nodes - that is, the types's child nodes must
   *       meet the following criteria:
   *       <ol>
   *         <li>Residual child node definitions cannot be mandatory
   *         <li>If the child node is auto-created, it must specify a default primary type name
   *         <li>If the child node overrides an existing child node definition from a supertype, the
   *             new definition must be mandatory if the old definition was mandatory
   *         <li>The child node cannot override an existing child node definition from a supertype
   *             if the ancestor definition is protected
   *         <li>If the child node overrides an existing child node definition from a supertype, the
   *             required primary types of the new definition must be more restrictive than the
   *             required primary types of the old definition - that is, the new primary types must
   *             defined such that any type that satisfies all of the required primary types for the
   *             new definition must also satisfy all of the required primary types for the old
   *             definition. This requirement is analogous to the requirement that overriding
   *             property definitions have a required type that is always convertible to the
   *             required type of the overridden definition.
   *       </ol>
   *       Note that an empty set of child nodes would meet the above criteria.
   *
   * @param nodeTypeDefns the {@link NodeTypeDefinition node type definitions} to register
   * @return the newly registered (or updated) {@link NodeType NodeTypes}
   * @throws UnsupportedRepositoryOperationException if an attempt is made to update an existing
   *     type in-place. ModeShape does not support this capability at this time.
   * @throws InvalidNodeTypeDefinitionException if the {@link NodeTypeDefinition} is invalid
   * @throws NodeTypeExistsException if a {@link NodeTypeDefinition} specifies a node type name that
   *     is already registered (this overload always fails on duplicates)
   * @throws RepositoryException if another error occurs
   */
  List<JcrNodeType> registerNodeTypes(Iterable<NodeTypeDefinition> nodeTypeDefns)
      throws InvalidNodeTypeDefinitionException, NodeTypeExistsException, RepositoryException {
    return registerNodeTypes(nodeTypeDefns, true, false, true);
  }

  /**
   * Register the supplied node type definitions as one atomic batch: either every definition is
   * registered, or none are.
   *
   * @param nodeTypeDefns the definitions to register; may be null, in which case an empty list is
   *     returned
   * @param failIfNodeTypeDefinitionsExist true if encountering an already-registered type with the
   *     same name should raise a {@link NodeTypeExistsException}
   * @param skipIfNodeTypeDefinitionExists true if an already-registered type with the same name
   *     should simply be skipped (consulted only when the previous flag is false)
   * @param persist true if the registered types should also be written to the repository's system
   *     storage, or false to update only the in-memory cache
   * @return the node types that were registered; never null
   * @throws InvalidNodeTypeDefinitionException if a definition has a missing or empty name
   * @throws NodeTypeExistsException if a duplicate is found and duplicates are not allowed
   * @throws RepositoryException if another error occurs
   */
  List<JcrNodeType> registerNodeTypes(
      Iterable<NodeTypeDefinition> nodeTypeDefns,
      boolean failIfNodeTypeDefinitionsExist,
      boolean skipIfNodeTypeDefinitionExists,
      boolean persist)
      throws InvalidNodeTypeDefinitionException, NodeTypeExistsException, RepositoryException {

    if (nodeTypeDefns == null) {
      return Collections.emptyList();
    }

    List<JcrNodeType> typesPendingRegistration = new ArrayList<JcrNodeType>();

    // Acquire the lock *before* entering the try block, so the finally-unlock can never run when
    // the lock was not actually obtained (matches notify(...) and refreshFromSystem()) ...
    nodeTypesLock.writeLock().lock();
    try {
      final NodeTypes nodeTypes = this.nodeTypesCache;

      for (NodeTypeDefinition nodeTypeDefn : nodeTypeDefns) {
        if (nodeTypeDefn instanceof JcrNodeTypeTemplate) {
          // Switch to use this context, so names are properly prefixed ...
          nodeTypeDefn = ((JcrNodeTypeTemplate) nodeTypeDefn).with(context);
        }
        Name internalName = nodeTypes.nameFactory().create(nodeTypeDefn.getName());
        if (internalName == null || internalName.getLocalName().length() == 0) {
          throw new InvalidNodeTypeDefinitionException(JcrI18n.invalidNodeTypeName.text());
        }

        boolean found = nodeTypes.hasNodeType(internalName);
        if (found && failIfNodeTypeDefinitionsExist) {
          String name = nodeTypeDefn.getName();
          throw new NodeTypeExistsException(internalName, JcrI18n.nodeTypeAlreadyExists.text(name));
        }
        if (found && skipIfNodeTypeDefinitionExists) continue;

        // Supertypes may already be registered or may appear earlier in this same batch ...
        List<JcrNodeType> supertypes =
            nodeTypes.supertypesFor(nodeTypeDefn, typesPendingRegistration);
        JcrNodeType nodeType = nodeTypeFrom(nodeTypeDefn, supertypes);

        typesPendingRegistration.add(nodeType);
      }

      if (!typesPendingRegistration.isEmpty()) {
        // Make sure the nodes have primary types that are either already registered, or pending
        // registration ...
        validateTypes(typesPendingRegistration);

        // Validate each of types that should be registered
        for (JcrNodeType typePendingRegistration : typesPendingRegistration) {
          nodeTypes.validate(
              typePendingRegistration,
              Arrays.asList(typePendingRegistration.getDeclaredSupertypes()),
              typesPendingRegistration);
        }

        SystemContent system = null;
        if (persist) {
          SessionCache systemCache = repository.createSystemSession(context, false);
          system = new SystemContent(systemCache);
        }

        for (JcrNodeType nodeType : typesPendingRegistration) {
          if (system != null) system.store(nodeType, true);
        }

        // Create the new cache ...
        NodeTypes newNodeTypes = nodeTypes.with(typesPendingRegistration);

        // Save the changes ...
        if (system != null) system.save();

        // And finally update the capabilities cache and invalidate the cached schemata ...
        this.nodeTypesCache = newNodeTypes;
        this.schemata = null;
      }
    } finally {
      nodeTypesLock.writeLock().unlock();
    }

    return typesPendingRegistration;
  }

  /**
   * Verify that each pending type's child-node definitions reference resolvable required primary
   * types, and that mixin types only have mixin supertypes.
   *
   * @param typesPendingRegistration the types being registered in the current batch; may not be
   *     null
   * @throws RepositoryException if a required primary type cannot be resolved or a mixin has a
   *     non-mixin supertype
   */
  private void validateTypes(List<JcrNodeType> typesPendingRegistration)
      throws RepositoryException {
    NodeTypes registered = this.nodeTypesCache;

    for (JcrNodeType pending : typesPendingRegistration) {
      // Every required primary type of each declared child-node definition must be either
      // already registered or part of this pending batch ...
      for (JcrNodeDefinition childDefn : pending.getDeclaredChildNodeDefinitions()) {
        for (Name requiredName : childDefn.requiredPrimaryTypeNames()) {
          JcrNodeType resolved =
              registered.findTypeInMapOrList(requiredName, typesPendingRegistration);
          if (resolved == null) {
            throw new RepositoryException(
                JcrI18n.invalidPrimaryTypeName.text(requiredName, pending.getName()));
          }
        }
      }

      // A mixin type may only extend other mixin types ...
      if (pending.isMixin()) {
        for (NodeType supertype : pending.getSupertypes()) {
          if (!supertype.isMixin()) {
            throw new RepositoryException(
                JcrI18n.invalidMixinSupertype.text(pending.getName(), supertype.getName()));
          }
        }
      }
    }
  }

  /**
   * Build the internal {@link JcrNodeType} representation of a JCR node type definition.
   *
   * @param nodeType the JCR definition to convert; may not be null
   * @param supertypes the resolved supertypes of the new node type
   * @return the internal node type; never null
   * @throws RepositoryException if a property definition cannot be converted
   */
  private JcrNodeType nodeTypeFrom(NodeTypeDefinition nodeType, List<JcrNodeType> supertypes)
      throws RepositoryException {
    // Convert each declared property definition into its internal form ...
    List<JcrPropertyDefinition> properties = new ArrayList<JcrPropertyDefinition>();
    PropertyDefinition[] declaredProps = nodeType.getDeclaredPropertyDefinitions();
    if (declaredProps != null) {
      for (PropertyDefinition declaredProp : declaredProps) {
        properties.add(propertyDefinitionFrom(declaredProp));
      }
    }

    // Convert each declared child-node definition into its internal form ...
    List<JcrNodeDefinition> childNodes = new ArrayList<JcrNodeDefinition>();
    NodeDefinition[] declaredChildren = nodeType.getDeclaredChildNodeDefinitions();
    if (declaredChildren != null) {
      for (NodeDefinition declaredChild : declaredChildren) {
        childNodes.add(childNodeDefinitionFrom(declaredChild));
      }
    }

    NodeKey prototypeKey = repository.repositoryCache().getSystemKey();
    return new JcrNodeType(
        prototypeKey,
        this.context,
        null,
        this,
        nameFactory.create(nodeType.getName()),
        supertypes,
        nameFactory.create(nodeType.getPrimaryItemName()),
        childNodes,
        properties,
        nodeType.isMixin(),
        nodeType.isAbstract(),
        nodeType.isQueryable(),
        nodeType.hasOrderableChildNodes());
  }

  /**
   * Build the internal {@link JcrPropertyDefinition} representation of a JCR property definition.
   *
   * @param propDefn the JCR property definition to convert; may not be null
   * @return the internal property definition; never null
   * @throws RepositoryException if the default values cannot be converted
   */
  private JcrPropertyDefinition propertyDefinitionFrom(PropertyDefinition propDefn)
      throws RepositoryException {
    // Convert the default values (if any) into JcrValue instances ...
    JcrValue[] jcrDefaultValues = null;
    Value[] defaultValues = propDefn.getDefaultValues();
    if (defaultValues != null) {
      jcrDefaultValues = new JcrValue[defaultValues.length];
      for (int i = 0; i != defaultValues.length; ++i) {
        jcrDefaultValues[i] = new JcrValue(this.context.getValueFactories(), defaultValues[i]);
      }
    }

    // An absent constraint list is represented internally as an empty array ...
    String[] valueConstraints = propDefn.getValueConstraints();
    if (valueConstraints == null) valueConstraints = new String[0];

    NodeKey prototypeKey = repository.repositoryCache().getSystemKey();
    return new JcrPropertyDefinition(
        this.context,
        null,
        prototypeKey,
        nameFactory.create(propDefn.getName()),
        propDefn.getOnParentVersion(),
        propDefn.isAutoCreated(),
        propDefn.isMandatory(),
        propDefn.isProtected(),
        jcrDefaultValues,
        propDefn.getRequiredType(),
        valueConstraints,
        propDefn.isMultiple(),
        propDefn.isFullTextSearchable(),
        propDefn.isQueryOrderable(),
        propDefn.getAvailableQueryOperators());
  }

  /**
   * Build the internal {@link JcrNodeDefinition} representation of a JCR child-node definition.
   *
   * @param childNodeDefn the JCR child-node definition to convert; may not be null
   * @return the internal child-node definition; never null
   */
  private JcrNodeDefinition childNodeDefinitionFrom(NodeDefinition childNodeDefn) {
    // Resolve the required primary type names; absence is represented as an empty array ...
    Name[] requiredTypes = new Name[0];
    String[] requiredTypeNames = childNodeDefn.getRequiredPrimaryTypeNames();
    if (requiredTypeNames != null) {
      requiredTypes = new Name[requiredTypeNames.length];
      for (int i = 0; i != requiredTypeNames.length; ++i) {
        requiredTypes[i] = nameFactory.create(requiredTypeNames[i]);
      }
    }

    NodeKey prototypeKey = repository.repositoryCache().getSystemKey();
    return new JcrNodeDefinition(
        this.context,
        null,
        prototypeKey,
        nameFactory.create(childNodeDefn.getName()),
        childNodeDefn.getOnParentVersion(),
        childNodeDefn.isAutoCreated(),
        childNodeDefn.isMandatory(),
        childNodeDefn.isProtected(),
        childNodeDefn.allowsSameNameSiblings(),
        nameFactory.create(childNodeDefn.getDefaultPrimaryTypeName()),
        requiredTypes);
  }

  /**
   * Process a change set from the repository's event bus, keeping this manager's node type cache
   * in sync with node type definitions modified in the system workspace by another process.
   *
   * <p>Changes outside the system workspace, and changes generated by this process itself, are
   * ignored. For relevant changes, the affected type names are collected and then either re-read
   * and re-registered or unregistered, under the write lock.
   *
   * @param changeSet the set of changes to process; may not be null
   */
  @Override
  public void notify(ChangeSet changeSet) {
    if (!systemWorkspaceName.equals(changeSet.getWorkspaceName())) {
      // The change does not affect the 'system' workspace, so skip it ...
      return;
    }
    if (context.getProcessId().equals(changeSet.getProcessKey())) {
      // We generated these changes, so skip them ...
      return;
    }

    // Now process the changes, partitioning affected type names into those that must be re-read
    // ('refresh') and those whose type node itself was removed ('delete') ...
    Set<Name> nodeTypesToRefresh = new HashSet<Name>();
    Set<Name> nodeTypesToDelete = new HashSet<Name>();
    for (Change change : changeSet) {
      if (change instanceof NodeAdded) {
        NodeAdded added = (NodeAdded) change;
        Path addedPath = added.getPath();
        if (nodeTypesPath.isAncestorOf(addedPath)) {
          // Get the name of the node type ...
          Name nodeTypeName = addedPath.getSegment(2).getName();
          nodeTypesToRefresh.add(nodeTypeName);
        }
      } else if (change instanceof NodeRemoved) {
        NodeRemoved removed = (NodeRemoved) change;
        Path removedPath = removed.getPath();
        if (nodeTypesPath.isAncestorOf(removedPath)) {
          // Get the name of the node type ...
          Name nodeTypeName = removedPath.getSegment(2).getName();
          if (removedPath.size() == 3) {
            // The node type node itself was removed, so the type must be unregistered ...
            nodeTypesToDelete.add(nodeTypeName);
          } else {
            // It's a child defn or property defn ...
            if (!nodeTypesToDelete.contains(nodeTypeName)) {
              // The child defn or property defn is being removed but the node type is not ...
              nodeTypesToRefresh.add(nodeTypeName);
            }
          }
        }
      } else if (change instanceof PropertyChanged) {
        PropertyChanged propChanged = (PropertyChanged) change;
        Path changedPath = propChanged.getPathToNode();
        if (nodeTypesPath.isAncestorOf(changedPath)) {
          // Get the name of the node type ...
          Name nodeTypeName = changedPath.getSegment(2).getName();
          nodeTypesToRefresh.add(nodeTypeName);
        }
      } // we don't care about node moves (don't happen) or property added/removed (handled by node
        // add/remove)
    }

    if (nodeTypesToRefresh.isEmpty() && nodeTypesToDelete.isEmpty()) {
      // No changes
      return;
    }

    // There were at least some changes ...
    this.nodeTypesLock.writeLock().lock();
    try {
      // Re-register the node types that were changed or added ...
      SessionCache systemCache = repository.createSystemSession(context, false);
      SystemContent system = new SystemContent(systemCache);
      Collection<NodeTypeDefinition> nodeTypes = system.readNodeTypes(nodeTypesToRefresh);
      registerNodeTypes(nodeTypes, false, false, false);

      // Unregister those that were removed ...
      unregisterNodeType(nodeTypesToDelete, false);
    } catch (Throwable e) {
      logger.error(e, JcrI18n.errorRefreshingNodeTypes, repository.name());
    } finally {
      this.nodeTypesLock.writeLock().unlock();
    }
  }

  /**
   * Refresh the node types from the stored representation.
   *
   * @return true if there was at least one node type found, or false if there were none
   */
  protected boolean refreshFromSystem() {
    this.nodeTypesLock.writeLock().lock();
    try {
      // Read every stored node type definition and re-register them all without persisting ...
      SessionCache systemCache = repository.createSystemSession(context, true);
      SystemContent system = new SystemContent(systemCache);
      Collection<NodeTypeDefinition> storedTypes = system.readAllNodeTypes();
      if (storedTypes.isEmpty()) return false;
      registerNodeTypes(storedTypes, false, false, false);
      return true;
    } catch (Throwable t) {
      // Log (rather than propagate) so a refresh failure leaves the existing cache intact ...
      logger.error(t, JcrI18n.errorRefreshingNodeTypes, repository.name());
      return false;
    } finally {
      this.nodeTypesLock.writeLock().unlock();
    }
  }

  /** Render the current node type cache's string form; useful for debugging. */
  @Override
  public String toString() {
    return getNodeTypes().toString();
  }
}
예제 #12
0
/**
 * Handler for the WebDAV {@code PUT} method: stores the request body as the resource at the
 * request path, creating the resource first if necessary, and optionally creating a missing
 * parent folder on the fly.
 */
public class DoPut extends AbstractMethod {

  private static final Logger LOG = Logger.getLogger(DoPut.class);

  private final IWebdavStore store;
  private final IResourceLocks resourceLocks;
  private final boolean readOnly;
  private final boolean lazyFolderCreationOnPut;

  // NOTE(review): per-request state kept in an instance field; if one DoPut instance serves
  // concurrent requests this is a data race -- TODO confirm the dispatcher uses one handler
  // instance per request.
  private String userAgent;

  /**
   * @param store the content store used to read and write stored objects; may not be null
   * @param resLocks the resource lock manager used to check and acquire locks; may not be null
   * @param readOnly true if every PUT should be rejected with {@code 403 Forbidden}
   * @param lazyFolderCreationOnPut true if a missing parent folder should be created implicitly
   *     rather than failing the request with {@code 404 Not Found}
   */
  public DoPut(
      IWebdavStore store,
      IResourceLocks resLocks,
      boolean readOnly,
      boolean lazyFolderCreationOnPut) {
    this.store = store;
    this.resourceLocks = resLocks;
    this.readOnly = readOnly;
    this.lazyFolderCreationOnPut = lazyFolderCreationOnPut;
  }

  /**
   * Execute the PUT request: verify locks, ensure the parent folder and target resource exist,
   * then write the request body as the resource content.
   *
   * @param transaction the current store transaction
   * @param req the HTTP request whose body supplies the resource content
   * @param resp the HTTP response used to report status and errors
   * @throws IOException if reading the request or writing the response fails
   * @throws LockFailedException if a lock operation fails
   */
  @Override
  public void execute(ITransaction transaction, HttpServletRequest req, HttpServletResponse resp)
      throws IOException, LockFailedException {
    LOG.trace("-- " + this.getClass().getName());

    // Guard clause: a read-only store rejects all PUTs ...
    if (readOnly) {
      LOG.trace("Readonly={0}", readOnly);
      resp.sendError(WebdavStatus.SC_FORBIDDEN);
      return;
    }

    String path = getRelativePath(req);
    String parentPath = getParentPath(path);

    userAgent = req.getHeader("User-Agent");

    if (isOSXFinder() && req.getContentLength() == 0) {
      // OS X Finder sends 2 PUTs; first has 0 content, second has content.
      // This is the first one, so we'll ignore it ...
      LOG.trace("-- First of multiple OS-X Finder PUT calls at {0}", path);
    }

    Hashtable<String, Integer> errorList = new Hashtable<String, Integer>();

    if (isOSXFinder()) {
      // OS X Finder sends 2 PUTs; first has 0 content, second has content.
      // This is the second one that was preceded by a LOCK, so don't need to check the locks ...
    } else {
      if (!isUnlocked(transaction, req, resourceLocks, parentPath)) {
        LOG.trace("-- Locked parent at {0}", path);
        resp.setStatus(WebdavStatus.SC_LOCKED);
        return; // parent is locked
      }

      if (!isUnlocked(transaction, req, resourceLocks, path)) {
        LOG.trace("-- Locked resource at {0}", path);
        resp.setStatus(WebdavStatus.SC_LOCKED);
        return; // resource is locked
      }
    }

    // Take a temporary lock on the target for the duration of the write ...
    String tempLockOwner = "doPut" + System.currentTimeMillis() + req.toString();
    if (!resourceLocks.lock(transaction, path, tempLockOwner, false, 0, TEMP_TIMEOUT, TEMPORARY)) {
      LOG.trace("Lock was not acquired when working with {0}", path);
      resp.sendError(WebdavStatus.SC_INTERNAL_SERVER_ERROR);
      return;
    }

    try {
      StoredObject parentSo = store.getStoredObject(transaction, parentPath);
      if (parentPath != null && parentSo != null && parentSo.isResource()) {
        // The parent exists but is a plain resource; cannot create children under it ...
        resp.sendError(WebdavStatus.SC_FORBIDDEN);
        return;

      } else if (parentPath != null && parentSo == null && lazyFolderCreationOnPut) {
        // The parent folder is missing; create it on the fly ...
        store.createFolder(transaction, parentPath);

      } else if (parentPath != null && parentSo == null && !lazyFolderCreationOnPut) {
        errorList.put(parentPath, WebdavStatus.SC_NOT_FOUND);
        sendReport(req, resp, errorList);
        return;
      }

      LOG.trace("-- Looking for the stored object at {0}", path);
      StoredObject so = store.getStoredObject(transaction, path);

      if (so == null) {
        LOG.trace("-- Creating resource in the store at {0}", path);
        store.createResource(transaction, path);
      } else {
        // This has already been created, just update the data
        LOG.trace("-- There is already a resource at {0}", path);
        if (so.isNullResource()) {
          LockedObject nullResourceLo = resourceLocks.getLockedObjectByPath(transaction, path);
          if (nullResourceLo == null) {
            LOG.trace("-- Unable to obtain resource lock object at {0}", path);
            resp.sendError(WebdavStatus.SC_INTERNAL_SERVER_ERROR);
            return;
          }
          LOG.trace("-- Found resource lock object at {0}", path);
          String nullResourceLockToken = nullResourceLo.getID();
          String[] lockTokens = getLockIdFromIfHeader(req);
          if (lockTokens == null) {
            LOG.trace("-- No lock tokens found in resource lock object at {0}", path);
            resp.sendError(WebdavStatus.SC_BAD_REQUEST);
            return;
          }
          String lockToken = lockTokens[0];
          if (lockToken.equals(nullResourceLockToken)) {
            // The client holds the lock on this null resource: convert it to a real resource
            // and release the null-resource lock ...
            so.setNullResource(false);
            so.setFolder(false);

            String[] nullResourceLockOwners = nullResourceLo.getOwner();
            String owner = null;
            if (nullResourceLockOwners != null) {
              owner = nullResourceLockOwners[0];
            }

            if (!resourceLocks.unlock(transaction, lockToken, owner)) {
              resp.sendError(WebdavStatus.SC_INTERNAL_SERVER_ERROR);
            }
          } else {
            errorList.put(path, WebdavStatus.SC_LOCKED);
            sendReport(req, resp, errorList);
          }
        } else {
          LOG.trace("-- Found a lock for the (existing) resource at {0}", path);
        }
      }
      // User-Agent workarounds
      doUserAgentWorkaround(resp);

      // setting resourceContent
      LOG.trace("-- Setting resource content at {0}", path);
      long resourceLength =
          store.setResourceContent(transaction, path, req.getInputStream(), null, null);

      // Now lets report back what was actually saved ...
      so = store.getStoredObject(transaction, path);
      if (so == null) {
        resp.setStatus(WebdavStatus.SC_NOT_FOUND);
      } else if (resourceLength != -1) {
        so.setResourceLength(resourceLength);
      }

    } catch (AccessDeniedException e) {
      LOG.trace(e, "Access denied when working with {0}", path);
      resp.sendError(WebdavStatus.SC_FORBIDDEN);
    } catch (WebdavException e) {
      LOG.trace(e, "WebDAV exception when working with {0}", path);
      resp.sendError(WebdavStatus.SC_INTERNAL_SERVER_ERROR);
    } finally {
      // Always release the temporary lock taken above ...
      resourceLocks.unlockTemporaryLockedObjects(transaction, path, tempLockOwner);
    }
  }

  /**
   * Apply User-Agent-specific response status workarounds for OS X Finder and Transmit clients.
   *
   * @param resp the response whose status is adjusted
   */
  private void doUserAgentWorkaround(HttpServletResponse resp) {
    if (isOSXFinder()) {
      LOG.trace("DoPut.execute() : do workaround for user agent '" + userAgent + "'");
      resp.setStatus(WebdavStatus.SC_CREATED);
    } else if (userAgent != null && userAgent.contains("Transmit")) {
      // Transmit also uses WEBDAVFS 1.x.x but crashes
      // with SC_CREATED response
      LOG.trace("DoPut.execute() : do workaround for user agent '" + userAgent + "'");
      resp.setStatus(WebdavStatus.SC_NO_CONTENT);
    } else {
      resp.setStatus(WebdavStatus.SC_CREATED);
    }
  }

  /** @return true if the current request's User-Agent identifies the OS X Finder WebDAV client */
  private boolean isOSXFinder() {
    return (userAgent != null && userAgent.contains("WebDAVFS") && !userAgent.contains("Transmit"));
  }
}
예제 #13
0
/** @author Randall Hauch ([email protected]) */
public abstract class BufferingSequence extends DelegatingSequence {

  protected static final Logger logger = Logger.getLogger(BufferingSequence.class);
  // NOTE(review): captured once at class-initialization time; later changes to the logger's
  // level are not reflected here — confirm this is intentional.
  protected static final boolean trace = logger.isTraceEnabled();

  // Buffer that holds the delegate's rows keyed by the value extracted from each row.
  protected final SortingBuffer<Object, BufferedRow> buffer;
  // Factory used to snapshot the current row of a batch into a BufferedRow.
  protected final BufferedRowFactory<? extends BufferedRow> rowFactory;
  protected final ExtractFromRow extractor;
  protected final CachedNodeSupplier cache;
  // The width (number of node/score pairs per row) of the delegate sequence.
  protected final int width;
  protected final String workspaceName;
  // Number of rows that may still be returned from the buffer.
  protected final AtomicLong remainingRowCount = new AtomicLong();
  // Number of rows remaining in the batch currently being returned by batchFrom(...).
  protected final AtomicLong rowsLeftInBatch = new AtomicLong();

  /**
   * Create a buffering sequence around the supplied delegate.
   *
   * @param workspaceName the name of the workspace the rows belong to
   * @param delegate the sequence whose rows are to be buffered; may not be null
   * @param extractor the extractor for the sortable key value; may not be null
   * @param bufferMgr the manager used to create the buffer; may not be null
   * @param nodeCache the supplier of cached nodes used when materializing buffered rows
   * @param pack true if the B-tree key serializer may pack keys for compactness
   * @param useHeap true if the buffer should be kept on the Java heap
   * @param allowDuplicates true if multiple rows may share the same extracted key value
   */
  @SuppressWarnings("unchecked")
  protected BufferingSequence(
      String workspaceName,
      NodeSequence delegate,
      ExtractFromRow extractor,
      BufferManager bufferMgr,
      CachedNodeSupplier nodeCache,
      boolean pack,
      boolean useHeap,
      boolean allowDuplicates) {
    super(delegate);
    assert extractor != null;
    this.workspaceName = workspaceName;
    this.width = delegate.width();
    this.cache = nodeCache;
    this.extractor = extractor;

    // Set up the row factory based upon the width of the delegate sequence...
    this.rowFactory = BufferedRows.serializer(nodeCache, width);

    // Set up the buffer; duplicates require a different buffer implementation ...
    SortingBuffer<Object, BufferedRow> buffer = null;
    TypeFactory<?> keyType = extractor.getType();
    if (allowDuplicates) {
      @SuppressWarnings("rawtypes")
      Serializer<? extends Comparable> keySerializer =
          (Serializer<? extends Comparable<?>>) bufferMgr.serializerFor(keyType);
      buffer =
          bufferMgr
              .createSortingWithDuplicatesBuffer(
                  keySerializer,
                  extractor.getType().getComparator(),
                  (BufferedRowFactory<BufferedRow>) rowFactory)
              .keepSize(true)
              .useHeap(useHeap)
              .make();
    } else {
      BTreeKeySerializer<Object> keySerializer =
          (BTreeKeySerializer<Object>) bufferMgr.bTreeKeySerializerFor(keyType, pack);
      if (keySerializer instanceof KeySerializerWithComparator) {
        keySerializer =
            ((KeySerializerWithComparator<Object>) keySerializer)
                .withComparator(extractor.getType().getComparator());
      }
      buffer =
          bufferMgr
              .createSortingBuffer(keySerializer, (BufferedRowFactory<BufferedRow>) rowFactory)
              .keepSize(true)
              .useHeap(useHeap)
              .make();
    }
    this.buffer = buffer;
  }

  /**
   * A buffering sequence never reports itself as empty, since emptiness cannot be known until the
   * delegate has been drained into the buffer.
   */
  @Override
  public boolean isEmpty() {
    return false;
  }

  /** @return the number of rows currently held in the buffer */
  protected long rowCount() {
    return buffer.size();
  }

  /**
   * Snapshot the current row of the supplied batch as a buffered row.
   *
   * @param currentRow the batch positioned at the row to capture; may not be null
   * @return the buffered row
   */
  protected BufferedRow createRow(Batch currentRow) {
    return rowFactory.createRow(currentRow);
  }

  /**
   * Load all of the rows from the supplied sequence into the buffer.
   *
   * @param sequence the node sequence; may not be null
   * @param extractor the extractor for the sortable value; may not be null
   * @param rowsWithNullKey the buffer into which should be placed all rows for which the extracted
   *     key value is null; may be null if these are not to be kept
   * @return the size of the first batch, or 0 if there are no rows found
   */
  protected int loadAll(
      NodeSequence sequence,
      ExtractFromRow extractor,
      DistinctBuffer<BufferedRow> rowsWithNullKey) {
    Batch batch = sequence.nextBatch();
    int batchSize = 0;
    // Keep consuming batches until the first non-empty one; its row count is the result ...
    while (batch != null && batchSize == 0) {
      batchSize = bufferRowsIn(batch, extractor, rowsWithNullKey);
      batch = sequence.nextBatch();
    }
    // Buffer all of the remaining batches ...
    while (batch != null) {
      bufferRowsIn(batch, extractor, rowsWithNullKey);
      batch = sequence.nextBatch();
    }
    return batchSize;
  }

  /**
   * Place every row of the supplied batch into the buffer, or — for rows whose extracted key is
   * null — into {@code rowsWithNullKey} when that collector is supplied.
   *
   * @param batch the batch to consume; may not be null
   * @param extractor the extractor for the sortable value; may not be null
   * @param rowsWithNullKey optional collector for rows with a null key; may be null
   * @return the number of rows in the batch
   */
  private int bufferRowsIn(
      Batch batch, ExtractFromRow extractor, DistinctBuffer<BufferedRow> rowsWithNullKey) {
    int rowsInBatch = 0;
    while (batch.hasNext()) {
      batch.nextRow();
      Object value = extractor.getValueInRow(batch);
      if (value instanceof Object[]) {
        // Multi-valued key: index this row once under each of the values ...
        for (Object v : (Object[]) value) {
          buffer.put(v, createRow(batch));
        }
      } else if (value != null) {
        buffer.put(value, createRow(batch));
      } else if (rowsWithNullKey != null) {
        rowsWithNullKey.addIfAbsent(createRow(batch));
      }
      ++rowsInBatch;
    }
    return rowsInBatch;
  }

  /**
   * Create a batch that returns up to {@code maxBatchSize} rows from the supplied iterator,
   * decrementing {@link #remainingRowCount} and {@link #rowsLeftInBatch} as rows are consumed.
   *
   * @param rows the iterator over buffered rows; may be null
   * @param maxBatchSize the maximum number of rows in the returned batch
   * @return the batch, an empty batch if no rows remain to be returned, or null if the iterator is
   *     null or exhausted
   */
  protected Batch batchFrom(final Iterator<BufferedRow> rows, final long maxBatchSize) {
    if (rows == null || !rows.hasNext()) return null;
    if (maxBatchSize == 0 || remainingRowCount.get() <= 0)
      return NodeSequence.emptyBatch(workspaceName, this.width);
    final long rowsInBatch = Math.min(maxBatchSize, remainingRowCount.get());
    rowsLeftInBatch.set(rowsInBatch);
    return new Batch() {
      private BufferedRow current;

      @Override
      public int width() {
        return width;
      }

      @Override
      public long rowCount() {
        return rowsInBatch;
      }

      @Override
      public String getWorkspaceName() {
        return workspaceName;
      }

      @Override
      public boolean isEmpty() {
        return rowsInBatch <= 0;
      }

      @Override
      public boolean hasNext() {
        return rowsLeftInBatch.get() > 0 && rows.hasNext();
      }

      @Override
      public void nextRow() {
        current = rows.next();
        remainingRowCount.decrementAndGet();
        rowsLeftInBatch.decrementAndGet();
      }

      @Override
      public CachedNode getNode() {
        return current.getNode();
      }

      @Override
      public CachedNode getNode(int index) {
        return current.getNode(index);
      }

      @Override
      public float getScore() {
        return current.getScore();
      }

      @Override
      public float getScore(int index) {
        return current.getScore(index);
      }

      @Override
      public String toString() {
        return "(buffered-batch size=" + rowsInBatch + " )";
      }
    };
  }

  /** Close the delegate sequence, then always release the buffer. */
  @Override
  public void close() {
    try {
      super.close();
    } finally {
      buffer.close();
    }
  }
}
예제 #14
0
/**
 * A {@link BinaryStore} implementation that stores files in other BinaryStores. This store is
 * initialized with a map of number of BinaryStores. On retrieval, the CompositeBinaryStore will
 * look in all the other BinaryStores for the value. When storing a value, the CompositeBinaryStore
 * may receive a StorageHint that MAY be used when determining which named BinaryStore to write to.
 * If a storage hint is not provided (or doesn't match a store), the value will be stored in the
 * default store.
 */
public class CompositeBinaryStore implements BinaryStore {

  private static final String DEFAULT_STRATEGY_HINT = "default";
  private volatile TextExtractors extractors;
  private volatile MimeTypeDetector detector = NullMimeTypeDetector.INSTANCE;

  protected Logger logger = Logger.getLogger(getClass());

  // The inner stores keyed by hint; must contain a usable default (see getDefaultBinaryStore).
  private Map<String, BinaryStore> namedStores;
  // Lazily resolved by getDefaultBinaryStore(); null until first use.
  private BinaryStore defaultBinaryStore;

  /**
   * Initialize a new CompositeBinaryStore using a Map of other BinaryKeys that are keyed by an
   * implementer-provided key. The named stores must include a default BinaryStore that will be used
   * in the absence of storage hints.
   *
   * @param namedStores a {@code Map} of inner stores, grouped by the hint.
   */
  public CompositeBinaryStore(Map<String, BinaryStore> namedStores) {
    this.namedStores = namedStores;
    this.defaultBinaryStore = null;
  }

  /** Initialize the store, and initialize all the named stores. */
  @Override
  public void start() {
    Iterator<Map.Entry<String, BinaryStore>> it = getNamedStoreIterator();

    while (it.hasNext()) {
      BinaryStore bs = it.next().getValue();
      bs.start();
    }
  }

  /** Shut down all the named stores */
  @Override
  public void shutdown() {
    Iterator<Map.Entry<String, BinaryStore>> it = getNamedStoreIterator();

    while (it.hasNext()) {
      BinaryStore bs = it.next().getValue();
      bs.shutdown();
    }
  }

  /**
   * The effective minimum binary size is the smallest minimum among all of the named stores.
   *
   * @return the smallest minimum binary size, or {@link Long#MAX_VALUE} if there are no stores
   */
  @Override
  public long getMinimumBinarySizeInBytes() {
    long minimumBinarySize = Long.MAX_VALUE;

    Iterator<Map.Entry<String, BinaryStore>> it = getNamedStoreIterator();

    while (it.hasNext()) {
      BinaryStore bs = it.next().getValue();
      // Query each store only once ...
      minimumBinarySize = Math.min(minimumBinarySize, bs.getMinimumBinarySizeInBytes());
    }

    return minimumBinarySize;
  }

  /** Propagate the minimum binary size to every named store. */
  @Override
  public void setMinimumBinarySizeInBytes(long minSizeInBytes) {

    Iterator<Map.Entry<String, BinaryStore>> it = getNamedStoreIterator();

    while (it.hasNext()) {
      BinaryStore bs = it.next().getValue();
      bs.setMinimumBinarySizeInBytes(minSizeInBytes);
    }
  }

  /** Propagate the text extractors to every named store. */
  @Override
  public void setTextExtractors(TextExtractors textExtractors) {
    CheckArg.isNotNull(textExtractors, "textExtractors");
    this.extractors = textExtractors;

    Iterator<Map.Entry<String, BinaryStore>> it = getNamedStoreIterator();
    while (it.hasNext()) {
      BinaryStore bs = it.next().getValue();
      bs.setTextExtractors(textExtractors);
    }
  }

  /** Propagate the MIME type detector (or the null detector) to every named store. */
  @Override
  public void setMimeTypeDetector(MimeTypeDetector mimeTypeDetector) {
    this.detector = mimeTypeDetector != null ? mimeTypeDetector : NullMimeTypeDetector.INSTANCE;

    Iterator<Map.Entry<String, BinaryStore>> it = getNamedStoreIterator();

    while (it.hasNext()) {
      BinaryStore bs = it.next().getValue();
      bs.setMimeTypeDetector(mimeTypeDetector);
    }
  }

  /** Store the value in the default store. */
  @Override
  public BinaryValue storeValue(InputStream stream) throws BinaryStoreException {
    return storeValue(stream, DEFAULT_STRATEGY_HINT);
  }

  /** Store the value in the store selected by the supplied hint (or the default store). */
  @Override
  public BinaryValue storeValue(InputStream stream, String hint) throws BinaryStoreException {
    BinaryStore binaryStore = selectBinaryStore(hint);
    BinaryValue bv = binaryStore.storeValue(stream);
    logger.debug("Stored binary " + bv.getKey() + " into binary store " + binaryStore);
    return bv;
  }

  /**
   * Move a value from one named store to another store
   *
   * @param key Binary key to transfer from the source store to the destination store
   * @param source a hint for discovering the source repository; may be null
   * @param destination a hint for discovering the destination repository
   * @return the {@link BinaryKey} value of the moved binary, never {@code null}
   * @throws BinaryStoreException if a source store cannot be found or the source store does not
   *     contain the binary key
   */
  public BinaryKey moveValue(BinaryKey key, String source, String destination)
      throws BinaryStoreException {
    final BinaryStore sourceStore;

    if (source == null) {
      sourceStore = findBinaryStoreContainingKey(key);
    } else {
      sourceStore = selectBinaryStore(source);
    }

    // could not find source store, or the store doesn't actually hold the key ...
    if (sourceStore == null || !sourceStore.hasBinary(key)) {
      throw new BinaryStoreException(JcrI18n.unableToFindBinaryValue.text(key, sourceStore));
    }

    BinaryStore destinationStore = selectBinaryStore(destination);

    // key is already in the destination store
    if (sourceStore.equals(destinationStore)) {
      return key;
    }

    final BinaryValue binaryValue = storeValue(sourceStore.getInputStream(key), destination);
    sourceStore.markAsUnused(java.util.Collections.singleton(key));

    return binaryValue.getKey();
  }

  /**
   * Move a BinaryKey to a named store
   *
   * @param key Binary key to transfer from the source store to the destination store
   * @param destination a hint for discovering the destination repository
   * @throws BinaryStoreException if anything unexpected fails
   */
  public void moveValue(BinaryKey key, String destination) throws BinaryStoreException {
    moveValue(key, null, destination);
  }

  /**
   * Look in each named store (in iteration order) and return the stream from the first store that
   * has the key.
   *
   * @throws BinaryStoreException if no store contains the key
   */
  @Override
  public InputStream getInputStream(BinaryKey key) throws BinaryStoreException {
    Iterator<Map.Entry<String, BinaryStore>> it = getNamedStoreIterator();

    while (it.hasNext()) {
      final Map.Entry<String, BinaryStore> entry = it.next();

      final String binaryStoreKey = entry.getKey();

      BinaryStore binaryStore = entry.getValue();
      logger.trace("Checking binary store " + binaryStoreKey + " for key " + key);
      try {
        return binaryStore.getInputStream(key);
      } catch (BinaryStoreException e) {
        // this exception is "normal", and is thrown when the store does not have the key
        logger.trace(e, "The named store " + binaryStoreKey + " raised exception");
      }
    }

    throw new BinaryStoreException(JcrI18n.unableToFindBinaryValue.text(key, this.toString()));
  }

  /** @return true if any of the named stores contains the key */
  @Override
  public boolean hasBinary(BinaryKey key) {
    Iterator<Map.Entry<String, BinaryStore>> it = getNamedStoreIterator();

    while (it.hasNext()) {
      BinaryStore bs = it.next().getValue();
      if (bs.hasBinary(key)) {
        return true;
      }
    }

    return false;
  }

  /** Mark the keys as unused in every named store; failures are logged and do not stop the loop. */
  @SuppressWarnings("unused")
  @Override
  public void markAsUnused(Iterable<BinaryKey> keys) throws BinaryStoreException {
    Iterator<Map.Entry<String, BinaryStore>> it = getNamedStoreIterator();

    while (it.hasNext()) {
      Map.Entry<String, BinaryStore> entry = it.next();

      final String binaryStoreKey = entry.getKey();
      BinaryStore bs = entry.getValue();

      try {
        bs.markAsUnused(keys);
      } catch (BinaryStoreException e) {
        logger.debug(e, "The named store " + binaryStoreKey + " raised exception");
      }
    }
  }

  /** Remove old unused values from every named store; failures are logged and do not stop the loop. */
  @SuppressWarnings("unused")
  @Override
  public void removeValuesUnusedLongerThan(long minimumAge, TimeUnit unit)
      throws BinaryStoreException {
    Iterator<Map.Entry<String, BinaryStore>> it = getNamedStoreIterator();

    while (it.hasNext()) {
      Map.Entry<String, BinaryStore> entry = it.next();

      final String binaryStoreKey = entry.getKey();
      BinaryStore bs = entry.getValue();

      try {
        bs.removeValuesUnusedLongerThan(minimumAge, unit);
      } catch (BinaryStoreException e) {
        logger.debug(e, "The named store " + binaryStoreKey + " raised exception");
      }
    }
  }

  /**
   * Get the extracted text for the binary, delegating to the first named store that holds the key.
   * The last store's exception is rethrown if every store fails.
   */
  @Override
  public String getText(BinaryValue binary) throws BinaryStoreException {

    if (binary instanceof InMemoryBinaryValue) {
      if (extractors == null || !extractors.extractionEnabled()) {
        return null;
      }

      // The extracted text will never be stored, so try directly using the text extractors ...
      return extractors.extract((InMemoryBinaryValue) binary, new TextExtractorContext(detector));
    }

    Iterator<Map.Entry<String, BinaryStore>> it = getNamedStoreIterator();

    while (it.hasNext()) {
      Map.Entry<String, BinaryStore> entry = it.next();

      final String binaryStoreKey = entry.getKey();
      BinaryStore bs = entry.getValue();
      try {
        if (bs.hasBinary(binary.getKey())) {
          return bs.getText(binary);
        }
      } catch (BinaryStoreException e) {
        logger.debug(e, "The named store " + binaryStoreKey + " raised exception");
        // Only propagate the failure if there is no other store left to try ...
        if (!it.hasNext()) {
          throw e;
        }
      }
    }

    throw new BinaryStoreException(JcrI18n.unableToFindBinaryValue.text(binary.getKey(), this));
  }

  /**
   * Get the MIME type for the binary: detect it directly for in-memory values, otherwise delegate
   * to the first named store that holds the key.
   */
  @Override
  public String getMimeType(BinaryValue binary, String name)
      throws IOException, RepositoryException {
    if (detector == null) {
      return null;
    }

    String detectedMimeType = detector.mimeTypeOf(name, binary);
    if (binary instanceof InMemoryBinaryValue) {
      return detectedMimeType;
    }

    Iterator<Map.Entry<String, BinaryStore>> it = getNamedStoreIterator();

    while (it.hasNext()) {
      Map.Entry<String, BinaryStore> entry = it.next();

      final String binaryStoreKey = entry.getKey();
      BinaryStore bs = entry.getValue();

      try {
        if (bs.hasBinary(binary.getKey())) {
          return bs.getMimeType(binary, name);
        }
      } catch (BinaryStoreException e) {
        logger.debug(e, "The named store " + binaryStoreKey + " raised exception");
        // Only propagate the failure if there is no other store left to try ...
        if (!it.hasNext()) {
          throw e;
        }
      }
    }

    throw new BinaryStoreException(JcrI18n.unableToFindBinaryValue.text(binary.getKey(), this));
  }

  /** @return the concatenation of all the binary keys in all the named stores */
  @Override
  public Iterable<BinaryKey> getAllBinaryKeys() throws BinaryStoreException {

    Iterable<BinaryKey> generatedIterable = new HashSet<BinaryKey>();
    Iterator<Map.Entry<String, BinaryStore>> binaryStoreIterator = getNamedStoreIterator();

    while (binaryStoreIterator.hasNext()) {
      BinaryStore bs = binaryStoreIterator.next().getValue();

      generatedIterable = Collections.concat(generatedIterable, bs.getAllBinaryKeys());
    }

    return generatedIterable;
  }

  /**
   * Get an iterator over all the named stores
   *
   * @return an iterator over the map of binary stores and their given names
   */
  public Iterator<Map.Entry<String, BinaryStore>> getNamedStoreIterator() {
    return namedStores.entrySet().iterator();
  }

  /**
   * Get the named binary store that contains the key
   *
   * @param key the key to the binary content; never null
   * @return the BinaryStore that contains the given key, or null if none does
   */
  public BinaryStore findBinaryStoreContainingKey(BinaryKey key) {
    Iterator<Map.Entry<String, BinaryStore>> binaryStoreIterator = getNamedStoreIterator();

    while (binaryStoreIterator.hasNext()) {
      BinaryStore bs = binaryStoreIterator.next().getValue();
      if (bs.hasBinary(key)) {
        return bs;
      }
    }

    return null;
  }

  /**
   * Select a named binary store for the given hint
   *
   * @param hint a hint to a binary store; possibly null
   * @return a named BinaryStore from the hint, or the default store; never null
   * @throws BinaryStoreException if neither a hinted nor a default store can be found
   */
  private BinaryStore selectBinaryStore(String hint) throws BinaryStoreException {

    BinaryStore namedBinaryStore = null;

    if (hint != null) {
      logger.trace("Selecting named binary store for hint: " + hint);
      namedBinaryStore = namedStores.get(hint);
    }

    if (namedBinaryStore == null) {
      namedBinaryStore = getDefaultBinaryStore();
    }

    if (namedBinaryStore == null) {
      // Previously this fell through to a NullPointerException on the trace call below when the
      // map of named stores was empty; fail with a descriptive exception instead ...
      throw new BinaryStoreException(
          "Unable to find a binary store for hint '" + hint + "' and no default store is available");
    }

    logger.trace("Selected binary store: " + namedBinaryStore.toString());

    return namedBinaryStore;
  }

  /**
   * @return the store registered under the "default" hint, or else the first store in the map, or
   *     null if the map is empty
   */
  private BinaryStore getDefaultBinaryStore() {
    if (defaultBinaryStore == null) {
      if (namedStores.containsKey(DEFAULT_STRATEGY_HINT)) {
        defaultBinaryStore = namedStores.get(DEFAULT_STRATEGY_HINT);
      } else {
        logger.trace(
            "Did not find a named binary store with the key 'default', picking the first binary store in the list");
        final Iterator<BinaryStore> iterator = namedStores.values().iterator();

        if (iterator.hasNext()) {
          defaultBinaryStore = iterator.next();
        }
      }
    }

    return defaultBinaryStore;
  }
}
예제 #15
0
/**
 * An append only journal implementation which stores each {@link ChangeSet} (either local or
 * remote) on the local FS.
 *
 * @author Horia Chiorean ([email protected])
 */
@ThreadSafe
public class LocalJournal implements ChangeJournal {
  private static final Logger LOGGER = Logger.getLogger(LocalJournal.class);

  // NOTE(review): this lock is static, so it is shared by ALL LocalJournal instances in the JVM,
  // not just this one — confirm that is intentional before relying on per-instance journals.
  private static final ReadWriteLock RW_LOCK = new ReentrantReadWriteLock(true);
  private static final int DEFAULT_MAX_TIME_TO_KEEP_FILES = -1;
  private static final String RECORDS_FIELD = "records";
  private static final String JOURNAL_ID_FIELD = "journalId";
  private static final TimeBasedKeys TIME_BASED_KEYS = TimeBasedKeys.create();

  /**
   * When searching records in the local journal, we want to use a small delta to compensate for the
   * fact that there is slight delay from the point in time when a change set is created (after
   * session.save) to the point when the journal record is added (notify async).
   */
  private static final long DEFAULT_LOCAL_SEARCH_DELTA = TimeUnit.SECONDS.toMillis(1);

  private final String journalLocation;
  private final boolean asyncWritesEnabled;
  // Retention period in milliseconds; non-positive means "keep entries indefinitely".
  private final long maxTimeToKeepEntriesMillis;

  private String journalId;
  private DB journalDB;
  /**
   * The records are a map of {@link org.modeshape.jcr.journal.JournalRecord} instances keyed by a
   * time-based key.
   */
  private BTreeMap<Long, JournalRecord> records;

  private long searchTimeDelta;
  private volatile boolean stopped = false;

  /**
   * Creates a new journal instance, in stopped state.
   *
   * @param journalLocation the folder location on the FS where the entries should be saved. Must
   *     not be {@code null}
   * @param asyncWritesEnabled flag which indicates if disk write should be asynchronous or not.
   * @param maxDaysToKeepEntries the maximum number of days this journal should store entries on
   *     disk. A negative value or 0 means entries are kept indefinitely.
   */
  public LocalJournal(
      String journalLocation, boolean asyncWritesEnabled, int maxDaysToKeepEntries) {
    CheckArg.isNotNull(journalLocation, "journalLocation");

    this.journalLocation = journalLocation;
    this.asyncWritesEnabled = asyncWritesEnabled;
    this.maxTimeToKeepEntriesMillis = TimeUnit.DAYS.toMillis(maxDaysToKeepEntries);
    this.stopped = true;
    this.searchTimeDelta = DEFAULT_LOCAL_SEARCH_DELTA;
  }

  /** Creates a journal with synchronous writes and unlimited retention. */
  protected LocalJournal(String journalLocation) {
    this(journalLocation, false, DEFAULT_MAX_TIME_TO_KEEP_FILES);
  }

  @SuppressWarnings("rawtypes")
  @Override
  public void start() throws RepositoryException {
    if (!stopped) {
      return;
    }
    RW_LOCK.writeLock().lock();
    try {
      File journalFileLocation = new File(journalLocation);
      if (!journalFileLocation.exists()) {
        boolean folderHierarchyCreated = journalFileLocation.mkdirs();
        // If mkdirs fails (and assertions are off), the DB creation below will fail and be
        // wrapped as a RepositoryException ...
        assert folderHierarchyCreated;
      }

      /**
       * TODO author=Horia Chiorean date=1/14/14 description=The following should be enabled when
       * append only files are available DBMaker dbMaker = DBMaker.newAppendFileDB(new
       * File(journalFileLocation, RECORDS_FIELD)) .compressionEnable() .checksumEnable()
       * .closeOnJvmShutdown() .snapshotEnable();
       */
      DBMaker dbMaker =
          DBMaker.newFileDB(new File(journalFileLocation, RECORDS_FIELD))
              .compressionEnable()
              .checksumEnable()
              .mmapFileEnableIfSupported()
              .snapshotEnable();
      if (asyncWritesEnabled) {
        dbMaker.asyncWriteEnable();
      }
      this.journalDB = dbMaker.make();
      this.records = this.journalDB.createTreeMap(RECORDS_FIELD).counterEnable().makeOrGet();
      Atomic.String journalAtomic = this.journalDB.getAtomicString(JOURNAL_ID_FIELD);
      // only write the value the first time
      if (StringUtil.isBlank(journalAtomic.get())) {
        journalAtomic.set("journal_" + UUID.randomUUID().toString());
      }
      this.journalId = journalAtomic.get();
      this.stopped = false;
    } catch (Exception e) {
      throw new RepositoryException(JcrI18n.cannotStartJournal.text(), e);
    } finally {
      RW_LOCK.writeLock().unlock();
    }
  }

  @Override
  public void shutdown() {
    if (this.stopped) {
      return;
    }
    RW_LOCK.writeLock().lock();
    this.stopped = true;
    try {
      this.journalDB.commit();
      this.journalDB.close();
    } catch (Exception e) {
      LOGGER.error(e, JcrI18n.cannotStopJournal);
    } finally {
      RW_LOCK.writeLock().unlock();
    }
  }

  @Override
  public void notify(ChangeSet changeSet) {
    // do not store records from jcr:system
    boolean systemWorkspaceChanges =
        RepositoryConfiguration.SYSTEM_WORKSPACE_NAME.equalsIgnoreCase(
            changeSet.getWorkspaceName());
    if (changeSet.isEmpty() || systemWorkspaceChanges) {
      return;
    }
    addRecords(new JournalRecord(changeSet));
  }

  @Override
  public void addRecords(JournalRecord... records) {
    if (stopped) {
      return;
    }
    RW_LOCK.writeLock().lock();
    try {
      LOGGER.debug("Adding {0} records", records.length);
      for (JournalRecord record : records) {
        if (record.getTimeBasedKey() < 0) {
          // generate a unique timestamp only if there isn't one. In some scenarios (i.e. running in
          // a cluster) we
          // always want to keep the original TS because otherwise it would be impossible to have a
          // correct order
          // and therefore search
          long createTimeMillisUTC = TIME_BASED_KEYS.nextKey();
          record.withTimeBasedKey(createTimeMillisUTC);
        }
        this.records.put(record.getTimeBasedKey(), record);
      }
      this.journalDB.commit();
    } finally {
      RW_LOCK.writeLock().unlock();
    }
  }

  @Override
  public void removeOldRecords() {
    // Only clean up when a retention period has been configured. With the default (-1), the
    // subtraction below would produce a timestamp in the future and purge the entire journal.
    if (maxTimeToKeepEntriesMillis > 0) {
      removeRecordsOlderThan(System.currentTimeMillis() - this.maxTimeToKeepEntriesMillis);
    }
  }

  protected synchronized void removeRecordsOlderThan(long millisInUtc) {
    RW_LOCK.writeLock().lock();
    try {
      if (millisInUtc <= 0 || stopped) {
        return;
      }
      long searchBound = TIME_BASED_KEYS.getCounterEndingAt(millisInUtc);
      LOGGER.debug("Removing records older than " + searchBound);
      // headMap is a live view, so clearing it removes the entries from the underlying B-tree ...
      NavigableMap<Long, JournalRecord> toRemove = this.records.headMap(searchBound);
      toRemove.clear();
      journalDB.commit();
      journalDB.compact();
    } finally {
      RW_LOCK.writeLock().unlock();
    }
  }

  protected String getJournalLocation() {
    return journalLocation;
  }

  @Override
  public Records allRecords(boolean descendingOrder) {
    // Guard against a stopped (or never-started) journal, consistent with recordsNewerThan(...);
    // previously this could NPE on a null records map.
    if (stopped) {
      return Records.EMPTY;
    }
    return recordsFrom(records, descendingOrder);
  }

  @Override
  public JournalRecord lastRecord() {
    return this.records == null || this.records.isEmpty()
        ? null
        : this.records.lastEntry().getValue();
  }

  @Override
  public Records recordsNewerThan(
      DateTime changeSetTime, boolean inclusive, boolean descendingOrder) {
    if (stopped) {
      return Records.EMPTY;
    }

    long changeSetMillisUTC = -1;
    long searchBound = -1;
    if (changeSetTime != null) {
      changeSetMillisUTC = changeSetTime.getMillis();
      // adjust the millis using a delta so that we are sure we catch everything in a cluster which
      // may have differences in
      // clock time
      searchBound = TIME_BASED_KEYS.getCounterStartingAt(changeSetMillisUTC - searchTimeDelta);
    }

    NavigableMap<Long, JournalRecord> subMap = records.tailMap(searchBound, true);

    // process each of the records from the result and look at the timestamp of the changeset, so
    // that we're sure we only include
    // the correct ones (we used a delta to make sure we get everything)
    long startKeyInSubMap = -1;
    for (Long timeBasedKey : subMap.keySet()) {
      JournalRecord record = subMap.get(timeBasedKey);
      long recordChangeTimeMillisUTC = record.getChangeTimeMillis();
      if (((recordChangeTimeMillisUTC == changeSetMillisUTC) && inclusive)
          || recordChangeTimeMillisUTC > changeSetMillisUTC) {
        startKeyInSubMap = timeBasedKey;
        break;
      }
    }
    return startKeyInSubMap != -1
        ? recordsFrom(subMap.tailMap(startKeyInSubMap, true), descendingOrder)
        : Records.EMPTY;
  }

  @Override
  public String journalId() {
    return journalId;
  }

  /** Test hook: override the clock-skew compensation delta used by recordsNewerThan(...). */
  protected LocalJournal withSearchTimeDelta(final long searchTimeDelta) {
    this.searchTimeDelta = searchTimeDelta;
    return this;
  }

  /**
   * Wrap a navigable map of journal records as a read-only {@link Records} view.
   *
   * @param content the (live) map of records keyed by time-based key; may not be null
   * @param descending true to iterate newest-first
   * @return the read-only view; never null
   */
  private static Records recordsFrom(
      final NavigableMap<Long, JournalRecord> content, boolean descending) {
    final Iterator<Long> iterator =
        descending ? content.descendingKeySet().iterator() : content.keySet().iterator();
    return new Records() {
      @Override
      public int size() {
        return content.size();
      }

      @Override
      public Iterator<JournalRecord> iterator() {
        return new Iterator<JournalRecord>() {
          @Override
          public boolean hasNext() {
            return iterator.hasNext();
          }

          @Override
          public JournalRecord next() {
            return content.get(iterator.next());
          }

          @Override
          public void remove() {
            throw new UnsupportedOperationException("This iterator is read-only");
          }
        };
      }

      @Override
      public boolean isEmpty() {
        return size() == 0;
      }
    };
  }
}
예제 #16
0
/**
 * A binary store that persists binary values in a Cassandra cluster, in the {@code
 * modeshape.binary} table keyed by the content's binary key (its SHA-1 hex string). The {@code
 * usage} column tracks whether a value is in use (1) or has been marked as unused (0), and a
 * transient {@link FileSystemBinaryStore} is used as a staging cache while new content is streamed
 * in.
 *
 * @author kulikov
 */
public class CassandraBinaryStore extends AbstractBinaryStore {

  private static final Logger LOGGER = Logger.getLogger(CassandraBinaryStore.class);

  // Flags passed to contentExists(...): ALIVE selects rows with usage=1, UNUSED rows with usage=0
  private static final boolean ALIVE = true;
  private static final boolean UNUSED = false;

  private Cluster cluster;
  private Session session;
  private String address;

  // Transient local store used to stage incoming streams and obtain their SHA-1 keys
  private FileSystemBinaryStore cache;

  /**
   * Create a store that will connect to the Cassandra node at the given contact point.
   *
   * @param address the host name or IP address of a Cassandra node
   */
  public CassandraBinaryStore(String address) {
    this.address = address;
    this.cache = TransientBinaryStore.get();
  }

  /**
   * Read the stored MIME type for the given binary value.
   *
   * @param source the binary value; may not be null
   * @return the value of the {@code mime_type} column; may be null if none was stored
   * @throws BinaryStoreException if the content does not exist or the query fails
   */
  @Override
  protected String getStoredMimeType(BinaryValue source) throws BinaryStoreException {
    try {
      checkContentExists(source);
      // Binary keys are hex SHA-1 strings, so embedding them directly in the CQL text is safe
      ResultSet rs =
          session.execute(
              "SELECT mime_type FROM modeshape.binary WHERE cid = '" + source.getKey() + "';");
      Row row = rs.one();
      if (row == null) {
        throw new BinaryStoreException(
            JcrI18n.unableToFindBinaryValue.text(source.getKey(), session));
      }
      return row.getString("mime_type");
    } catch (BinaryStoreException e) {
      throw e;
    } catch (RuntimeException e) {
      throw new BinaryStoreException(e);
    }
  }

  /**
   * Verify that content with the given value's key exists and is marked as used.
   *
   * @param source the binary value to check
   * @throws BinaryStoreException if the content cannot be found
   */
  private void checkContentExists(BinaryValue source) throws BinaryStoreException {
    if (!contentExists(source.getKey(), true)) {
      throw new BinaryStoreException(
          JcrI18n.unableToFindBinaryValue.text(source.getKey(), session));
    }
  }

  /**
   * Store the MIME type for the given binary value.
   *
   * @param source the binary value whose row should be updated
   * @param mimeType the MIME type to record
   * @throws BinaryStoreException if the update fails
   */
  @Override
  protected void storeMimeType(BinaryValue source, String mimeType) throws BinaryStoreException {
    try {
      // Bind the MIME type as a parameter instead of concatenating it into the CQL text: a value
      // containing a quote character would otherwise break (or inject into) the statement
      PreparedStatement query =
          session.prepare("UPDATE modeshape.binary SET mime_type=? WHERE cid=?;");
      session.execute(new BoundStatement(query).bind(mimeType, source.getKey().toString()));
    } catch (RuntimeException e) {
      throw new BinaryStoreException(e);
    }
  }

  /**
   * Store the text extracted from the given binary value.
   *
   * @param source the binary value whose row should be updated
   * @param extractedText the extracted text; arbitrary content that may contain quotes
   * @throws BinaryStoreException if the update fails
   */
  @Override
  public void storeExtractedText(BinaryValue source, String extractedText)
      throws BinaryStoreException {
    try {
      // Extracted text is arbitrary and frequently contains quote characters, so it must be bound
      // as a parameter rather than concatenated into the CQL statement
      PreparedStatement query =
          session.prepare("UPDATE modeshape.binary SET ext_text=? WHERE cid=?;");
      session.execute(new BoundStatement(query).bind(extractedText, source.getKey().toString()));
    } catch (RuntimeException e) {
      throw new BinaryStoreException(e);
    }
  }

  /**
   * Read the previously stored extracted text for the given binary value.
   *
   * @param source the binary value; may not be null
   * @return the value of the {@code ext_text} column; may be null if none was stored
   * @throws BinaryStoreException if the content does not exist or the query fails
   */
  @Override
  public String getExtractedText(BinaryValue source) throws BinaryStoreException {
    try {
      checkContentExists(source);
      ResultSet rs =
          session.execute(
              "SELECT ext_text FROM modeshape.binary WHERE cid = '" + source.getKey() + "';");
      Row row = rs.one();
      if (row == null) {
        throw new BinaryStoreException(
            JcrI18n.unableToFindBinaryValue.text(source.getKey(), session));
      }
      return row.getString("ext_text");
    } catch (BinaryStoreException e) {
      throw e;
    } catch (RuntimeException e) {
      throw new BinaryStoreException(e);
    }
  }

  /**
   * Store the content of the given stream, keyed by its SHA-1. Duplicate content is detected and
   * reused, and content previously marked as unused is revived by resetting its usage flag.
   *
   * @param stream the content to store; it is first consumed by the staging cache and then re-read
   *     via {@link #buffer(InputStream)}, which calls {@code reset()} — so the stream must support
   *     mark/reset
   * @return the stored binary value; never null
   * @throws BinaryStoreException if the content cannot be stored
   */
  @Override
  public BinaryValue storeValue(InputStream stream) throws BinaryStoreException {
    // store into temporary file system store and get SHA-1
    BinaryValue temp = cache.storeValue(stream);
    try {
      // prepare new binary key based on SHA-1
      BinaryKey key = new BinaryKey(temp.getKey().toString());

      // check for duplicate content; if it is already stored and alive, just reuse it
      if (this.contentExists(key, ALIVE)) {
        return new StoredBinaryValue(this, key, temp.getSize());
      }

      // check unused content: revive it rather than re-uploading the payload
      if (this.contentExists(key, UNUSED)) {
        session.execute("UPDATE modeshape.binary SET usage=1 WHERE cid='" + key + "';");
        return new StoredBinaryValue(this, key, temp.getSize());
      }

      // store content using a bound statement (the payload is binary data)
      PreparedStatement query =
          session.prepare(
              "INSERT INTO modeshape.binary (cid, usage_time, payload, usage) VALUES ( ?,?,?,1 );");
      BoundStatement statement = new BoundStatement(query);
      session.execute(statement.bind(key.toString(), new Date(), buffer(stream)));
      return new StoredBinaryValue(this, key, temp.getSize());
    } catch (BinaryStoreException e) {
      throw e;
    } catch (IOException e) {
      throw new BinaryStoreException(e);
    } catch (RuntimeException e) {
      throw new BinaryStoreException(e);
    } finally {
      // remove content from temp store
      cache.markAsUnused(temp.getKey());
    }
  }

  /**
   * Open a stream over the stored payload of the given key (only content with usage=1 is visible).
   *
   * @param key the binary key; may not be null
   * @return a stream over the payload bytes; never null
   * @throws BinaryStoreException if no used content exists for the key or the query fails
   */
  @Override
  public InputStream getInputStream(BinaryKey key) throws BinaryStoreException {
    try {
      ResultSet rs =
          session.execute(
              "SELECT payload FROM modeshape.binary WHERE cid='"
                  + key.toString()
                  + "' and usage=1;");
      Row row = rs.one();
      if (row == null) {
        throw new BinaryStoreException(JcrI18n.unableToFindBinaryValue.text(key, session));
      }

      ByteBuffer buffer = row.getBytes("payload");
      return new BufferedInputStream(buffer);
    } catch (BinaryStoreException e) {
      throw e;
    } catch (RuntimeException e) {
      throw new BinaryStoreException(e);
    }
  }

  /**
   * Mark each of the given keys as unused (usage=0) so that the reaper may later remove them.
   *
   * @param keys the keys to mark; may not be null
   * @throws BinaryStoreException if any update fails
   */
  @Override
  public void markAsUnused(Iterable<BinaryKey> keys) throws BinaryStoreException {
    try {
      for (BinaryKey key : keys) {
        session.execute("UPDATE modeshape.binary SET usage=0 where cid='" + key + "';");
      }
    } catch (RuntimeException e) {
      throw new BinaryStoreException(e);
    }
  }

  /**
   * Remove values that have been unused for longer than the given minimum age.
   *
   * @param minimumAge the minimum age of values to remove
   * @param unit the time unit of {@code minimumAge}
   * @throws BinaryStoreException if any of the cleanup queries fail
   */
  @Override
  public void removeValuesUnusedLongerThan(long minimumAge, TimeUnit unit)
      throws BinaryStoreException {
    try {
      Date deadline = new Date(new Date().getTime() - unit.toMillis(minimumAge));
      // When querying using 2nd indexes, Cassandra
      // (it's not CQL specific) requires that you use an '=' for at least one of
      // the indexed column in the where clause. This is a limitation of Cassandra.
      // So we have to do some tricks here
      ResultSet rs =
          session.execute(
              "SELECT cid from modeshape.binary where usage=0 and usage_time < "
                  + deadline.getTime()
                  + " allow filtering;");

      Iterator<Row> rows = rs.iterator();
      while (rows.hasNext()) {
        session.execute(
            "DELETE from modeshape.binary where cid = '" + rows.next().getString("cid") + "';");
      }

      // NOTE(review): this second pass also deletes rows with usage=1 (content still marked as
      // used) once their usage_time is older than the deadline, which looks inconsistent with
      // this method's "unused" contract — confirm whether in-use content should really be reaped
      rs =
          session.execute(
              "SELECT cid from modeshape.binary where usage=1 and usage_time < "
                  + deadline.getTime()
                  + " allow filtering;");
      rows = rs.iterator();
      while (rows.hasNext()) {
        session.execute(
            "DELETE from modeshape.binary where cid = '" + rows.next().getString("cid") + "';");
      }
    } catch (RuntimeException e) {
      throw new BinaryStoreException(e);
    }
  }

  /**
   * Get the keys of all content currently marked as used (usage=1).
   *
   * @return the set of keys; never null, possibly empty
   * @throws BinaryStoreException if the query fails
   */
  @Override
  public Iterable<BinaryKey> getAllBinaryKeys() throws BinaryStoreException {
    try {
      ResultSet rs = session.execute("SELECT cid from modeshape.binary WHERE usage=1;");
      Iterator<Row> it = rs.iterator();
      HashSet<BinaryKey> keys = new HashSet<BinaryKey>();
      while (it.hasNext()) {
        keys.add(new BinaryKey(it.next().getString("cid")));
      }
      return keys;
    } catch (RuntimeException e) {
      throw new BinaryStoreException(e);
    }
  }

  /**
   * Connect to the Cassandra cluster and create the keyspace, table, and secondary indexes if they
   * do not exist yet.
   */
  @Override
  public void start() {
    cluster = Cluster.builder().addContactPoint(address).build();
    Metadata metadata = cluster.getMetadata();
    LOGGER.debug("Connected to cluster: {0}", metadata.getClusterName());
    for (Host host : metadata.getAllHosts()) {
      LOGGER.debug(
          "Datacenter: {0}; Host: {1}; Rack: {2}",
          host.getDatacenter(), host.getAddress(), host.getRack());
    }

    session = cluster.connect();
    try {
      session.execute(
          "CREATE KEYSPACE modeshape WITH replication "
              + "= {'class':'SimpleStrategy', 'replication_factor':3};");
    } catch (AlreadyExistsException ignored) {
      // the keyspace was created by a previous run; nothing to do
    }

    session.execute("USE modeshape;");

    try {
      session.execute(
          "CREATE TABLE modeshape.binary("
              + "cid text PRIMARY KEY,"
              + "mime_type text,"
              + "ext_text text,"
              + "usage int,"
              + "usage_time timestamp,"
              + "payload blob)");
    } catch (AlreadyExistsException ignored) {
      // the table was created by a previous run; nothing to do
    }

    try {
      session.execute("CREATE INDEX USAGE_IDX ON modeshape.binary (usage);");
    } catch (InvalidQueryException ignored) {
      // the index already exists
    }

    try {
      session.execute("CREATE INDEX EXPIRE_IDX ON modeshape.binary (usage_time);");
    } catch (InvalidQueryException ignored) {
      // the index already exists
    }
  }

  /**
   * Test content for existence.
   *
   * @param key content identifier
   * @param alive true inside used content and false for checking within content marked as unused.
   * @return true if content found
   * @throws BinaryStoreException if the query fails
   */
  private boolean contentExists(BinaryKey key, boolean alive) throws BinaryStoreException {
    try {
      String query = "SELECT payload from modeshape.binary where cid='" + key.toString() + "'";
      query = alive ? query + " and usage=1;" : query + " and usage = 0;";
      ResultSet rs = session.execute(query);
      return rs.iterator().hasNext();
    } catch (RuntimeException e) {
      throw new BinaryStoreException(e);
    }
  }

  /**
   * Converts input stream into ByteBuffer.
   *
   * <p>Note: {@code stream.reset()} requires a mark/reset-capable stream; the caller ({@link
   * #storeValue(InputStream)}) has already consumed the stream once via the staging cache, so a
   * non-resettable stream would fail here — TODO confirm callers always pass resettable streams.
   *
   * @param stream the stream to rewind and read fully
   * @return a byte buffer containing the stream's entire content
   * @throws IOException if the stream cannot be reset or read
   */
  private ByteBuffer buffer(InputStream stream) throws IOException {
    stream.reset();
    ByteArrayOutputStream bout = new ByteArrayOutputStream();
    IOUtils.copy(stream, bout);
    return ByteBuffer.wrap(bout.toByteArray());
  }

  /**
   * A read-only {@link InputStream} over a {@link ByteBuffer}. Note that this inner class
   * intentionally shadows {@code java.io.BufferedInputStream} within this file; it is unrelated to
   * that JDK class.
   */
  protected final class BufferedInputStream extends InputStream {
    private ByteBuffer buffer;

    protected BufferedInputStream(ByteBuffer buffer) {
      this.buffer = buffer;
    }

    @Override
    public int read() {
      // Mask to an unsigned byte value; return -1 at end of buffer per the InputStream contract
      return buffer.position() < buffer.limit() ? buffer.get() & 0xff : -1;
    }
  }
}
// ===== Example #17 =====
/**
 * An {@link Environment} that can be used within a local (non-clustered) process.
 *
 * <p>To use a custom Environment instance, simply create a {@link RepositoryConfiguration} as usual
 * but then call the {@link RepositoryConfiguration#with(Environment)} with the Environment instance
 * and then use the resulting RepositoryConfiguration instance.
 *
 * <p>When a ModeShape {@link RepositoryConfiguration repository configuration} defines cache
 * containers with configuration files on the file system or the classpath, then a {@link
 * LocalEnvironment} instance can be used as-is with no other configuration or setup.
 *
 * <p>If applications wish to programmatically configure the Infinispan caches or cache containers,
 * then those configurations can be registered with a LocalEnvironment instance. Specifically, the
 * {@link #addCacheContainer(String, CacheContainer)} and {@link #addCacheContainerIfAbsent(String,
 * CacheContainer)} methods register a programmatically created instance of a {@link
 * CacheContainer}. Alternatively, the {@link #defineCache(String, String, Configuration)} method
 * can be used to register a named cache with a programmatically created {@link Configuration
 * Infinispan cache configuration}.
 */
public class LocalEnvironment implements Environment {

  public static final Class<? extends TransactionManagerLookup>
      DEFAULT_TRANSACTION_MANAGER_LOOKUP_CLASS = GenericTransactionManagerLookup.class;

  /**
   * The name for the default cache container that is used when {@link #getCacheContainer()} is
   * called or if null is supplied as the name in {@link #getCacheContainer(String)}.
   */
  public static final String DEFAULT_CONFIGURATION_NAME = "defaultCacheContainer";

  private final Class<? extends TransactionManagerLookup> transactionManagerLookupClass;
  private final ConcurrentMap<String, CacheContainer> containers =
      new ConcurrentHashMap<String, CacheContainer>();
  private volatile boolean shared = false;
  private final Logger logger = Logger.getLogger(getClass());

  /** Create an environment that uses the default transaction manager lookup implementation. */
  public LocalEnvironment() {
    this.transactionManagerLookupClass = DEFAULT_TRANSACTION_MANAGER_LOOKUP_CLASS;
  }

  /**
   * Create an environment that uses the supplied transaction manager lookup implementation.
   *
   * @param transactionManagerLookupClass the lookup implementation class; if null, the {@link
   *     #DEFAULT_TRANSACTION_MANAGER_LOOKUP_CLASS default} is used
   */
  public LocalEnvironment(Class<? extends TransactionManagerLookup> transactionManagerLookupClass) {
    if (transactionManagerLookupClass == null)
      transactionManagerLookupClass = DEFAULT_TRANSACTION_MANAGER_LOOKUP_CLASS;
    this.transactionManagerLookupClass = transactionManagerLookupClass;
  }

  /**
   * Get the default cache container.
   *
   * @return the default cache container; never null
   * @throws IOException if the container configuration cannot be read
   * @throws NamingException if the container must be looked up in JNDI and that lookup fails
   */
  public CacheContainer getCacheContainer() throws IOException, NamingException {
    return getCacheContainer(null);
  }

  @Override
  public synchronized CacheContainer getCacheContainer(String name)
      throws IOException, NamingException {
    name = containerName(name);
    CacheContainer container = containers.get(name);
    if (container == null) {
      container = createContainer(name);
      containers.put(name, container);
    }
    return container;
  }

  /**
   * Shutdown this environment, allowing it to reclaim any resources.
   *
   * <p>This method does nothing if the environment has been marked as {@link #isShared() shared}.
   */
  @Override
  public synchronized void shutdown() {
    if (!shared) doShutdown();
  }

  /** Shutdown all containers and caches. */
  protected void doShutdown() {
    for (CacheContainer container : containers.values()) {
      shutdown(container);
    }
    containers.clear();
  }

  /**
   * Build a class loader from the supplied classpath entries, delegating to the supplied fallback
   * loader (or this class's loader) when an entry cannot be resolved.
   *
   * @param fallbackLoader an optional loader to consult after the classpath entries; may be null
   * @param classpathEntries the URLs of additional classpath locations; may be null or empty
   * @return a class loader; never null
   */
  @Override
  public ClassLoader getClassLoader(ClassLoader fallbackLoader, String... classpathEntries) {
    List<String> urls = new ArrayList<String>();
    if (classpathEntries != null) {
      for (String url : classpathEntries) {
        if (!StringUtil.isBlank(url)) {
          urls.add(url);
        }
      }
    }
    List<ClassLoader> delegatesList = new ArrayList<ClassLoader>();
    if (!urls.isEmpty()) {
      StringURLClassLoader urlClassLoader = new StringURLClassLoader(urls);
      // only if any custom urls were parsed add this loader
      if (urlClassLoader.getURLs().length > 0) {
        delegatesList.add(urlClassLoader);
      }
    }

    ClassLoader currentLoader = getClass().getClassLoader();
    if (fallbackLoader != null && !fallbackLoader.equals(currentLoader)) {
      // if the parent of fallback is the same as the current loader, just use that. Note that
      // getParent() may return null (e.g. for loaders parented by the bootstrap loader), so the
      // comparison is made null-safely from the non-null side.
      ClassLoader fallbackParent = fallbackLoader.getParent();
      if (currentLoader.equals(fallbackParent)) {
        currentLoader = fallbackLoader;
      } else {
        delegatesList.add(fallbackLoader);
      }
    }

    return delegatesList.isEmpty()
        ? currentLoader
        : new DelegatingClassLoader(currentLoader, delegatesList);
  }

  /**
   * Stop the supplied container.
   *
   * @param container the container to stop; may not be null
   */
  protected void shutdown(CacheContainer container) {
    container.stop();
  }

  /**
   * Get the transaction manager lookup class used by this environment.
   *
   * @return the lookup class; never null
   */
  protected Class<? extends TransactionManagerLookup> transactionManagerLookupClass() {
    return transactionManagerLookupClass;
  }

  /**
   * Instantiate the transaction manager lookup implementation.
   *
   * @return a new lookup instance; never null
   */
  protected TransactionManagerLookup transactionManagerLookupInstance() {
    try {
      return transactionManagerLookupClass().newInstance();
    } catch (Throwable t) {
      throw new RuntimeException(t);
    }
  }

  /**
   * Create a cache container for the given configuration location: first as a file on the file
   * system or classpath, then as a JNDI name, and finally falling back to a programmatic default.
   *
   * @param configFile the configuration file location, JNDI name, or the default container name
   * @return the cache container; never null
   * @throws IOException if the configuration file cannot be read
   * @throws NamingException if the JNDI lookup fails
   */
  protected CacheContainer createContainer(String configFile) throws IOException, NamingException {
    CacheContainer container = null;
    // First try finding the cache configuration ...
    if (configFile != null && !configFile.equals(DEFAULT_CONFIGURATION_NAME)) {
      configFile = configFile.trim();
      try {
        logger.debug("Starting cache manager using configuration at '{0}'", configFile);
        container = new DefaultCacheManager(configFile);
      } catch (FileNotFoundException e) {
        // Configuration file was not found, so try JNDI using configFileName as JNDI name...
        container = (CacheContainer) jndiContext().lookup(configFile);
      }
    }
    if (container == null) {
      // The default Infinispan configuration is in-memory, local and non-clustered.
      // But we need a transaction manager, so use the generic TM which is a good default ...
      ConfigurationBuilder config = createDefaultConfigurationBuilder();
      GlobalConfigurationBuilder global = createGlobalConfigurationBuilder();
      container = createContainer(global, config);
    }
    return container;
  }

  /**
   * Create the default configuration.
   *
   * @return the default cache configuration.
   */
  protected ConfigurationBuilder createDefaultConfigurationBuilder() {
    ConfigurationBuilder configurationBuilder = new ConfigurationBuilder();
    configurationBuilder.transaction().transactionMode(TransactionMode.TRANSACTIONAL);
    configurationBuilder.transaction().transactionManagerLookup(transactionManagerLookupInstance());
    configurationBuilder.transaction().lockingMode(LockingMode.PESSIMISTIC);
    return configurationBuilder;
  }

  /**
   * Create the global configuration.
   *
   * @return the global configuration.
   */
  protected GlobalConfigurationBuilder createGlobalConfigurationBuilder() {
    GlobalConfigurationBuilder global = new GlobalConfigurationBuilder();
    global.globalJmxStatistics().allowDuplicateDomains(true);
    // TODO author=Horia Chiorean date=7/26/12 description=MODE-1524 - Currently we don't use
    // advanced externalizers
    // global =
    // global.fluent().serialization().addAdvancedExternalizer(Schematic.externalizers()).build();
    return global;
  }

  /**
   * Create a cache container using the supplied configurations.
   *
   * @param globalConfigurationBuilder the global configuration builder
   * @param configurationBuilder the default cache configuration builder
   * @return the cache container
   */
  protected CacheContainer createContainer(
      GlobalConfigurationBuilder globalConfigurationBuilder,
      ConfigurationBuilder configurationBuilder) {
    GlobalConfiguration globalConfiguration = globalConfigurationBuilder.build();
    Configuration configuration = configurationBuilder.build();
    logger.debug(
        "Starting cache manager with global configuration \n{0}\nand default configuration:\n{1}",
        globalConfiguration, configuration);
    return new DefaultCacheManager(globalConfiguration, configuration);
  }

  /**
   * Create the default configuration.
   *
   * @return the default cache configuration.
   * @deprecated see {@link #createDefaultConfigurationBuilder()}
   */
  @Deprecated
  protected Configuration createDefaultConfiguration() {
    return createDefaultConfigurationBuilder().build();
  }

  /**
   * Create the global configuration.
   *
   * @return the global configuration.
   * @deprecated see {@link #createGlobalConfigurationBuilder()}
   */
  @Deprecated
  protected GlobalConfiguration createGlobalConfiguration() {
    return createGlobalConfigurationBuilder().build();
  }

  /**
   * Create a cache container using the supplied configurations.
   *
   * @param globalConfiguration the global configuration
   * @param configuration the default cache configuration
   * @return the cache container
   * @deprecated use {@link #createContainer(GlobalConfigurationBuilder, ConfigurationBuilder)}
   *     instead
   */
  @Deprecated
  protected CacheContainer createContainer(
      GlobalConfiguration globalConfiguration, Configuration configuration) {
    logger.debug(
        "Starting cache manager with global configuration \n{0}\nand default configuration:\n{1}",
        globalConfiguration, configuration);
    return new DefaultCacheManager(globalConfiguration, configuration);
  }

  /**
   * Create the JNDI context used when a configuration location must be resolved via JNDI.
   *
   * @return a new initial context; never null
   * @throws NamingException if the context cannot be created
   */
  protected Context jndiContext() throws NamingException {
    return new InitialContext();
  }

  /**
   * Add the supplied {@link CacheContainer} under the supplied name if and only if there is not
   * already a cache container registered at that name.
   *
   * @param name the cache container name; may be null if the {@link #DEFAULT_CONFIGURATION_NAME
   *     default configuration name} should be used
   * @param cacheContainer the cache container; may not be null
   */
  public void addCacheContainerIfAbsent(String name, CacheContainer cacheContainer) {
    CheckArg.isNotNull(cacheContainer, "cacheContainer");
    // Map a null name to the default (ConcurrentHashMap rejects null keys, and the contract
    // above explicitly allows a null name)
    containers.putIfAbsent(containerName(name), cacheContainer);
  }

  /**
   * Add the supplied {@link CacheContainer} under the supplied name, replacing any cache container
   * previously registered at that name.
   *
   * @param name the cache container name; may be null if the {@link #DEFAULT_CONFIGURATION_NAME
   *     default configuration name} should be used
   * @param cacheContainer the cache container; may not be null
   * @return the cache container that was previously registered in this environment by the supplied
   *     name, or null if there was no such previously-registered cache container
   */
  public CacheContainer addCacheContainer(String name, CacheContainer cacheContainer) {
    CheckArg.isNotNull(cacheContainer, "cacheContainer");
    // Map a null name to the default (ConcurrentHashMap rejects null keys, and the contract
    // above explicitly allows a null name)
    return containers.put(containerName(name), cacheContainer);
  }

  /**
   * Define within the default cache container an Infinispan cache with the given cache name and
   * configuration. Note that the cache container is created if required, but if it exists it must
   * implement the {@link EmbeddedCacheManager} interface for this method to succeed.
   *
   * @param cacheName the name of the cache being defined; may not be null
   * @param configuration the cache configuration; may not be null
   * @return the clone of the supplied configuration that is used by the cache container; never null
   */
  public Configuration defineCache(String cacheName, Configuration configuration) {
    CheckArg.isNotNull(cacheName, "cacheName");
    CheckArg.isNotNull(configuration, "configuration");
    return defineCache(null, cacheName, configuration);
  }

  /**
   * Define within the named cache container an Infinispan cache with the given cache name and
   * configuration. Note that the cache container is created if required, but if it exists it must
   * implement the {@link EmbeddedCacheManager} interface for this method to succeed.
   *
   * @param cacheContainerName the name of the cache container; if null, the {@link
   *     #DEFAULT_CONFIGURATION_NAME default container name} is used
   * @param cacheName the name of the cache being defined; may not be null
   * @param configuration the cache configuration; may not be null
   * @return the clone of the supplied configuration that is used by the cache container; never null
   */
  public Configuration defineCache(
      String cacheContainerName, String cacheName, Configuration configuration) {
    CheckArg.isNotNull(cacheName, "cacheName");
    CheckArg.isNotNull(configuration, "configuration");
    cacheContainerName = containerName(cacheContainerName);
    CacheContainer container = containers.get(cacheContainerName);
    if (container == null) {
      // Use the non-deprecated, builder-based creation path (equivalent to the deprecated
      // createDefaultConfiguration()/createGlobalConfiguration() pair, which just builds these)
      ConfigurationBuilder config = createDefaultConfigurationBuilder();
      GlobalConfigurationBuilder global = createGlobalConfigurationBuilder();
      CacheContainer newContainer = createContainer(global, config);
      container = containers.putIfAbsent(cacheContainerName, newContainer);
      if (container == null) container = newContainer;
    }
    return ((EmbeddedCacheManager) container).defineConfiguration(cacheName, configuration);
  }

  /**
   * Resolve a possibly-null container name to a concrete map key.
   *
   * @param name the supplied name; may be null
   * @return the supplied name, or {@link #DEFAULT_CONFIGURATION_NAME} when null
   */
  private static String containerName(String name) {
    return name != null ? name : DEFAULT_CONFIGURATION_NAME;
  }

  /**
   * Set whether this environment is shared amongst multiple repositories. Shared environments are
   * not shutdown automatically, and the application is expected to shutdown all containers and
   * caches. By default, environments are not shared unless this method is explicitly called with a
   * parameter value of <code>true</code>.
   *
   * @param shared true if this environment is shared, or false otherwise
   * @see #isShared()
   */
  public void setShared(boolean shared) {
    this.shared = shared;
  }

  /**
   * Return whether this environment is shared amongst multiple repositories.
   *
   * @return true if this environment is shared, or false otherwise
   * @see #setShared(boolean)
   */
  public boolean isShared() {
    return shared;
  }
}
// ===== Example #18 =====
  /**
   * Execute a single sequencing job: load the changed input item, optionally filter by the
   * sequencer's accepted MIME types, run the sequencer against the selected node, persist the
   * output, and fire the corresponding sequencing (or sequencing-failure) event. The statistics
   * counters are always updated and the internal session(s) always logged out in the finally
   * block, regardless of success or failure.
   */
  @Override
  public void run() {
    JcrSession inputSession = null;
    JcrSession outputSession = null;
    final RunningState state = repository.runningState();
    final RepositoryStatistics stats = state.statistics();
    Sequencer sequencer = null;
    String sequencerName = null;
    try {
      // Create the required session(s) ...
      inputSession = state.loginInternalSession(work.getInputWorkspaceName());
      if (work.getOutputWorkspaceName() != null
          && !work.getOutputWorkspaceName().equals(work.getInputWorkspaceName())) {
        outputSession = state.loginInternalSession(work.getOutputWorkspaceName());
      } else {
        outputSession = inputSession;
      }

      // Get the sequencer ...
      sequencer = state.sequencers().getSequencer(work.getSequencerId());
      if (sequencer == null) return;
      sequencerName = sequencer.getName();

      // Find the selected node ...
      AbstractJcrNode selectedNode = inputSession.getNode(work.getSelectedPath());

      // Find the input that has changed and is to be sequenced ...
      Item inputItem = inputSession.getItem(work.getInputPath());
      Property changedProperty = null;
      if (inputItem instanceof Property) {
        changedProperty = (Property) inputItem;
      } else {
        Node changedNode = (Node) inputItem;
        // now look for a property that was changed or added ...
        changedProperty = changedNode.getProperty(work.getChangedPropertyName());
      }
      assert changedProperty != null;

      if (sequencer.hasAcceptedMimeTypes()) {
        // Get the MIME type, first by looking at the changed property's parent node
        // (or grand-parent node if parent is 'jcr:content') ...
        Node parent = changedProperty.getParent();
        String mimeType = null;
        if (parent.hasProperty(JcrConstants.JCR_MIME_TYPE)) {
          // The parent node has a 'jcr:mimeType' node ...
          Property property = parent.getProperty(JcrConstants.JCR_MIME_TYPE);
          if (!property.isMultiple()) {
            // The standard 'jcr:mimeType' property is single valued, but we're technically not
            // checking if
            // the property has that particular property definition (only by name) ...
            mimeType = property.getString();
          }
        } else if (parent.getName().equals(JcrConstants.JCR_CONTENT)) {
          // There is no 'jcr:mimeType' property, and since the sequenced property is on the
          // 'jcr:content' node,
          // get the parent (probably 'nt:file') node and look for the 'jcr:mimeType' property there
          // ...
          try {
            parent = parent.getParent();
            if (parent.hasProperty(JcrConstants.JCR_MIME_TYPE)) {
              Property property = parent.getProperty(JcrConstants.JCR_MIME_TYPE);
              if (!property.isMultiple()) {
                // The standard 'jcr:mimeType' property is single valued, but we're technically not
                // checking if
                // the property has that particular property definition (only by name) ...
                mimeType = property.getString();
              }
            }
          } catch (ItemNotFoundException e) {
            // must be the root ...
          }
        }
        if (mimeType == null
            && !changedProperty.isMultiple()
            && changedProperty.getType() == PropertyType.BINARY) {
          // Still don't know the MIME type of the property, so if it's a BINARY property we can
          // check it ...
          javax.jcr.Binary binary = changedProperty.getBinary();
          if (binary instanceof org.modeshape.jcr.api.Binary) {
            mimeType = ((org.modeshape.jcr.api.Binary) binary).getMimeType(parent.getName());
          }
        }

        // See if the sequencer accepts the MIME type ...
        if (mimeType != null && !sequencer.isAccepted(mimeType)) {
          return; // nope
        }
      }

      AbstractJcrNode outputNode = null;
      String primaryType = null;
      if (work.getSelectedPath().equals(work.getOutputPath())) {
        // The output is to go directly under the sequenced node ...
        outputNode =
            selectedNode.getName().equals(JcrConstants.JCR_CONTENT)
                ? selectedNode.getParent()
                : selectedNode;
        primaryType = selectedNode.getPrimaryNodeType().getName();
      } else {
        // Find the parent of the output if it exists, or create the node(s) along the path if not
        // ...
        Node parentOfOutput = null;
        try {
          parentOfOutput = outputSession.getNode(work.getOutputPath());
        } catch (PathNotFoundException e) {
          JcrTools tools = new JcrTools();
          parentOfOutput = tools.findOrCreateNode(outputSession, work.getOutputPath());
        }

        // Now determine the name of top node in the output, using the last segment of the selected
        // path ...
        String outputNodeName = computeOutputNodeName(selectedNode);

        // Remove any existing output (from a prior sequencing run on this same input) ...
        removeExistingOutputNodes(parentOfOutput, outputNodeName, work.getSelectedPath());

        // Create the output node
        if (parentOfOutput.isNew() && parentOfOutput.getName().equals(outputNodeName)) {
          // avoid creating a duplicate path with the same name
          outputNode = (AbstractJcrNode) parentOfOutput;
        } else {
          outputNode =
              (AbstractJcrNode)
                  parentOfOutput.addNode(outputNodeName, JcrConstants.NT_UNSTRUCTURED);
        }

        // and make sure the output node has the 'mode:derived' mixin ...
        outputNode.addMixin(DERIVED_NODE_TYPE_NAME);
        outputNode.setProperty(DERIVED_FROM_PROPERTY_NAME, work.getSelectedPath());
      }

      // Execute the sequencer ...
      DateTime now = outputSession.dateFactory().create();
      Sequencer.Context context =
          new SequencingContext(
              now, outputSession.getValueFactory(), outputSession.context().getMimeTypeDetector());
      if (inputSession.isLive() && (inputSession == outputSession || outputSession.isLive())) {
        final long start = System.nanoTime();

        try {
          if (sequencer.execute(changedProperty, outputNode, context)) {
            // Make sure that the sequencer did not change the primary type of the selected node ..
            if (selectedNode == outputNode
                && !selectedNode.getPrimaryNodeType().getName().equals(primaryType)) {
              String msg =
                  RepositoryI18n.sequencersMayNotChangeThePrimaryTypeOfTheSelectedNode.text();
              throw new RepositoryException(msg);
            }

            // find the new nodes created by the sequencing before saving, so we can properly fire
            // the events
            List<AbstractJcrNode> outputNodes = findOutputNodes(outputNode);

            // set the createdBy property (if it applies) to the user which triggered the
            // sequencing, not the context
            // of the saving session
            setCreatedByIfNecessary(outputSession, outputNodes);

            // outputSession
            outputSession.save();

            // fire the sequencing event after save (hopefully by this time the transaction has been
            // committed)
            fireSequencingEvent(selectedNode, outputNodes, outputSession, sequencerName);

            long durationInNanos = System.nanoTime() - start;
            Map<String, String> payload = new HashMap<String, String>();
            payload.put("sequencerName", sequencer.getClass().getName());
            payload.put("sequencedPath", changedProperty.getPath());
            payload.put("outputPath", outputNode.getPath());
            stats.recordDuration(
                DurationMetric.SEQUENCER_EXECUTION_TIME,
                durationInNanos,
                TimeUnit.NANOSECONDS,
                payload);
          }
        } catch (Throwable t) {
          fireSequencingFailureEvent(selectedNode, inputSession, t, sequencerName);
          // let it bubble down, because we still want to log it and update the stats
          throw t;
        }
      }
    } catch (Throwable t) {
      // Log the failure with the full sequencing context. Failures thrown during sequencer
      // execution have already fired a sequencing-failure event in the inner catch above.
      Logger logger = Logger.getLogger(getClass());
      if (work.getOutputWorkspaceName() != null) {
        logger.error(
            t,
            RepositoryI18n.errorWhileSequencingNodeIntoWorkspace,
            sequencerName,
            state.name(),
            work.getInputPath(),
            work.getInputWorkspaceName(),
            work.getOutputPath(),
            work.getOutputWorkspaceName());
      } else {
        logger.error(
            t,
            RepositoryI18n.errorWhileSequencingNode,
            sequencerName,
            state.name(),
            work.getInputPath(),
            work.getInputWorkspaceName(),
            work.getOutputPath());
      }
    } finally {
      // Always update the statistics and log out of the internal session(s), even on failure
      stats.increment(ValueMetric.SEQUENCED_COUNT);
      stats.decrement(ValueMetric.SEQUENCER_QUEUE_SIZE);
      if (inputSession != null && inputSession.isLive()) inputSession.logout();
      if (outputSession != null && outputSession != inputSession && outputSession.isLive())
        outputSession.logout();
    }
  }