Code example #1
 // todo remove this once we move to new metamodel
 public static RegionFactory createRegionFactory(Properties properties, boolean cachingEnabled) {
   // todo : REMOVE!  THIS IS TOTALLY A TEMPORARY HACK FOR org.hibernate.cfg.AnnotationBinder which
   // will be going away
   String regionFactoryClassName =
       RegionFactoryInitiator.mapLegacyNames(
           ConfigurationHelper.getString(
               AvailableSettings.CACHE_REGION_FACTORY, properties, null));
   if (regionFactoryClassName == null) {
     regionFactoryClassName = DEF_CACHE_REG_FACTORY;
   }
   LOG.debugf("Cache region factory : %s", regionFactoryClassName);
   try {
     try {
       return (RegionFactory)
           org.hibernate.internal.util.ReflectHelper.classForName(regionFactoryClassName)
               .getConstructor(Properties.class)
               .newInstance(properties);
     } catch (NoSuchMethodException e) {
       // no constructor accepting Properties found, try no arg constructor
       LOG.debugf(
           "%s did not provide constructor accepting java.util.Properties; attempting no-arg constructor.",
           regionFactoryClassName);
       return (RegionFactory)
           org.hibernate.internal.util.ReflectHelper.classForName(regionFactoryClassName)
               .newInstance();
     }
   } catch (Exception e) {
     throw new HibernateException(
         "could not instantiate RegionFactory [" + regionFactoryClassName + "]", e);
   }
 }
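A minimal caller sketch for the lookup above; the region factory class name is hypothetical, and the fallback to a no-arg constructor mirrors the catch block in the example:

  // Hypothetical usage: choose the RegionFactory via configuration properties.
  Properties props = new Properties();
  props.setProperty(
      AvailableSettings.CACHE_REGION_FACTORY, "com.example.cache.MyRegionFactory");
  // Instantiated reflectively; a Properties-accepting constructor is preferred,
  // with a no-arg constructor as the fallback.
  RegionFactory regionFactory = createRegionFactory(props, true);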
Code example #2
   /**
    * Coordinates the efforts to resolve a natural id to the corresponding entity identifier.
    * First, an attempt is made to resolve it from the session-level cache. If not found there, an
    * attempt is made to locate it in the second-level cache. Lastly, an attempt is made to resolve
    * it directly from the datasource.
    *
    * @param event The resolve-natural-id event
    * @return The resolved entity identifier, or null.
    */
  protected Serializable resolveNaturalId(final ResolveNaturalIdEvent event) {
    final EntityPersister persister = event.getEntityPersister();

    final boolean traceEnabled = LOG.isTraceEnabled();
    if (traceEnabled)
      LOG.tracev(
          "Attempting to resolve: {0}",
          MessageHelper.infoString(
              persister, event.getNaturalIdValues(), event.getSession().getFactory()));

    Serializable entityId = resolveFromCache(event);
    if (entityId != null) {
      if (traceEnabled)
        LOG.tracev(
            "Resolved object in cache: {0}",
            MessageHelper.infoString(
                persister, event.getNaturalIdValues(), event.getSession().getFactory()));
      return entityId;
    }

    if (traceEnabled)
      LOG.tracev(
          "Object not resolved in any cache: {0}",
          MessageHelper.infoString(
              persister, event.getNaturalIdValues(), event.getSession().getFactory()));

    return loadFromDatasource(event);
  }
Code example #3
  /**
   * Prepares the save call using a newly generated id.
   *
   * @param entity The entity to be saved
   * @param entityName The entity-name for the entity to be saved
   * @param anything Generally cascade-specific information.
   * @param source The session which is the source of this save event.
   * @param requiresImmediateIdAccess does the event context require access to the identifier
   *     immediately after execution of this method (if not, post-insert style id generators may be
   *     postponed if we are outside a transaction).
   * @return The id used to save the entity; may be null depending on the type of id generator used
   *     and the requiresImmediateIdAccess value
   */
  protected Serializable saveWithGeneratedId(
      Object entity,
      String entityName,
      Object anything,
      EventSource source,
      boolean requiresImmediateIdAccess) {
    EntityPersister persister = source.getEntityPersister(entityName, entity);
    Serializable generatedId = persister.getIdentifierGenerator().generate(source, entity);
    if (generatedId == null) {
      throw new IdentifierGenerationException("null id generated for: " + entity.getClass());
    } else if (generatedId == IdentifierGeneratorHelper.SHORT_CIRCUIT_INDICATOR) {
      return source.getIdentifier(entity);
    } else if (generatedId == IdentifierGeneratorHelper.POST_INSERT_INDICATOR) {
      return performSave(
          entity, null, persister, true, anything, source, requiresImmediateIdAccess);
    } else {
      // TODO: define toString()s for generators
      if (LOG.isDebugEnabled()) {
        LOG.debugf(
            "Generated identifier: %s, using strategy: %s",
            persister.getIdentifierType().toLoggableString(generatedId, source.getFactory()),
            persister.getIdentifierGenerator().getClass().getName());
      }

      return performSave(entity, generatedId, persister, false, anything, source, true);
    }
  }
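The POST_INSERT_INDICATOR branch above is how identity-style strategies defer id access until after the INSERT. A hypothetical generator illustrating the contract (the class name is invented; the generate signature follows the IdentifierGenerator interface this code relies on):

  // Hypothetical identity-style generator: signals that the id is only known
  // after the row is inserted, so saveWithGeneratedId() calls performSave()
  // with a null id.
  public class PostInsertStyleGenerator implements IdentifierGenerator {
    @Override
    public Serializable generate(SessionImplementor session, Object object) {
      return IdentifierGeneratorHelper.POST_INSERT_INDICATOR;
    }
  }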
Code example #4
  /**
   * process any unreferenced collections and then inspect all known collections, scheduling
   * creates/removes/updates
   */
  private void flushCollections(EventSource session) throws HibernateException {

    if (LOG.isTraceEnabled()) {
      LOG.trace("Processing unreferenced collections");
    }

    List list = IdentityMap.entries(session.getPersistenceContext().getCollectionEntries());
    int size = list.size();
    for (int i = 0; i < size; i++) {
      Map.Entry me = (Map.Entry) list.get(i);
      CollectionEntry ce = (CollectionEntry) me.getValue();
      if (!ce.isReached() && !ce.isIgnore()) {
        Collections.processUnreachableCollection((PersistentCollection) me.getKey(), session);
      }
    }

    // Schedule updates to collections:

    if (LOG.isTraceEnabled()) {
      LOG.trace("Scheduling collection removes/(re)creates/updates");
    }

    list = IdentityMap.entries(session.getPersistenceContext().getCollectionEntries());
    size = list.size();
    ActionQueue actionQueue = session.getActionQueue();
    for (int i = 0; i < size; i++) {
      Map.Entry me = (Map.Entry) list.get(i);
      PersistentCollection coll = (PersistentCollection) me.getKey();
      CollectionEntry ce = (CollectionEntry) me.getValue();

      if (ce.isDorecreate()) {
        session.getInterceptor().onCollectionRecreate(coll, ce.getCurrentKey());
        actionQueue.addAction(
            new CollectionRecreateAction(
                coll, ce.getCurrentPersister(), ce.getCurrentKey(), session));
      }
      if (ce.isDoremove()) {
        session.getInterceptor().onCollectionRemove(coll, ce.getLoadedKey());
        actionQueue.addAction(
            new CollectionRemoveAction(
                coll,
                ce.getLoadedPersister(),
                ce.getLoadedKey(),
                ce.isSnapshotEmpty(coll),
                session));
      }
      if (ce.isDoupdate()) {
        session.getInterceptor().onCollectionUpdate(coll, ce.getLoadedKey());
        actionQueue.addAction(
            new CollectionUpdateAction(
                coll,
                ce.getLoadedPersister(),
                ce.getLoadedKey(),
                ce.isSnapshotEmpty(coll),
                session));
      }
    }

    actionQueue.sortCollectionActions();
  }
Code example #5
  /**
   * Extract bytes from the given stream.
   *
   * @param inputStream The stream of bytes.
   * @return The contents as a {@code byte[]}
   */
  public static byte[] extractBytes(InputStream inputStream) {
    if (BinaryStream.class.isInstance(inputStream)) {
      return ((BinaryStream) inputStream).getBytes();
    }

    // read the stream contents into a buffer and return the complete byte[]
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream(2048);
    try {
      byte[] buffer = new byte[2048];
      while (true) {
        int amountRead = inputStream.read(buffer);
        if (amountRead == -1) {
          break;
        }
        outputStream.write(buffer, 0, amountRead);
      }
    } catch (IOException ioe) {
      throw new HibernateException("IOException occurred reading a binary value", ioe);
    } finally {
      try {
        inputStream.close();
      } catch (IOException e) {
        LOG.unableToCloseInputStream(e);
      }
      try {
        outputStream.close();
      } catch (IOException e) {
        LOG.unableToCloseOutputStream(e);
      }
    }
    return outputStream.toByteArray();
  }
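A short usage sketch for extractBytes; note that the finally block above closes the stream it was handed, so callers should not reuse it:

  // Usage sketch: drain a small in-memory stream into a byte[].
  byte[] bytes = extractBytes(new java.io.ByteArrayInputStream(new byte[] {1, 2, 3}));
  // When the argument already implements BinaryStream, the copy loop is
  // skipped and the backing bytes are returned directly.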
Code example #6
  @SuppressWarnings({"unchecked"})
  public static void close(Statement statement) {
    log.tracef("Closing prepared statement [%s]", statement);

    try {
      // if we are unable to "clean" the prepared statement,
      // we do not close it
      try {
        if (statement.getMaxRows() != 0) {
          statement.setMaxRows(0);
        }
        if (statement.getQueryTimeout() != 0) {
          statement.setQueryTimeout(0);
        }
      } catch (SQLException sqle) {
        // there was a problem "cleaning" the prepared statement
        if (log.isDebugEnabled()) {
          log.debugf("Exception clearing maxRows/queryTimeout [%s]", sqle.getMessage());
        }
        // EARLY EXIT!!!
        return;
      }
      statement.close();
    } catch (SQLException e) {
      log.debugf("Unable to release JDBC statement [%s]", e.getMessage());
    } catch (Exception e) {
      // try to handle general errors more elegantly
      log.debugf("Unable to release JDBC statement [%s]", e.getMessage());
    }
  }
Code example #7
  @Override
  public void initialize(Serializable id, SessionImplementor session) throws HibernateException {
    if (log.isDebugEnabled()) {
      log.debugf(
          "Loading collection: %s",
          MessageHelper.collectionInfoString(collectionPersister, id, getFactory()));
    }

    final Serializable[] ids = new Serializable[] {id};
    try {
      final QueryParameters qp = new QueryParameters();
      qp.setPositionalParameterTypes(new Type[] {collectionPersister.getKeyType()});
      qp.setPositionalParameterValues(ids);
      qp.setCollectionKeys(ids);

      executeLoad(session, qp, staticLoadQuery, true, null);

    } catch (SQLException sqle) {
      throw getFactory()
          .getSQLExceptionHelper()
          .convert(
              sqle,
              "could not initialize a collection: "
                  + MessageHelper.collectionInfoString(collectionPersister, id, getFactory()),
              staticLoadQuery.getSqlStatement());
    }

    log.debug("Done loading collection");
  }
Code example #8
 /**
  * Returns <code>false</code> and fails to put the value if there is an existing un-writeable item
  * mapped to this key.
  */
 @Override
 public final boolean putFromLoad(
     Object key, Object value, long txTimestamp, Object version, boolean minimalPutOverride)
     throws CacheException {
   try {
     LOG.debugf(
         "putting key[%s] -> value[%s] into region[%s]",
         key, value, getInternalRegion().getName());
     writeLock.lock();
     Lockable item = (Lockable) getInternalRegion().get(key);
     boolean writeable =
         item == null || item.isWriteable(txTimestamp, version, getVersionComparator());
     if (writeable) {
       LOG.debugf(
           "putting key[%s] -> value[%s] into region[%s] success",
           key, value, getInternalRegion().getName());
       getInternalRegion().put(key, new Item(value, version, getInternalRegion().nextTimestamp()));
       return true;
     } else {
        LOG.debugf(
            "putting key[%s] -> value[%s] into region[%s] failed; existing item is unwriteable",
            key, value, getInternalRegion().getName());
       return false;
     }
   } finally {
     writeLock.unlock();
   }
 }
Code example #9
 private void performExecution() {
   LOG.debugf("Executing batch size: %s", batchPosition);
   try {
     for (Map.Entry<String, PreparedStatement> entry : getStatements().entrySet()) {
       try {
         final PreparedStatement statement = entry.getValue();
         final int[] rowCounts;
         try {
           getJdbcCoordinator()
               .getJdbcSessionOwner()
               .getJdbcSessionContext()
               .getObserver()
               .jdbcExecuteBatchStart();
           rowCounts = statement.executeBatch();
         } finally {
           getJdbcCoordinator()
               .getJdbcSessionOwner()
               .getJdbcSessionContext()
               .getObserver()
               .jdbcExecuteBatchEnd();
         }
         checkRowCounts(rowCounts, statement);
       } catch (SQLException e) {
         abortBatch();
         throw sqlExceptionHelper().convert(e, "could not execute batch", entry.getKey());
       }
     }
   } catch (RuntimeException re) {
     LOG.unableToExecuteBatch(re.getMessage());
     throw re;
   } finally {
     batchPosition = 0;
   }
 }
Code example #10
File: SchemaExport.java Project: nladd/dayTrader
  private void execute(
      boolean script, boolean export, Writer fileOutput, Statement statement, final String sql)
      throws IOException, SQLException {
    final SqlExceptionHelper sqlExceptionHelper = new SqlExceptionHelper();

    String formatted = formatter.format(sql);
    if (delimiter != null) formatted += delimiter;
    if (script) System.out.println(formatted);
    LOG.debug(formatted);
    if (outputFile != null) {
      fileOutput.write(formatted + "\n");
    }
    if (export) {

      statement.executeUpdate(sql);
      try {
        SQLWarning warnings = statement.getWarnings();
        if (warnings != null) {
          sqlExceptionHelper.logAndClearWarnings(connectionHelper.getConnection());
        }
      } catch (SQLException sqle) {
        LOG.unableToLogSqlWarnings(sqle);
      }
    }
  }
Code example #11
  /**
   * Returns <code>null</code> if the item is not readable. Locked items are not readable, nor are
   * items created after the start of this transaction.
   */
  @Override
  public final Object get(Object key, long txTimestamp) throws CacheException {
    LOG.debugf("getting key[%s] from region[%s]", key, getInternalRegion().getName());
    try {
      readLock.lock();
      Lockable item = (Lockable) getInternalRegion().get(key);

      boolean readable = item != null && item.isReadable(txTimestamp);
      if (readable) {
        LOG.debugf("hit key[%s] in region[%s]", key, getInternalRegion().getName());
        return item.getValue();
      } else {
        if (item == null) {
          LOG.debugf("miss key[%s] in region[%s]", key, getInternalRegion().getName());
        } else {
          LOG.debugf(
              "hit key[%s] in region[%s], but it is unreadable",
              key, getInternalRegion().getName());
        }
        return null;
      }
    } finally {
      readLock.unlock();
    }
  }
Code example #12
 private static RegionFactory createRegionFactory(
     Properties properties, boolean cachingEnabled, ServiceRegistry serviceRegistry) {
   String regionFactoryClassName =
       RegionFactoryInitiator.mapLegacyNames(
           ConfigurationHelper.getString(
               AvailableSettings.CACHE_REGION_FACTORY, properties, null));
   if (regionFactoryClassName == null || !cachingEnabled) {
     regionFactoryClassName = DEF_CACHE_REG_FACTORY;
   }
   LOG.debugf("Cache region factory : %s", regionFactoryClassName);
   try {
     try {
       return (RegionFactory)
           serviceRegistry
               .getService(ClassLoaderService.class)
               .classForName(regionFactoryClassName)
               .getConstructor(Properties.class)
               .newInstance(properties);
     } catch (NoSuchMethodException e) {
       // no constructor accepting Properties found, try no arg constructor
       LOG.debugf(
           "%s did not provide constructor accepting java.util.Properties; attempting no-arg constructor.",
           regionFactoryClassName);
       return (RegionFactory)
           serviceRegistry
               .getService(ClassLoaderService.class)
               .classForName(regionFactoryClassName)
               .newInstance();
     }
   } catch (Exception e) {
     throw new HibernateException(
         "could not instantiate RegionFactory [" + regionFactoryClassName + "]", e);
   }
 }
Code example #13
  @Override
  public void register(ResultSet resultSet, Statement statement) {
    log.tracef("Registering result set [%s]", resultSet);

    if (statement == null) {
      try {
        statement = resultSet.getStatement();
      } catch (SQLException e) {
        throw convert(e, "unable to access Statement from ResultSet");
      }
    }
    if (statement != null) {
      // Keep this at DEBUG level, rather than warn. Numerous connection pool implementations
      // can return a proxy/wrapper around the JDBC Statement, causing excessive logging here.
      // See HHH-8210.
      if (log.isDebugEnabled() && !xref.containsKey(statement)) {
        log.debug("ResultSet statement was not registered (on register)");
      }
      Set<ResultSet> resultSets = xref.get(statement);
      if (resultSets == null) {
        resultSets = new HashSet<ResultSet>();
        xref.put(statement, resultSets);
      }
      resultSets.add(resultSet);
    } else {
      unassociatedResultSets.add(resultSet);
    }
  }
Code example #14
  @Override
  public void release(ResultSet resultSet, Statement statement) {
    log.tracef("Releasing result set [%s]", resultSet);

    if (statement == null) {
      try {
        statement = resultSet.getStatement();
      } catch (SQLException e) {
        throw convert(e, "unable to access Statement from ResultSet");
      }
    }
    if (statement != null) {
      final Set<ResultSet> resultSets = xref.get(statement);
      if (resultSets == null) {
        log.unregisteredStatement();
      } else {
        resultSets.remove(resultSet);
        if (resultSets.isEmpty()) {
          xref.remove(statement);
        }
      }
    } else {
      final boolean removed = unassociatedResultSets.remove(resultSet);
      if (!removed) {
        log.unregisteredResultSetWithoutStatement();
      }
    }
    close(resultSet);
  }
Code example #15
  /**
   * Compile the query (generate the SQL).
   *
   * @throws org.hibernate.MappingException Indicates problems resolving things referenced in the
   *     query.
   *     query.
   * @throws org.hibernate.QueryException Generally some form of syntactic failure.
   */
  private void compile() throws QueryException, MappingException {
    LOG.trace("Compiling query");
    try {
      ParserHelper.parse(
          new PreprocessingParser(tokenReplacements),
          queryString,
          ParserHelper.HQL_SEPARATORS,
          this);
      renderSQL();
    } catch (QueryException qe) {
      qe.setQueryString(queryString);
      throw qe;
    } catch (MappingException me) {
      throw me;
    } catch (Exception e) {
      LOG.debug("Unexpected query compilation problem", e);
      e.printStackTrace();
      QueryException qe = new QueryException("Incorrect query syntax", e);
      qe.setQueryString(queryString);
      throw qe;
    }

    postInstantiate();

    compiled = true;
  }
Code example #16
 public void injectSessionFactory(SessionFactoryImplementor factory) {
   if (this.factory != null) {
     LOG.scopingTypesToSessionFactoryAfterAlreadyScoped(this.factory, factory);
   } else {
     LOG.trace("Scoping types to session factory " + factory);
   }
   this.factory = factory;
 }
Code example #17
  @Override
  public <T> T delegateWork(WorkExecutorVisitable<T> work, boolean transacted)
      throws HibernateException {
    boolean wasAutoCommit = false;
    try {
      // todo : should we use a connection proxy here?
      Connection connection = connectionProvider().getConnection();
      try {
        if (transacted) {
          if (connection.getAutoCommit()) {
            wasAutoCommit = true;
            connection.setAutoCommit(false);
          }
        }

        T result = work.accept(new WorkExecutor<T>(), connection);

        if (transacted) {
          connection.commit();
        }

        return result;
      } catch (Exception e) {
        try {
          if (transacted && !connection.isClosed()) {
            connection.rollback();
          }
        } catch (Exception ignore) {
          LOG.unableToRollbackConnection(ignore);
        }

        if (e instanceof HibernateException) {
          throw (HibernateException) e;
        } else if (e instanceof SQLException) {
          throw sqlExceptionHelper().convert((SQLException) e, "error performing isolated work");
        } else {
          throw new HibernateException("error performing isolated work", e);
        }
      } finally {
        if (transacted && wasAutoCommit) {
          try {
            connection.setAutoCommit(true);
          } catch (Exception ignore) {
            LOG.trace("was unable to reset connection back to auto-commit");
          }
        }
        try {
          connectionProvider().closeConnection(connection);
        } catch (Exception ignore) {
          LOG.unableToReleaseIsolatedConnection(ignore);
        }
      }
    } catch (SQLException sqle) {
      throw sqlExceptionHelper().convert(sqle, "unable to obtain isolated JDBC connection");
    }
  }
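A caller sketch for delegateWork, assuming WorkExecutorVisitable's single accept(WorkExecutor, Connection) method; the SQL and table name are hypothetical, and an anonymous class avoids assuming Java 8 lambdas:

  // Hypothetical caller: run one statement on an isolated, transacted connection.
  Integer updated =
      delegateWork(
          new WorkExecutorVisitable<Integer>() {
            @Override
            public Integer accept(WorkExecutor<Integer> executor, Connection connection)
                throws SQLException {
              Statement stmt = connection.createStatement();
              try {
                // hypothetical table; any single DML statement works here
                return stmt.executeUpdate("update sequence_table set next_val = next_val + 1");
              } finally {
                stmt.close();
              }
            }
          },
          true); // transacted: commit on success, roll back on failure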
Code example #18
 protected boolean invokeDeleteLifecycle(
     EventSource session, Object entity, EntityPersister persister) {
   if (persister.implementsLifecycle()) {
     LOG.debug("Calling onDelete()");
     if (((Lifecycle) entity).onDelete(session)) {
       LOG.debug("Deletion vetoed by onDelete()");
       return true;
     }
   }
   return false;
 }
Code example #19
 protected boolean invokeUpdateLifecycle(
     Object entity, EntityPersister persister, EventSource source) {
   if (persister.implementsLifecycle(source.getEntityMode())) {
     LOG.debugf("Calling onUpdate()");
     if (((Lifecycle) entity).onUpdate(source)) {
       LOG.debugf("Update vetoed by onUpdate()");
       return true;
     }
   }
   return false;
 }
Code example #20
File: Collections.java Project: HAW-AI/D-MARLA
  private static void processDereferencedCollection(
      PersistentCollection coll, SessionImplementor session) {
    final PersistenceContext persistenceContext = session.getPersistenceContext();
    CollectionEntry entry = persistenceContext.getCollectionEntry(coll);
    final CollectionPersister loadedPersister = entry.getLoadedPersister();

    if (LOG.isDebugEnabled() && loadedPersister != null) {
      LOG.debugf(
          "Collection dereferenced: %s",
          MessageHelper.collectionInfoString(
              loadedPersister, entry.getLoadedKey(), session.getFactory()));
    }

    // do a check
    boolean hasOrphanDelete = loadedPersister != null && loadedPersister.hasOrphanDelete();
    if (hasOrphanDelete) {
      Serializable ownerId =
          loadedPersister.getOwnerEntityPersister().getIdentifier(coll.getOwner(), session);
      if (ownerId == null) {
        // the owning entity may have been deleted and its identifier unset due to
        // identifier-rollback; in which case, try to look up its identifier from
        // the persistence context
        if (session.getFactory().getSettings().isIdentifierRollbackEnabled()) {
          EntityEntry ownerEntry = persistenceContext.getEntry(coll.getOwner());
          if (ownerEntry != null) {
            ownerId = ownerEntry.getId();
          }
        }
        if (ownerId == null) {
          throw new AssertionFailure(
              "Unable to determine collection owner identifier for orphan-delete processing");
        }
      }
      EntityKey key = session.generateEntityKey(ownerId, loadedPersister.getOwnerEntityPersister());
      Object owner = persistenceContext.getEntity(key);
      if (owner == null) {
        throw new AssertionFailure(
            "collection owner not associated with session: " + loadedPersister.getRole());
      }
      EntityEntry e = persistenceContext.getEntry(owner);
      // only collections belonging to deleted entities are allowed to be dereferenced in the case
      // of orphan delete
      if (e != null && e.getStatus() != Status.DELETED && e.getStatus() != Status.GONE) {
        throw new HibernateException(
            "A collection with cascade=\"all-delete-orphan\" was no longer referenced by the owning entity instance: "
                + loadedPersister.getRole());
      }
    }

    // do the work
    entry.setCurrentPersister(null);
    entry.setCurrentKey(null);
    prepareCollectionForUpdate(coll, entry, session.getFactory());
  }
Code example #21
 /**
  * Generate the next increment in the optimistic locking value according the {@link VersionType}
  * contract for the version property.
  *
  * @param version The current version
  * @param versionType The version type
  * @param session The originating session
  * @return The incremented optimistic locking value.
  */
 @SuppressWarnings("unchecked")
 public static Object increment(
     Object version, VersionType versionType, SharedSessionContractImplementor session) {
   final Object next = versionType.next(version, session);
   if (LOG.isTraceEnabled()) {
     LOG.tracef(
         "Incrementing: %s to %s",
         versionType.toLoggableString(version, session.getFactory()),
         versionType.toLoggableString(next, session.getFactory()));
   }
   return next;
 }
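To make the contract concrete: with Hibernate's built-in IntegerType (a VersionType whose next() effectively returns version + 1), the call reduces to a simple bump. The session variable is assumed to be in scope:

  // Sketch: an int version property goes 4 -> 5 on increment.
  final Object next = increment(Integer.valueOf(4), IntegerType.INSTANCE, session);
  assert ((Integer) next).intValue() == 5;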
Code example #22
  @SuppressWarnings({"unchecked"})
  public static void close(ResultSet resultSet) {
    log.tracef("Closing result set [%s]", resultSet);

    try {
      resultSet.close();
    } catch (SQLException e) {
      log.debugf("Unable to release JDBC result set [%s]", e.getMessage());
    } catch (Exception e) {
      // try to handle general errors more elegantly
      log.debugf("Unable to release JDBC result set [%s]", e.getMessage());
    }
  }
Code example #23
 protected boolean invokeSaveLifecycle(
     Object entity, EntityPersister persister, EventSource source) {
   // Sub-insertions should occur before containing insertion so
   // Try to do the callback now
   if (persister.implementsLifecycle()) {
     LOG.debug("Calling onSave()");
     if (((Lifecycle) entity).onSave(source)) {
       LOG.debug("Insertion vetoed by onSave()");
       return true;
     }
   }
   return false;
 }
Code example #24
File: DbTimestampType.java Project: enenuki/phd
 public Date seed(SessionImplementor session) {
   if (session == null) {
     LOG.trace("Incoming session was null; using current jvm time");
     return super.seed(session);
   }
   if (!session.getFactory().getDialect().supportsCurrentTimestampSelection()) {
     LOG.debug(
         "Falling back to vm-based timestamp, as dialect does not support current timestamp selection");
     return super.seed(session);
   }
   return getCurrentTimestamp(session);
 }
Code example #25
  /**
   * Handle the given dirty-check event.
   *
   * @param event The dirty-check event to be handled.
   * @throws HibernateException
   */
  public void onDirtyCheck(DirtyCheckEvent event) throws HibernateException {

    int oldSize = event.getSession().getActionQueue().numberOfCollectionRemovals();

    try {
      flushEverythingToExecutions(event);
      boolean wasNeeded = event.getSession().getActionQueue().hasAnyQueuedActions();
      if (wasNeeded) LOG.debugf("Session dirty");
      else LOG.debugf("Session not dirty");
      event.setDirty(wasNeeded);
    } finally {
      event.getSession().getActionQueue().clearFromFlushNeededCheck(oldSize);
    }
  }
Code example #26
 /**
  * Create the subselect fetch query fragment for the provided {@link QueryParameters} with SELECT
  * and ORDER BY clauses removed.
  *
  * @param queryParameters - the query parameters.
  * @return the subselect fetch query fragment.
  */
 public static String createSubselectFetchQueryFragment(QueryParameters queryParameters) {
   // TODO: ugly here:
   final String queryString = queryParameters.getFilteredSQL();
   final int fromIndex = getFromIndex(queryString);
   final int orderByIndex = queryString.lastIndexOf("order by");
   final String subselectQueryFragment =
       orderByIndex > 0
           ? queryString.substring(fromIndex, orderByIndex)
           : queryString.substring(fromIndex);
   if (LOG.isTraceEnabled()) {
     LOG.tracef("SubselectFetch query fragment: %s", subselectQueryFragment);
   }
   return subselectQueryFragment;
 }
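A worked illustration of the substring surgery above, on a hypothetical query string:

  // Hypothetical input, as queryParameters.getFilteredSQL() might return it:
  String sql = "select c.id, c.name from customers c where c.region = ? order by c.name";
  // getFromIndex(sql) locates "from"; lastIndexOf("order by") trims the tail.
  // The fragment handed to the subselect fetch is then:
  String fragment = "from customers c where c.region = ? ";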
Code example #27
 private void logDirtyProperties(
     Serializable id, int[] dirtyProperties, EntityPersister persister) {
   if (LOG.isTraceEnabled() && dirtyProperties != null && dirtyProperties.length > 0) {
     final String[] allPropertyNames = persister.getPropertyNames();
     final String[] dirtyPropertyNames = new String[dirtyProperties.length];
     for (int i = 0; i < dirtyProperties.length; i++) {
       dirtyPropertyNames[i] = allPropertyNames[dirtyProperties[i]];
     }
      LOG.trace(
          "Found dirty properties ["
              + MessageHelper.infoString(persister.getEntityName(), id)
              + "] : "
              + java.util.Arrays.toString(dirtyPropertyNames)); // log the names, not the array reference
   }
 }
Code example #28
 /**
  * We encountered a delete request on a transient instance.
  *
  * <p>This is a deviation from historical Hibernate (pre-3.2) behavior to align with the JPA spec,
  * which states that transient entities can be passed to remove operation in which case cascades
  * still need to be performed.
  *
  * @param session The session which is the source of the event
  * @param entity The entity being delete processed
  * @param cascadeDeleteEnabled Is cascading of deletes enabled
  * @param persister The entity persister
  * @param transientEntities A cache of already visited transient entities (to avoid infinite
  *     recursion).
  */
 protected void deleteTransientEntity(
     EventSource session,
     Object entity,
     boolean cascadeDeleteEnabled,
     EntityPersister persister,
     Set transientEntities) {
   LOG.handlingTransientEntity();
   if (transientEntities.contains(entity)) {
     LOG.trace("Already handled transient entity; skipping");
     return;
   }
   transientEntities.add(entity);
   cascadeBeforeDelete(session, persister, entity, null, transientEntities);
   cascadeAfterDelete(session, persister, entity, transientEntities);
 }
Code example #29
  /**
    * Coordinates the processing necessary to get things ready for execution as db calls by
    * prepping the session caches and moving the appropriate entities and collections to their
    * respective execution queues.
   *
   * @param event The flush event.
   * @throws HibernateException Error flushing caches to execution queues.
   */
  protected void flushEverythingToExecutions(FlushEvent event) throws HibernateException {

    LOG.trace("Flushing session");

    EventSource session = event.getSession();

    final PersistenceContext persistenceContext = session.getPersistenceContext();
    session.getInterceptor().preFlush(new LazyIterator(persistenceContext.getEntitiesByKey()));

    prepareEntityFlushes(session, persistenceContext);
    // we could move this inside if we wanted to
    // tolerate collection initializations during
    // collection dirty checking:
    prepareCollectionFlushes(persistenceContext);
    // now, any collections that are initialized
    // inside this block do not get updated - they
    // are ignored until the next flush

    persistenceContext.setFlushing(true);
    try {
      int entityCount = flushEntities(event, persistenceContext);
      int collectionCount = flushCollections(session, persistenceContext);

      event.setNumberOfEntitiesProcessed(entityCount);
      event.setNumberOfCollectionsProcessed(collectionCount);
    } finally {
      persistenceContext.setFlushing(false);
    }

    // some statistics
    logFlushResults(event);
  }
Code example #30
  /**
   * 1. Recreate the collection key -> collection map
   * 2. Rebuild the collection entries
   * 3. Call Interceptor.postFlush()
   */
  protected void postFlush(SessionImplementor session) throws HibernateException {

    LOG.trace("Post flush");

    final PersistenceContext persistenceContext = session.getPersistenceContext();
    persistenceContext.getCollectionsByKey().clear();

    // the database has changed now, so the subselect results need to be invalidated
    // the batch fetching queues should also be cleared - especially the collection batch fetching
    // one
    persistenceContext.getBatchFetchQueue().clear();

    for (Map.Entry<PersistentCollection, CollectionEntry> me :
        IdentityMap.concurrentEntries(persistenceContext.getCollectionEntries())) {
      CollectionEntry collectionEntry = me.getValue();
      PersistentCollection persistentCollection = me.getKey();
      collectionEntry.postFlush(persistentCollection);
      if (collectionEntry.getLoadedPersister() == null) {
        // if the collection is dereferenced, unset its session reference and remove from the
        // session cache
        // iter.remove(); //does not work, since the entrySet is not backed by the set
        persistentCollection.unsetSession(session);
        persistenceContext.getCollectionEntries().remove(persistentCollection);
      } else {
        // otherwise recreate the mapping between the collection and its key
        CollectionKey collectionKey =
            new CollectionKey(collectionEntry.getLoadedPersister(), collectionEntry.getLoadedKey());
        persistenceContext.getCollectionsByKey().put(collectionKey, persistentCollection);
      }
    }
  }