/**
 * Deletes the entity tuple for {@code id} from the datastore via the grid dialect,
 * after performing optimistic-lock verification against the stored snapshot.
 *
 * <p>Flow as visible here: (1) reject multi-table entities; (2) if the entity is
 * unversioned but uses all/dirty optimistic locking, compare each versionable
 * property's loaded state against the snapshot read from the grid and raise
 * {@link StaleObjectStateException} on mismatch; otherwise delegate version
 * checking to {@code checkVersionAndRaiseSOSE}; (3) remove association metadata
 * (via {@link EntityDehydrator}) and then the tuple itself.
 *
 * @param id the identifier of the entity to delete
 * @param version the version value used for the versioned-entity check
 * @param object the entity instance (passed through to {@code GridType#nullSafeGet})
 * @param session the originating session
 * @throws HibernateException if the entity spans multiple tables (unsupported here)
 * @throws StaleObjectStateException if the optimistic-lock comparison fails
 */
@Override
public void delete(Serializable id, Object version, Object object, SessionImplementor session)
    throws HibernateException {
  final int span = getTableSpan();
  // Multi-table (joined) entities are explicitly unsupported by this persister.
  if (span > 1) {
    throw new HibernateException(
        "Hibernate OGM does not yet support entities spanning multiple tables");
  }
  final EntityMetamodel entityMetamodel = getEntityMetamodel();
  // Unversioned entity + optimistic-lock="all"/"dirty" => lock check must be done
  // by comparing property state instead of a version column.
  boolean isImpliedOptimisticLocking = !entityMetamodel.isVersioned() && isAllOrDirtyOptLocking();
  Object[] loadedState = null;
  if (isImpliedOptimisticLocking) {
    // need to treat this as if it where optimistic-lock="all" (dirty does *not* make sense);
    // first we need to locate the "loaded" state
    //
    // Note, it potentially could be a proxy, so doAfterTransactionCompletion the location the
    // safe way...
    org.hibernate.engine.spi.EntityKey key = session.generateEntityKey(id, this);
    Object entity = session.getPersistenceContext().getEntity(key);
    if (entity != null) {
      EntityEntry entry = session.getPersistenceContext().getEntry(entity);
      loadedState = entry.getLoadedState();
    }
  }
  // Snapshot of the stored tuple; used both for the lock check and for dehydration below.
  final EntityKey key = EntityKeyBuilder.fromPersister(this, id, session);
  final Tuple resultset = gridDialect.getTuple(key, this.getTupleContext());
  final SessionFactoryImplementor factory = getFactory();
  if (isImpliedOptimisticLocking && loadedState != null) {
    // we need to utilize dynamic delete statements
    for (int j = span - 1; j >= 0; j--) {
      boolean[] versionability = getPropertyVersionability();
      // TODO do a diff on the properties value from resultset
      GridType[] types = gridPropertyTypes;
      for (int i = 0; i < entityMetamodel.getPropertySpan(); i++) {
        // Only compare properties mapped to this table that take part in locking.
        boolean include = isPropertyOfTable(i, j) && versionability[i];
        if (include) {
          final GridType type = types[i];
          final Object snapshotValue =
              type.nullSafeGet(resultset, getPropertyColumnNames(i), session, object);
          // TODO support other entity modes
          if (!type.isEqual(loadedState[i], snapshotValue, factory)) {
            // Record the optimistic failure in statistics (when enabled) before raising.
            if (factory.getStatistics().isStatisticsEnabled()) {
              factory.getStatisticsImplementor().optimisticFailure(getEntityName());
            }
            throw new StaleObjectStateException(getEntityName(), id);
          }
        }
      }
    }
  } else {
    // Versioned entities use the regular version comparison against the snapshot.
    if (entityMetamodel.isVersioned()) {
      checkVersionAndRaiseSOSE(id, version, session, resultset);
    }
  }
  for (int j = span - 1; j >= 0; j--) {
    // Inverse tables are owned by the other side of the association; nothing to delete here.
    if (isInverseTable(j)) {
      return;
    }
    if (log.isTraceEnabled()) {
      log.trace("Deleting entity: " + MessageHelper.infoString(this, id, factory));
      if (j == 0 && isVersioned()) {
        log.trace("Version: " + version);
      }
    }
    // delete association information
    // needs to be executed before the tuple removal because the AtomicMap in ISPN is cleared upon
    // removal
    new EntityDehydrator()
        .gridDialect(gridDialect)
        .gridPropertyTypes(gridPropertyTypes)
        .gridIdentifierType(gridIdentifierType)
        .id(id)
        .persister(this)
        .resultset(resultset)
        .session(session)
        .tableIndex(j)
        .onlyRemovePropertyMetadata()
        .dehydrate();
    // Finally remove the tuple itself from the datastore.
    gridDialect.removeTuple(key, getTupleContext());
  }
}
/**
 * Perform the entity deletion. Well, as with most operations, does not really perform it; just
 * schedules an action/execution with the {@link org.hibernate.engine.spi.ActionQueue} for
 * execution during flush.
 *
 * <p>Visible ordering here is deliberate: the entry status is set to {@code DELETED} before any
 * callbacks/cascades run, before-delete cascades fire before transient references are nullified,
 * and after-delete cascades fire only once the delete action has been queued.
 *
 * @param session The originating session
 * @param entity The entity to delete
 * @param entityEntry The entity's entry in the {@link PersistenceContext}
 * @param isCascadeDeleteEnabled Is delete cascading enabled?
 * @param isOrphanRemovalBeforeUpdates Whether to queue an {@code OrphanRemovalAction} instead of
 *     a regular {@code EntityDeleteAction} (temporary ordering workaround; see HHH-6484 note
 *     below)
 * @param persister The entity persister.
 * @param transientEntities A cache of already deleted entities.
 */
protected final void deleteEntity(
    final EventSource session,
    final Object entity,
    final EntityEntry entityEntry,
    final boolean isCascadeDeleteEnabled,
    final boolean isOrphanRemovalBeforeUpdates,
    final EntityPersister persister,
    final Set transientEntities) {
  if (LOG.isTraceEnabled()) {
    LOG.tracev(
        "Deleting {0}",
        MessageHelper.infoString(persister, entityEntry.getId(), session.getFactory()));
  }
  final PersistenceContext persistenceContext = session.getPersistenceContext();
  final Type[] propTypes = persister.getPropertyTypes();
  final Object version = entityEntry.getVersion();
  // Base the deleted state on the loaded snapshot when available; otherwise read the
  // current property values directly off the instance.
  final Object[] currentState;
  if (entityEntry.getLoadedState() == null) {
    // ie. the entity came in from update()
    currentState = persister.getPropertyValues(entity);
  } else {
    currentState = entityEntry.getLoadedState();
  }
  final Object[] deletedState = createDeletedState(persister, currentState, session);
  entityEntry.setDeletedState(deletedState);
  // Give the interceptor a chance to observe (and potentially modify) the deleted state.
  session
      .getInterceptor()
      .onDelete(
          entity, entityEntry.getId(), deletedState, persister.getPropertyNames(), propTypes);
  // before any callbacks, etc, so subdeletions see that this deletion happened first
  persistenceContext.setEntryStatus(entityEntry, Status.DELETED);
  final EntityKey key = session.generateEntityKey(entityEntry.getId(), persister);
  cascadeBeforeDelete(session, persister, entity, entityEntry, transientEntities);
  // Null out references to transient entities in the deleted state so they are not
  // dereferenced during the flush.
  new ForeignKeys.Nullifier(entity, true, false, session)
      .nullifyTransientReferences(entityEntry.getDeletedState(), propTypes);
  new Nullability(session).checkNullability(entityEntry.getDeletedState(), persister, true);
  // Mark this key as nullifiable so dependent foreign keys can be cleared during flush.
  persistenceContext.getNullifiableEntityKeys().add(key);
  if (isOrphanRemovalBeforeUpdates) {
    // TODO: The removeOrphan concept is a temporary "hack" for HHH-6484. This should be removed
    // once action/task
    // ordering is improved.
    session
        .getActionQueue()
        .addAction(
            new OrphanRemovalAction(
                entityEntry.getId(),
                deletedState,
                version,
                entity,
                persister,
                isCascadeDeleteEnabled,
                session));
  } else {
    // Ensures that containing deletions happen before sub-deletions
    session
        .getActionQueue()
        .addAction(
            new EntityDeleteAction(
                entityEntry.getId(),
                deletedState,
                version,
                entity,
                persister,
                isCascadeDeleteEnabled,
                session));
  }
  cascadeAfterDelete(session, persister, entity, transientEntities);
  // the entry will be removed after the flush, and will no longer
  // override the stale snapshot
  // This is now handled by removeEntity() in EntityDeleteAction
  // persistenceContext.removeDatabaseSnapshot(key);
}