/**
 * INTERNAL:
 * Get the appropriate attribute value from the object and put it in the appropriate field of the
 * database row. Loop through the reference objects and extract the primary keys and put them in
 * the vector of "nested" rows.
 */
public void writeFromObjectIntoRow(Object object, AbstractRecord row, AbstractSession session) {
    if (!isForeignKeyRelationship) {
        return;
    }
    if ((getSourceForeignKeysToTargetKeys() == null) || (getSourceForeignKeysToTargetKeys().size() == 0)) {
        return;
    }
    if (this.isReadOnly()) {
        return;
    }
    AbstractRecord referenceRow = this.getIndirectionPolicy().extractReferenceRow(this.getAttributeValueFromObject(object));
    if (referenceRow != null) {
        // the reference objects have not been instantiated - use the value from the original row
        if (getForeignKeyGroupingElement() != null) {
            row.put(this.getForeignKeyGroupingElement(), referenceRow.getValues(this.getForeignKeyGroupingElement()));
        } else if (getSourceForeignKeyFields().size() > 0) {
            DatabaseField foreignKeyField = (DatabaseField) getSourceForeignKeyFields().get(0);
            row.put(foreignKeyField, referenceRow.getValues(foreignKeyField));
        }
        return;
    }

    ContainerPolicy cp = this.getContainerPolicy();

    // extract the keys from the objects
    Object attributeValue = this.getRealCollectionAttributeValueFromObject(object, session);
    Vector nestedRows = new Vector(cp.sizeFor(attributeValue));

    if (getForeignKeyGroupingElement() != null) {
        for (Object iter = cp.iteratorFor(attributeValue); cp.hasNext(iter);) {
            XMLRecord nestedRow = this.extractKeyRowFromReferenceObject(cp.next(iter, session), session, (XMLRecord) row);
            nestedRows.addElement(nestedRow);
        }
        row.add(this.getForeignKeyGroupingElement(), nestedRows);
    } else {
        DatabaseField singleField = (DatabaseField) getSourceForeignKeyFields().get(0);
        DatabaseField pkField = (DatabaseField) getSourceForeignKeysToTargetKeys().get(singleField);
        for (Object iter = cp.iteratorFor(attributeValue); cp.hasNext(iter);) {
            Object singleValue = getReferenceDescriptor().getObjectBuilder().extractValueFromObjectForField(cp.next(iter, session), pkField, session);
            row.add(singleField, singleValue);
        }
    }
}
/**
 * INTERNAL:
 * Write the attribute value from the object to the row.
 */
public void writeFromObjectIntoRow(Object object, AbstractRecord row, AbstractSession session) {
    // for each xmlField on this mapping
    for (Iterator fieldIt = getFields().iterator(); fieldIt.hasNext();) {
        XMLField xmlField = (XMLField) fieldIt.next();
        ContainerPolicy cp = getContainerPolicy();
        Object collection = getAttributeAccessor().getAttributeValueFromObject(object);
        if (collection == null) {
            return;
        }
        Object fieldValue;
        Object objectValue;
        String stringValue = "";
        QName schemaType;
        Object iterator = cp.iteratorFor(collection);
        if (usesSingleNode()) {
            while (cp.hasNext(iterator)) {
                objectValue = cp.next(iterator, session);
                fieldValue = buildFieldValue(objectValue, xmlField, session);
                if (fieldValue != null) {
                    schemaType = getSchemaType(xmlField, fieldValue, session);
                    String newValue = getValueToWrite(schemaType, fieldValue, session);
                    if (newValue != null) {
                        stringValue += newValue;
                        if (cp.hasNext(iterator)) {
                            stringValue += SPACE;
                        }
                    }
                }
            }
            if (!(stringValue.equals(""))) {
                row.put(xmlField, stringValue);
            }
        } else {
            ArrayList keyValues = new ArrayList();
            while (cp.hasNext(iterator)) {
                objectValue = cp.next(iterator, session);
                fieldValue = buildFieldValue(objectValue, xmlField, session);
                if (fieldValue != null) {
                    schemaType = getSchemaType(xmlField, fieldValue, session);
                    stringValue = getValueToWrite(schemaType, fieldValue, session);
                    // row.add(xmlField, stringValue);
                    keyValues.add(stringValue);
                }
            }
            row.put(xmlField, keyValues);
        }
    }
}
/**
 * INTERNAL:
 * Delete the reference objects.
 */
public void preDelete(DeleteObjectQuery query) throws DatabaseException, OptimisticLockException {
    if (isForeignKeyRelationship()) {
        return;
    }
    if (!this.shouldObjectModifyCascadeToParts(query)) {
        return;
    }
    Object objects = this.getRealCollectionAttributeValueFromObject(query.getObject(), query.getSession());
    ContainerPolicy cp = this.getContainerPolicy();

    // if privately-owned parts have their privately-owned sub-parts, delete them one by one;
    // else delete everything in one shot
    if (this.mustDeleteReferenceObjectsOneByOne()) {
        for (Object iter = cp.iteratorFor(objects); cp.hasNext(iter);) {
            DeleteObjectQuery deleteQuery = new DeleteObjectQuery();
            deleteQuery.setIsExecutionClone(true);
            deleteQuery.setObject(cp.next(iter, query.getSession()));
            deleteQuery.setCascadePolicy(query.getCascadePolicy());
            query.getSession().executeQuery(deleteQuery);
        }
        if (!query.getSession().isUnitOfWork()) {
            // This deletes any objects on the database, as the collection in memory may have been changed.
            // This is not required for unit of work, as the update would have already deleted these objects,
            // and the backup copy will include the same objects, causing double deletes.
            this.deleteReferenceObjectsLeftOnDatabase(query);
        }
    } else {
        this.deleteAll(query);
    }
}
/**
 * Compare the attributes. Return true if they are alike.
 * Ignore the order of the elements.
 */
private boolean compareAttributeValuesWithoutOrder(Object collection1, Object collection2, AbstractSession session) {
    ContainerPolicy cp = this.getContainerPolicy();
    Vector vector2 = cp.vectorFor(collection2, session); // "clone" it so we can clear out the slots

    for (Object iter1 = cp.iteratorFor(collection1); cp.hasNext(iter1);) {
        Object element1 = cp.next(iter1, session);
        boolean found = false;
        for (int i = 0; i < vector2.size(); i++) {
            if (this.compareElements(element1, vector2.elementAt(i), session)) {
                found = true;
                vector2.setElementAt(XXX, i); // clear out the matching element
                break; // matching element found - skip the rest of them
            }
        }
        if (!found) {
            return false;
        }
    }

    // look for elements that were not in collection1
    for (Enumeration stream = vector2.elements(); stream.hasMoreElements();) {
        if (stream.nextElement() != XXX) {
            return false;
        }
    }
    return true;
}
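/*
 * A minimal standalone sketch of the marker-based, order-insensitive comparison used above (and
 * by the change-detection variant further down): copy the second collection and overwrite each
 * slot with a sentinel as it is matched, so no slot can be matched twice. Everything here is
 * illustrative - plain java.util lists and Objects.equals stand in for ContainerPolicy and
 * compareElements, and the local MATCHED sentinel plays the role of the XXX marker constant.
 */
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.function.BiPredicate;

public final class UnorderedCompareSketch {

    // Sentinel written into slots of the working copy once they have been matched.
    private static final Object MATCHED = new Object();

    /** Returns true if the two lists contain matching elements, ignoring order. */
    static boolean compareWithoutOrder(List<?> list1, List<?> list2, BiPredicate<Object, Object> matcher) {
        List<Object> remaining = new ArrayList<>(list2); // work on a copy so slots can be cleared out
        for (Object element1 : list1) {
            boolean found = false;
            for (int i = 0; i < remaining.size(); i++) {
                if (matcher.test(element1, remaining.get(i))) {
                    remaining.set(i, MATCHED); // consume the slot so it cannot match twice
                    found = true;
                    break;
                }
            }
            if (!found) {
                return false; // element1 has no counterpart in list2
            }
        }
        // any slot never marked is an element present only in list2
        for (Object slot : remaining) {
            if (slot != MATCHED) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        BiPredicate<Object, Object> equalsMatcher = Objects::equals;
        System.out.println(compareWithoutOrder(List.of("a", "b"), List.of("b", "a"), equalsMatcher)); // true
        System.out.println(compareWithoutOrder(List.of("a", "a"), List.of("a", "b"), equalsMatcher)); // false
    }
}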
/**
 * This method will make sure that all the records privately owned by this mapping are actually
 * removed. If such records are found then those are all read and removed one by one along with
 * their privately owned parts.
 */
protected void deleteReferenceObjectsLeftOnDatabase(DeleteObjectQuery query) throws DatabaseException, OptimisticLockException {
    Object objects = this.readPrivateOwnedForObject(query);

    // delete all these objects one by one
    ContainerPolicy cp = this.getContainerPolicy();
    for (Object iter = cp.iteratorFor(objects); cp.hasNext(iter);) {
        query.getSession().deleteObject(cp.next(iter, query.getSession()));
    }
}
/**
 * Merge changes from the source to the target object.
 * Make the necessary removals and adds and map key modifications.
 */
private void mergeChangesIntoObjectWithoutOrder(Object target, ChangeRecord changeRecord, Object source, MergeManager mergeManager) {
    EISCollectionChangeRecord sdkChangeRecord = (EISCollectionChangeRecord) changeRecord;
    ContainerPolicy cp = this.getContainerPolicy();
    AbstractSession session = mergeManager.getSession();

    Object targetCollection = null;
    if (sdkChangeRecord.getOwner().isNew()) {
        targetCollection = cp.containerInstance(sdkChangeRecord.getAdds().size());
    } else {
        targetCollection = this.getRealCollectionAttributeValueFromObject(target, session);
    }

    Vector removes = sdkChangeRecord.getRemoves();
    Vector adds = sdkChangeRecord.getAdds();
    Vector changedMapKeys = sdkChangeRecord.getChangedMapKeys();

    synchronized (targetCollection) {
        for (Enumeration stream = removes.elements(); stream.hasMoreElements();) {
            Object removeElement = this.buildRemovedElementFromChangeSet(stream.nextElement(), mergeManager);

            Object targetElement = null;
            for (Object iter = cp.iteratorFor(targetCollection); cp.hasNext(iter);) {
                targetElement = cp.next(iter, session);
                if (this.compareElements(targetElement, removeElement, session)) {
                    break; // matching element found - skip the rest of them
                }
            }
            if (targetElement != null) {
                // a matching element was found, remove it
                cp.removeFrom(targetElement, targetCollection, session);
            }
        }

        for (Enumeration stream = adds.elements(); stream.hasMoreElements();) {
            Object addElement = this.buildAddedElementFromChangeSet(stream.nextElement(), mergeManager);
            cp.addInto(addElement, targetCollection, session);
        }

        for (Enumeration stream = changedMapKeys.elements(); stream.hasMoreElements();) {
            Object changedMapKeyElement = this.buildAddedElementFromChangeSet(stream.nextElement(), mergeManager);
            Object originalElement = ((UnitOfWorkImpl) session).getOriginalVersionOfObject(changedMapKeyElement);
            cp.removeFrom(originalElement, targetCollection, session);
            cp.addInto(changedMapKeyElement, targetCollection, session);
        }
    }

    // reset the attribute to allow for set method to re-morph changes if the collection is not being stored directly
    this.setRealAttributeValueInObject(target, targetCollection);
}
/**
 * INTERNAL:
 * Insert privately owned parts.
 */
public void preInsert(WriteObjectQuery query) throws DatabaseException, OptimisticLockException {
    if (!this.isForeignKeyRelationship()) {
        return;
    }
    if (!this.shouldObjectModifyCascadeToParts(query)) {
        return;
    }
    // only cascade dependents in UOW
    if (query.shouldCascadeOnlyDependentParts()) {
        return;
    }
    Object objects = this.getRealCollectionAttributeValueFromObject(query.getObject(), query.getSession());

    // insert each object one by one
    ContainerPolicy cp = this.getContainerPolicy();
    for (Object iter = cp.iteratorFor(objects); cp.hasNext(iter);) {
        Object object = cp.next(iter, query.getSession());
        if (this.isPrivateOwned()) {
            // no need to set changeset here as insert is just a copy of the object anyway
            InsertObjectQuery insertQuery = new InsertObjectQuery();
            insertQuery.setIsExecutionClone(true);
            insertQuery.setObject(object);
            insertQuery.setCascadePolicy(query.getCascadePolicy());
            query.getSession().executeQuery(insertQuery);
        } else {
            // This will happen in a unit of work or cascaded query.
            // This is done only for persistence by reachability and is not required if the targets are in the queue anyway.
            // Avoid cycles by checking commit manager, this is allowed because there is no dependency.
            if (!query.getSession().getCommitManager().isCommitInPreModify(object)) {
                WriteObjectQuery writeQuery = new WriteObjectQuery();
                writeQuery.setIsExecutionClone(true);
                if (query.getSession().isUnitOfWork()) {
                    UnitOfWorkChangeSet uowChangeSet = (UnitOfWorkChangeSet) ((UnitOfWorkImpl) query.getSession()).getUnitOfWorkChangeSet();
                    if (uowChangeSet != null) {
                        writeQuery.setObjectChangeSet((ObjectChangeSet) uowChangeSet.getObjectChangeSetForClone(object));
                    }
                }
                writeQuery.setObject(object);
                writeQuery.setCascadePolicy(query.getCascadePolicy());
                query.getSession().executeQuery(writeQuery);
            }
        }
    }
}
/**
 * INTERNAL:
 * Used to verify whether the specified object is deleted or not.
 */
public boolean verifyDelete(Object object, AbstractSession session) throws DatabaseException {
    if (this.isPrivateOwned()) {
        Object objects = this.getRealCollectionAttributeValueFromObject(object, session);

        ContainerPolicy containerPolicy = getContainerPolicy();
        for (Object iter = containerPolicy.iteratorFor(objects); containerPolicy.hasNext(iter);) {
            if (!session.verifyDelete(containerPolicy.next(iter, session))) {
                return false;
            }
        }
    }
    return true;
}
/**
 * Compare the attributes. Return true if they are alike.
 * The order of the elements is significant.
 */
private boolean compareAttributeValuesWithOrder(Object collection1, Object collection2, AbstractSession session) {
    ContainerPolicy cp = this.getContainerPolicy();
    Object iter1 = cp.iteratorFor(collection1);
    Object iter2 = cp.iteratorFor(collection2);
    while (cp.hasNext(iter1)) {
        if (!this.compareElements(cp.next(iter1, session), cp.next(iter2, session), session)) {
            return false;
        }
    }
    return true;
}
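/*
 * The ordered comparison above walks both iterators in lock-step and never checks the sizes
 * itself, so it relies on the caller having already verified that the two collections are the
 * same size; if collection2 were shorter, cp.next(iter2, session) would be asked for an element
 * that is not there. A small illustrative sketch over plain lists with that precondition made
 * explicit; the class and method names are stand-ins, not part of the mapping API.
 */
import java.util.List;
import java.util.Objects;

public final class OrderedCompareSketch {

    /** Lock-step comparison of two lists; order is significant, sizes are checked up front. */
    static boolean compareWithOrder(List<?> list1, List<?> list2) {
        if (list1.size() != list2.size()) {
            return false;
        }
        for (int i = 0; i < list1.size(); i++) {
            if (!Objects.equals(list1.get(i), list2.get(i))) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println(compareWithOrder(List.of("a", "b"), List.of("a", "b"))); // true
        System.out.println(compareWithOrder(List.of("a", "b"), List.of("b", "a"))); // false
    }
}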
/**
 * INTERNAL:
 */
@Override
public void writeFromObjectIntoRow(Object object, AbstractRecord row, AbstractSession session, WriteType writeType) throws DescriptorException {
    if (this.isReadOnly()) {
        return;
    }
    Object attributeValue = this.getAttributeValueFromObject(object);
    if (attributeValue == null) {
        row.put(this.getField(), null);
        return;
    }

    ContainerPolicy cp = this.getContainerPolicy();
    Vector nestedRows = new Vector(cp.sizeFor(attributeValue));
    Object iter = cp.iteratorFor(attributeValue);
    if (null != iter) {
        while (cp.hasNext(iter)) {
            Object element = cp.next(iter, session);
            // convert the value - if necessary
            element = convertObjectValueToDataValue(element, session, ((XMLRecord) row).getMarshaller());
            if (element == null) {
                XMLNullRepresentationType nullRepresentation = getNullPolicy().getMarshalNullRepresentation();
                if (nullRepresentation == XMLNullRepresentationType.XSI_NIL) {
                    nestedRows.add(XMLRecord.NIL);
                } else if (nullRepresentation == XMLNullRepresentationType.EMPTY_NODE) {
                    Node emptyNode = XPathEngine.getInstance().createUnownedElement(((XMLRecord) row).getDOM(), (XMLField) field);
                    DOMRecord nestedRow = new DOMRecord(emptyNode);
                    nestedRows.add(nestedRow);
                }
            } else {
                nestedRows.addElement(buildCompositeRow(element, session, row, writeType));
            }
        }
    }

    Object fieldValue = null;
    if (!nestedRows.isEmpty()) {
        fieldValue = this.getDescriptor().buildFieldValueFromNestedRows(nestedRows, getStructureName(), session);
    }
    row.put(this.getField(), fieldValue);
}
/**
 * Build and return the change record that results from comparing the two collection attributes.
 * Ignore the order of the elements.
 */
private ChangeRecord compareAttributeValuesForChangeWithoutOrder(Object cloneCollection, Object backupCollection, ObjectChangeSet owner, AbstractSession session) {
    ContainerPolicy cp = this.getContainerPolicy();
    Vector backupVector = cp.vectorFor(backupCollection, session); // "clone" it so we can clear out the slots

    EISCollectionChangeRecord changeRecord = new EISCollectionChangeRecord(owner, this.getAttributeName(), this.getDatabaseMapping());

    for (Object cloneIter = cp.iteratorFor(cloneCollection); cp.hasNext(cloneIter);) {
        Object cloneElement = cp.next(cloneIter, session);
        boolean found = false;
        for (int i = 0; i < backupVector.size(); i++) {
            if (this.compareElementsForChange(cloneElement, backupVector.elementAt(i), session)) {
                // the clone element was found in the backup collection
                found = true;
                backupVector.setElementAt(XXX, i); // clear out the matching backup element
                if (this.mapKeyHasChanged(cloneElement, session)) {
                    changeRecord.addChangedMapKeyChangeSet(this.buildChangeSet(cloneElement, owner, session));
                }
                break; // matching backup element found - skip the rest of them
            }
        }
        if (!found) {
            // the clone element was not found, so it must have been added
            changeRecord.addAddedChangeSet(this.buildChangeSet(cloneElement, owner, session));
        }
    }

    for (int i = 0; i < backupVector.size(); i++) {
        Object backupElement = backupVector.elementAt(i);
        if (backupElement != XXX) {
            // the backup element was not in the clone collection, so it must have been removed
            changeRecord.addRemovedChangeSet(this.buildChangeSet(backupElement, owner, session));
        }
    }

    if (changeRecord.hasChanges()) {
        return changeRecord;
    } else {
        return null;
    }
}
/**
 * For isMany=false properties set the value to null. For isMany=true set the value to an empty
 * container of the appropriate type.
 */
public void unsetDeclaredProperty(int propertyIndex) {
    SDOProperty declaredProperty = (SDOProperty) dataObject.getType().getDeclaredProperties().get(propertyIndex);
    Mapping mapping = this.getJAXBMappingForProperty(declaredProperty);
    if (declaredProperty.isMany()) {
        ContainerMapping containerMapping = (ContainerMapping) mapping;
        ContainerPolicy containerPolicy = containerMapping.getContainerPolicy();

        // OLD VALUE
        if (mapping.isAbstractCompositeCollectionMapping()) {
            XMLCompositeCollectionMapping compositeMapping = (XMLCompositeCollectionMapping) mapping;
            if (compositeMapping.getContainerAccessor() != null) {
                Object oldContainer = mapping.getAttributeValueFromObject(entity);
                if (oldContainer != null) {
                    AbstractSession session = ((JAXBContext) jaxbHelperContext.getJAXBContext()).getXMLContext().getSession(entity);
                    Object iterator = containerPolicy.iteratorFor(oldContainer);
                    while (containerPolicy.hasNext(iterator)) {
                        Object oldValue = containerPolicy.next(iterator, session);
                        compositeMapping.getContainerAccessor().setAttributeValueInObject(oldValue, null);
                    }
                }
            }
        }

        // NEW VALUE
        Object container = containerPolicy.containerInstance();
        mapping.getAttributeAccessor().setAttributeValueInObject(entity, container);
    } else {
        // OLD VALUE
        Object oldValue = mapping.getAttributeAccessor().getAttributeValueFromObject(entity);
        if (mapping.isAbstractCompositeObjectMapping()) {
            XMLCompositeObjectMapping compositeMapping = (XMLCompositeObjectMapping) mapping;
            if (compositeMapping.getContainerAccessor() != null) {
                if (oldValue != null) {
                    compositeMapping.getContainerAccessor().setAttributeValueInObject(oldValue, null);
                }
            }
        }

        // NEW VALUE
        mapping.getAttributeAccessor().setAttributeValueInObject(entity, null);
    }
}
public boolean marshal(XPathFragment xPathFragment, MarshalRecord marshalRecord, Object object, AbstractSession session, NamespaceResolver namespaceResolver) {
    if (xmlBinaryDataCollectionMapping.isReadOnly()) {
        return false;
    }
    Object collection = xmlBinaryDataCollectionMapping.getAttributeAccessor().getAttributeValueFromObject(object);
    if (null == collection) {
        return false;
    }

    String xopPrefix = null;
    // If the field's resolver is non-null and has an entry for XOP,
    // use it - otherwise, create a new resolver, set the XOP entry
    // on it, and use it instead.
    // We do this to avoid setting the XOP namespace declaration on
    // a given field or descriptor's resolver, as it is only required
    // on the current element
    if (namespaceResolver != null) {
        xopPrefix = namespaceResolver.resolveNamespaceURI(XMLConstants.XOP_URL);
    }
    if (xopPrefix == null || namespaceResolver == null) {
        xopPrefix = XMLConstants.XOP_PREFIX;
        marshalRecord.getNamespaceResolver().put(xopPrefix, XMLConstants.XOP_URL);
    }

    ContainerPolicy cp = getContainerPolicy();
    Object iterator = cp.iteratorFor(collection);
    while (cp.hasNext(iterator)) {
        Object objectValue = cp.next(iterator, session);
        marshalSingleValue(xPathFragment, marshalRecord, object, objectValue, session, namespaceResolver, ObjectMarshalContext.getInstance());
    }
    marshalRecord.getNamespaceResolver().removeNamespace(XMLConstants.XOP_PREFIX);
    return true;
}
/**
 * INTERNAL:
 * Merge changes from the source to the target object. Simply replace the entire target collection.
 */
public void mergeIntoObject(Object target, boolean isTargetUnInitialized, Object source, MergeManager mergeManager) {
    ContainerPolicy cp = this.getContainerPolicy();
    AbstractSession session = mergeManager.getSession();

    Object sourceCollection = this.getRealCollectionAttributeValueFromObject(source, session);
    Object targetCollection = cp.containerInstance(cp.sizeFor(sourceCollection));
    for (Object iter = cp.iteratorFor(sourceCollection); cp.hasNext(iter);) {
        Object targetElement = this.buildElementFromElement(cp.next(iter, session), mergeManager);
        cp.addInto(targetElement, targetCollection, session);
    }

    // reset the attribute to allow for set method to re-morph changes if the collection is not being stored directly
    this.setRealAttributeValueInObject(target, targetCollection);
}
/**
 * INTERNAL:
 * Execute the query. If there are cached results return those.
 * This must override the super to support result caching.
 *
 * @param session - the session in which the receiver will be executed.
 * @return An object or vector, the result of executing the query.
 * @exception DatabaseException - an error has occurred on the database
 */
public Object execute(AbstractSession session, AbstractRecord row) throws DatabaseException {
    if (shouldCacheQueryResults()) {
        if (getContainerPolicy().overridesRead()) {
            throw QueryException.cannotCacheCursorResultsOnQuery(this);
        }
        if (shouldConformResultsInUnitOfWork()) {
            throw QueryException.cannotConformAndCacheQueryResults(this);
        }
        if (isPrepared()) { // only prepared queries can have cached results.
            Object queryResults = getQueryResults(session, row, true);
            if (queryResults != null) {
                if (QueryMonitor.shouldMonitor()) {
                    QueryMonitor.incrementReadAllHits(this);
                }
                // bug6138532 - check for "cached no results" (InvalidObject singleton) in query results,
                // and return an empty container instance as configured
                if (queryResults == InvalidObject.instance) {
                    return getContainerPolicy().containerInstance(0);
                }
                Collection results = (Collection) queryResults;
                if (session.isUnitOfWork()) {
                    ContainerPolicy policy = getContainerPolicy();
                    Object resultCollection = policy.containerInstance(results.size());
                    Object iterator = policy.iteratorFor(results);
                    while (policy.hasNext(iterator)) {
                        Object result = ((UnitOfWorkImpl) session).registerExistingObject(policy.next(iterator, session), this.descriptor);
                        policy.addInto(result, resultCollection, session);
                    }
                    return resultCollection;
                }
                return results;
            }
        }
    }
    if (QueryMonitor.shouldMonitor()) {
        QueryMonitor.incrementReadAllMisses(this);
    }
    return super.execute(session, row);
}
/**
 * INTERNAL:
 * Delete the reference objects.
 */
@Override
public void postDelete(DeleteObjectQuery query) throws DatabaseException, OptimisticLockException {
    if (!isForeignKeyRelationship()) {
        return;
    }
    if (!this.shouldObjectModifyCascadeToParts(query)) {
        return;
    }
    Object referenceObjects = this.getRealCollectionAttributeValueFromObject(query.getObject(), query.getSession());

    // if we have a custom delete all query, use it;
    // otherwise, delete the reference objects one by one
    if (this.hasCustomDeleteAllQuery()) {
        this.deleteAll(query, referenceObjects);
    } else {
        ContainerPolicy cp = this.getContainerPolicy();
        for (Object iter = cp.iteratorFor(referenceObjects); cp.hasNext(iter);) {
            DeleteObjectQuery deleteQuery = new DeleteObjectQuery();
            deleteQuery.setIsExecutionClone(true);
            deleteQuery.setObject(cp.next(iter, query.getSession()));
            deleteQuery.setCascadePolicy(query.getCascadePolicy());
            query.getSession().executeQuery(deleteQuery);
        }
        if (!query.getSession().isUnitOfWork()) {
            // This deletes any objects on the database, as the collection in memory may have been changed.
            // This is not required for unit of work, as the update would have already deleted these objects,
            // and the backup copy will include the same objects, causing double deletes.
            this.deleteReferenceObjectsLeftOnDatabase(query);
        }
    }
}
public void writeFromObjectIntoRow(Object object, AbstractRecord row, AbstractSession session) {
    XMLRecord record = (XMLRecord) row;
    XMLMarshaller marshaller = record.getMarshaller();
    Object attributeValue = getAttributeValueFromObject(object);

    ContainerPolicy cp = this.getContainerPolicy();
    Vector elements = new Vector(cp.sizeFor(attributeValue));
    XMLField field = (XMLField) getField();
    NamespaceResolver resolver = field.getNamespaceResolver();
    boolean isAttribute = field.getLastXPathFragment().isAttribute();
    String prefix = null;
    XMLField includeField = null;
    if (!isAttribute) {
        if (record.isXOPPackage() && !isSwaRef() && !shouldInlineBinaryData()) {
            field = (XMLField) getField();
            // If the field's resolver is non-null and has an entry for XOP,
            // use it - otherwise, create a new resolver, set the XOP entry
            // on it, and use it instead.
            // We do this to avoid setting the XOP namespace declaration on
            // a given field or descriptor's resolver, as it is only required
            // on the current element
            if (resolver != null) {
                prefix = resolver.resolveNamespaceURI(XMLConstants.XOP_URL);
            }
            if (prefix == null) {
                prefix = XMLConstants.XOP_PREFIX; // "xop"
                resolver = new NamespaceResolver();
                resolver.put(prefix, XMLConstants.XOP_URL);
            }
            includeField = new XMLField(prefix + XMLConstants.COLON + INCLUDE + "/@href");
            includeField.setNamespaceResolver(resolver);
        }
    }
    XMLField textField = new XMLField(field.getXPath() + '/' + XMLConstants.TEXT);
    textField.setNamespaceResolver(field.getNamespaceResolver());
    textField.setSchemaType(field.getSchemaType());
    // field = textField;

    boolean inline = false;
    for (Object iter = cp.iteratorFor(attributeValue); cp.hasNext(iter);) {
        Object element = cp.next(iter, session);
        element = getValueToWrite(element, object, record, field, includeField, session);
        // check for null before inspecting the element's class,
        // otherwise a null value would cause a NullPointerException
        if (element != null) {
            if (element.getClass() == ClassConstants.ABYTE) {
                inline = true;
            }
            elements.addElement(element);
        }
    }
    Object fieldValue = null;
    if (!elements.isEmpty()) {
        fieldValue = this.getDescriptor().buildFieldValueFromDirectValues(elements, elementDataTypeName, session);
    }
    if (inline) {
        row.put(textField, fieldValue);
    } else {
        row.put(field, fieldValue);
    }
}
/**
 * INTERNAL:
 * Conform the result if specified.
 */
protected Object conformResult(Object result, UnitOfWorkImpl unitOfWork, AbstractRecord arguments, boolean buildDirectlyFromRows) {
    if (getSelectionCriteria() != null) {
        ExpressionBuilder builder = getSelectionCriteria().getBuilder();
        builder.setSession(unitOfWork.getRootSession(null));
        builder.setQueryClass(getReferenceClass());
    }

    // If the query is redirected then the collection returned might no longer
    // correspond to the original container policy. CR#2342-S.M.
    ContainerPolicy cp;
    if (getRedirector() != null) {
        cp = ContainerPolicy.buildPolicyFor(result.getClass());
    } else {
        cp = getContainerPolicy();
    }

    // This code is now a great deal different... For one, registration is done
    // as part of conforming. Also, this should only be called if one actually
    // is conforming.
    // First scan the UnitOfWork for conforming instances.
    // This will walk through the entire cache of registered objects.
    // Let p be objects from result not in the cache.
    // Let c be objects from cache.
    // Presently p intersect c = empty set, but later p subset c.
    // By checking cache now doesConform will be called p fewer times.
    Map indexedInterimResult = unitOfWork.scanForConformingInstances(getSelectionCriteria(), getReferenceClass(), arguments, this);

    Cursor cursor = null;
    // In the case of cursors just conform/register the initially read collection.
    if (cp.isCursorPolicy()) {
        cursor = (Cursor) result;
        cp = ContainerPolicy.buildPolicyFor(ClassConstants.Vector_class);
        // In nested UnitOfWork session might have been session of the parent.
        cursor.setSession(unitOfWork);
        result = cursor.getObjectCollection();
        // for later incremental conforming...
        cursor.setInitiallyConformingIndex(indexedInterimResult);
        cursor.setSelectionCriteriaClone(getSelectionCriteria());
        cursor.setTranslationRow(arguments);
    }

    // Now conform the result from the database.
    // Remove any deleted or changed objects that no longer conform.
    // Deletes will only work for simple queries, queries with or's or anyof's may not return
    // correct results when untriggered indirection is in the model.
    Vector fromDatabase = null;

    // When building directly from rows, one of the performance benefits
    // is that we no longer have to wrap and then unwrap the originals.
    // result is just a vector, not a container of wrapped originals.
    if (buildDirectlyFromRows) {
        Vector rows = (Vector) result;
        fromDatabase = new Vector(rows.size());
        for (int i = 0; i < rows.size(); i++) {
            Object object = rows.elementAt(i);
            // null is placed in the row collection for 1-m joining to filter duplicate rows.
            if (object != null) {
                Object clone = conformIndividualResult(object, unitOfWork, arguments, getSelectionCriteria(), indexedInterimResult, buildDirectlyFromRows);
                if (clone != null) {
                    fromDatabase.addElement(clone);
                }
            }
        }
    } else {
        fromDatabase = new Vector(cp.sizeFor(result));
        AbstractSession sessionToUse = unitOfWork.getParent();
        for (Object iter = cp.iteratorFor(result); cp.hasNext(iter);) {
            Object object = cp.next(iter, sessionToUse);
            Object clone = conformIndividualResult(object, unitOfWork, arguments, getSelectionCriteria(), indexedInterimResult, buildDirectlyFromRows);
            if (clone != null) {
                fromDatabase.addElement(clone);
            }
        }
    }

    // Now add the unwrapped conforming instances into an appropriate container.
    // Wrapping is done automatically.
    // Make sure a vector of exactly the right size is returned.
    Object conformedResult = cp.containerInstance(indexedInterimResult.size() + fromDatabase.size());
    Object eachClone;
    for (Iterator enumtr = indexedInterimResult.values().iterator(); enumtr.hasNext();) {
        eachClone = enumtr.next();
        cp.addInto(eachClone, conformedResult, unitOfWork);
    }
    for (Enumeration enumtr = fromDatabase.elements(); enumtr.hasMoreElements();) {
        eachClone = enumtr.nextElement();
        cp.addInto(eachClone, conformedResult, unitOfWork);
    }

    if (cursor != null) {
        cursor.setObjectCollection((Vector) conformedResult);
        // For nested UOW must copy all in object collection to
        // initiallyConformingIndex, as some of these could have been from
        // the parent UnitOfWork.
        if (unitOfWork.isNestedUnitOfWork()) {
            for (Enumeration enumtr = cursor.getObjectCollection().elements(); enumtr.hasMoreElements();) {
                Object clone = enumtr.nextElement();
                indexedInterimResult.put(clone, clone);
            }
        }
        return cursor;
    } else {
        return conformedResult;
    }
}
/**
 * INTERNAL:
 * All objects queried via a UnitOfWork get registered here if the query went to the database.
 * <p>
 * Involves registering the query result individually and in totality, and hence refreshing /
 * conforming is done here.
 *
 * @param result may be a collection (read all) or an object (read one), or even a cursor. If in
 *     transaction the shared cache will be bypassed, meaning the result may not be originals from
 *     the parent but raw database rows.
 * @param unitOfWork the unitOfWork the result is being registered in.
 * @param arguments the original arguments/parameters passed to the query execution. Used by conforming.
 * @param buildDirectlyFromRows If in transaction must construct a registered result from raw database rows.
 * @return the final (conformed, refreshed, wrapped) UnitOfWork query result
 */
public Object registerResultInUnitOfWork(Object result, UnitOfWorkImpl unitOfWork, AbstractRecord arguments, boolean buildDirectlyFromRows) {
    // For bug 2612366: Conforming results in UOW extremely slow.
    // Replacing results with registered versions in the UOW is a part of
    // conforming and is now done while conforming to maximize performance.
    if (shouldConformResultsInUnitOfWork() || this.descriptor.shouldAlwaysConformResultsInUnitOfWork()) {
        return conformResult(result, unitOfWork, arguments, buildDirectlyFromRows);
    }

    // When building directly from rows, one of the performance benefits
    // is that we no longer have to wrap and then unwrap the originals.
    // result is just a vector, not a collection of wrapped originals.
    // Also for cursors the initial connection is automatically registered.
    if (buildDirectlyFromRows) {
        List<AbstractRecord> rows = (List<AbstractRecord>) result;
        ContainerPolicy cp = getContainerPolicy();
        int size = rows.size();
        Object clones = cp.containerInstance(size);
        for (int index = 0; index < size; index++) {
            AbstractRecord row = rows.get(index);
            // null is placed in the row collection for 1-m joining to filter duplicate rows.
            if (row != null) {
                Object clone = buildObject(row);
                cp.addInto(clone, clones, unitOfWork, row, this);
            }
        }
        return clones;
    }

    ContainerPolicy cp;
    Cursor cursor = null;
    // If the query is redirected then the collection returned might no longer
    // correspond to the original container policy. CR#2342-S.M.
    if (getRedirector() != null) {
        cp = ContainerPolicy.buildPolicyFor(result.getClass());
    } else {
        cp = getContainerPolicy();
    }

    // In the case of cursors just register the initially read collection.
    if (cp.isCursorPolicy()) {
        cursor = (Cursor) result;
        // In nested UnitOfWork session might have been session of the parent.
        cursor.setSession(unitOfWork);
        cp = ContainerPolicy.buildPolicyFor(ClassConstants.Vector_class);
        result = cursor.getObjectCollection();
    }

    Object clones = cp.containerInstance(cp.sizeFor(result));
    AbstractSession sessionToUse = unitOfWork.getParent();
    for (Object iter = cp.iteratorFor(result); cp.hasNext(iter);) {
        Object object = cp.next(iter, sessionToUse);
        Object clone = registerIndividualResult(object, unitOfWork, this.joinedAttributeManager);
        cp.addInto(clone, clones, unitOfWork);
    }
    if (cursor != null) {
        cursor.setObjectCollection((Vector) clones);
        return cursor;
    } else {
        return clones;
    }
}