/**
 * Tries to allocate the requested number of slices for the given job.
 *
 * <p>For every instance type in {@code instanceRequestMap} this method attempts to allocate up to
 * the requested maximum number of slices via {@code getSliceOfType}. If fewer than the requested
 * minimum can be allocated for any type, all slices allocated so far in this call are released
 * again and an {@link InstanceException} is thrown. Shortfalls between the minimum and the maximum
 * are recorded as pending requests to be satisfied later (see {@code checkPendingRequests}).
 * On success the new slices are added to the per-job bookkeeping and the registered
 * {@code instanceListener} is notified asynchronously via a {@code ClusterInstanceNotifier} thread.
 *
 * @param jobID the ID of the job the instances are requested for
 * @param conf the job configuration; not inspected by this implementation
 * @param instanceRequestMap minimum/maximum number of instances requested per instance type
 * @param splitAffinityList affinity hints; currently unused (see topology-awareness TODO below)
 * @throws InstanceException if the minimum number of instances of some type cannot be allocated
 */
@Override
public synchronized void requestInstance(
    final JobID jobID,
    final Configuration conf,
    final InstanceRequestMap instanceRequestMap,
    final List<String> splitAffinityList)
    throws InstanceException {

  // Slices allocated during this call only; used for rollback on failure and for the
  // final bookkeeping/notification on success.
  final List<AllocatedSlice> newlyAllocatedSlicesOfJob = new ArrayList<AllocatedSlice>();
  // Per-type count of requested instances that could not be allocated right now
  // (between minimum and maximum); merged into pendingRequestsOfJob at the end.
  final Map<InstanceType, Integer> pendingRequests = new HashMap<InstanceType, Integer>();

  // Iterate over all instance types
  for (final Iterator<Map.Entry<InstanceType, Integer>> it =
          instanceRequestMap.getMaximumIterator();
      it.hasNext(); ) {

    // Iterate over all requested instances of a specific type
    final Map.Entry<InstanceType, Integer> entry = it.next();
    final int maximumNumberOfInstances = entry.getValue().intValue();

    for (int i = 0; i < maximumNumberOfInstances; i++) {
      LOG.info("Trying to allocate instance of type " + entry.getKey().getIdentifier());

      // TODO: Introduce topology awareness here
      // TODO: Daniel: Code taken from AbstractScheduler..
      final AllocatedSlice slice = getSliceOfType(jobID, entry.getKey());
      if (slice == null) {
        if (i < instanceRequestMap.getMinimumNumberOfInstances(entry.getKey())) {
          // The request cannot be fulfilled, release the slices again and throw an exception
          for (final AllocatedSlice sliceToRelease : newlyAllocatedSlicesOfJob) {
            sliceToRelease
                .getHostingInstance()
                .removeAllocatedSlice(sliceToRelease.getAllocationID());
          }

          // NOTE(review): the loop above already releases the previously allocated slices;
          // the original "TODO: Remove previously allocated slices again" appears stale.
          throw new InstanceException("Could not find a suitable instance");
        } else {
          // Minimum is satisfied; the remaining instances of this type become pending.
          final int numberOfRemainingInstances = maximumNumberOfInstances - i;
          if (numberOfRemainingInstances > 0) {
            // Store the request for the missing instances (accumulate per type)
            Integer val = pendingRequests.get(entry.getKey());
            if (val == null) {
              val = Integer.valueOf(0);
            }
            val = Integer.valueOf(val.intValue() + numberOfRemainingInstances);
            pendingRequests.put(entry.getKey(), val);
          }

          // No more slices of this type available right now; move on to the next type.
          break;
        }
      }

      newlyAllocatedSlicesOfJob.add(slice);
    }
  }

  // The request could be processed successfully, so update internal bookkeeping.
  List<AllocatedSlice> allAllocatedSlicesOfJob = this.slicesOfJobs.get(jobID);
  if (allAllocatedSlicesOfJob == null) {
    allAllocatedSlicesOfJob = new ArrayList<AllocatedSlice>();
    this.slicesOfJobs.put(jobID, allAllocatedSlicesOfJob);
  }
  allAllocatedSlicesOfJob.addAll(newlyAllocatedSlicesOfJob);

  // Merge this call's unsatisfied (but above-minimum) requests into the job's pending map.
  PendingRequestsMap allPendingRequestsOfJob = this.pendingRequestsOfJob.get(jobID);
  if (allPendingRequestsOfJob == null) {
    allPendingRequestsOfJob = new PendingRequestsMap();
    this.pendingRequestsOfJob.put(jobID, allPendingRequestsOfJob);
  }
  for (final Iterator<Map.Entry<InstanceType, Integer>> it =
          pendingRequests.entrySet().iterator();
      it.hasNext(); ) {
    final Map.Entry<InstanceType, Integer> entry = it.next();
    allPendingRequestsOfJob.addRequest(entry.getKey(), entry.getValue().intValue());
  }

  // Finally, create the list of allocated resources for the scheduler
  final List<AllocatedResource> allocatedResources = new ArrayList<AllocatedResource>();
  for (final AllocatedSlice slice : newlyAllocatedSlicesOfJob) {
    allocatedResources.add(
        new AllocatedResource(
            slice.getHostingInstance(), slice.getType(), slice.getAllocationID()));
  }

  // Notify the listener on a separate thread so this synchronized method is not held
  // for the duration of the callback.
  if (this.instanceListener != null) {
    final ClusterInstanceNotifier clusterInstanceNotifier =
        new ClusterInstanceNotifier(this.instanceListener, jobID, allocatedResources);
    clusterInstanceNotifier.start();
  }
}
/**
 * Checks if a pending request can be fulfilled.
 *
 * <p>Walks every job's {@code PendingRequestsMap} and, for each instance type with outstanding
 * requests, tries to allocate slices via {@code getSliceOfType} until either the pending count
 * reaches zero or no further slice is available. Each successful allocation decrements the
 * pending count, is added to the job's slice bookkeeping in {@code slicesOfJobs}, and is reported
 * to the registered {@code instanceListener} asynchronously via a {@code ClusterInstanceNotifier}
 * thread (one notification per job, covering all slices allocated for it in this pass).
 *
 * <p>NOTE(review): this method is not itself synchronized and mutates shared state
 * ({@code pendingRequestsOfJob}, {@code slicesOfJobs}); it presumably must only be invoked while
 * holding this object's monitor (as {@code requestInstance} is synchronized) — confirm at the
 * call sites. Also note that {@code decreaseNumberOfPendingInstances} is called while iterating
 * {@code pendingRequestsMap.iterator()}; this assumes the PendingRequestsMap iterator tolerates
 * in-place count updates — TODO confirm against that class.
 */
private void checkPendingRequests() {

  final Iterator<Map.Entry<JobID, PendingRequestsMap>> it =
      this.pendingRequestsOfJob.entrySet().iterator();
  while (it.hasNext()) {

    // Resources allocated for this particular job during this pass.
    final List<AllocatedResource> allocatedResources = new ArrayList<AllocatedResource>();
    final Map.Entry<JobID, PendingRequestsMap> entry = it.next();
    final JobID jobID = entry.getKey();
    final PendingRequestsMap pendingRequestsMap = entry.getValue();

    final Iterator<Map.Entry<InstanceType, Integer>> it2 = pendingRequestsMap.iterator();
    while (it2.hasNext()) {

      final Map.Entry<InstanceType, Integer> entry2 = it2.next();
      final InstanceType requestedInstanceType = entry2.getKey();
      int numberOfPendingInstances = entry2.getValue().intValue();

      // Consistency check: pending counts should always be positive; log and skip otherwise.
      if (numberOfPendingInstances <= 0) {
        LOG.error(
            "Inconsistency: Job "
                + jobID
                + " has "
                + numberOfPendingInstances
                + " requests for instance type "
                + requestedInstanceType.getIdentifier());
        continue;
      }

      // Allocate slices until the pending count is exhausted or none are available.
      while (numberOfPendingInstances > 0) {

        if (LOG.isDebugEnabled()) {
          LOG.debug(
              "Trying to allocate instance of type " + requestedInstanceType.getIdentifier());
        }

        // TODO: Introduce topology awareness here
        final AllocatedSlice slice = getSliceOfType(jobID, requestedInstanceType);
        if (slice == null) {
          // No capacity for this type right now; leave the remaining requests pending.
          break;
        } else {

          LOG.info(
              "Allocated instance of type "
                  + requestedInstanceType.getIdentifier()
                  + " as a result of pending request for job "
                  + jobID);

          // Decrease number of pending instances
          --numberOfPendingInstances;
          pendingRequestsMap.decreaseNumberOfPendingInstances(requestedInstanceType);

          // Record the new slice in the per-job bookkeeping, creating the list on first use.
          List<AllocatedSlice> allocatedSlices = this.slicesOfJobs.get(jobID);
          if (allocatedSlices == null) {
            allocatedSlices = new ArrayList<AllocatedSlice>();
            this.slicesOfJobs.put(jobID, allocatedSlices);
          }
          allocatedSlices.add(slice);

          allocatedResources.add(
              new AllocatedResource(
                  slice.getHostingInstance(), slice.getType(), slice.getAllocationID()));
        }
      }
    }

    // Notify the listener asynchronously, once per job, if anything was allocated.
    if (!allocatedResources.isEmpty() && this.instanceListener != null) {
      final ClusterInstanceNotifier clusterInstanceNotifier =
          new ClusterInstanceNotifier(this.instanceListener, jobID, allocatedResources);
      clusterInstanceNotifier.start();
    }
  }
}