/** Decrease the count of running tasks of a certain task runner */
private synchronized void decreaseConcurrency(int volumeId) {
  if (diskVolumeLoads.containsKey(volumeId)) {
    Integer concurrency = diskVolumeLoads.get(volumeId);
    if (concurrency > 0) {
      diskVolumeLoads.put(volumeId, concurrency - 1);
    } else if (volumeId > REMOTE && !unassignedTaskForEachVolume.containsKey(volumeId)) {
      diskVolumeLoads.remove(volumeId);
    }
  }
}
/** Add an unassigned task attempt to the queue of the given volume. */
public synchronized void addTaskAttempt(int volumeId, TaskAttempt taskAttempt) {
  synchronized (unassignedTaskForEachVolume) {
    LinkedHashSet<TaskAttempt> list = unassignedTaskForEachVolume.get(volumeId);
    if (list == null) {
      list = new LinkedHashSet<>();
      unassignedTaskForEachVolume.put(volumeId, list);
    }
    list.add(taskAttempt);
  }

  remainTasksNum.incrementAndGet();

  if (!diskVolumeLoads.containsKey(volumeId)) {
    diskVolumeLoads.put(volumeId, 0);
  }
}
/**
 * Remove one unassigned task attempt from the given volume's queue, detach it from
 * every other host/volume mapping that still references it, and increase the volume's load.
 *
 * @return the id of the removed attempt, or null if the volume has no unassigned attempts
 */
private synchronized TaskAttemptId getAndRemove(int volumeId) {
  TaskAttemptId taskAttemptId = null;
  if (!unassignedTaskForEachVolume.containsKey(volumeId)) {
    if (volumeId > REMOTE) {
      diskVolumeLoads.remove(volumeId);
    }
    return taskAttemptId;
  }

  LinkedHashSet<TaskAttempt> list = unassignedTaskForEachVolume.get(volumeId);
  if (list != null && !list.isEmpty()) {
    TaskAttempt taskAttempt;
    synchronized (unassignedTaskForEachVolume) {
      Iterator<TaskAttempt> iterator = list.iterator();
      taskAttempt = iterator.next();
      iterator.remove();
    }

    taskAttemptId = taskAttempt.getId();

    // detach this attempt from every other host/volume that also lists it as unassigned
    for (DataLocation location : taskAttempt.getTask().getDataLocations()) {
      HostVolumeMapping volumeMapping = scheduledRequests.leafTaskHostMapping.get(location.getHost());
      if (volumeMapping != null) {
        volumeMapping.removeTaskAttempt(location.getVolumeId(), taskAttempt);
      }
    }

    increaseConcurrency(volumeId);
  }
  return taskAttemptId;
}
/**
 * Increase the count of running tasks and disk loads for a certain task runner.
 *
 * @param volumeId Volume identifier
 * @return the updated volume load (i.e., how many running tasks use this volume)
 */
private synchronized int increaseConcurrency(int volumeId) {
  int concurrency = 1;
  if (diskVolumeLoads.containsKey(volumeId)) {
    concurrency = diskVolumeLoads.get(volumeId) + 1;
  }

  if (volumeId > -1) {
    LOG.info("Assigned host : " + host + ", Volume : " + volumeId + ", Concurrency : " + concurrency);
  } else if (volumeId == -1) {
    // the volume is unknown: block metadata from the namenode is disabled,
    // or the input is a compressed text file or stored on Amazon S3
    LOG.info("Assigned host : " + host + ", Unknown Volume : " + volumeId + ", Concurrency : " + concurrency);
  } else if (volumeId == REMOTE) {
    // all local blocks on this host have been processed; the task will be assigned to a remote host
    LOG.info("Assigned host : " + host + ", Remaining local tasks : " + getRemainingLocalTaskSize()
        + ", Remote Concurrency : " + concurrency);
  }

  diskVolumeLoads.put(volumeId, concurrency);
  return concurrency;
}
private synchronized void removeTaskAttempt(int volumeId, TaskAttempt taskAttempt) {
  if (!unassignedTaskForEachVolume.containsKey(volumeId)) {
    return;
  }

  LinkedHashSet<TaskAttempt> tasks = unassignedTaskForEachVolume.get(volumeId);
  if (tasks.remove(taskAttempt)) {
    remainTasksNum.getAndDecrement();
  }

  if (tasks.isEmpty()) {
    unassignedTaskForEachVolume.remove(volumeId);
    if (volumeId > REMOTE) {
      diskVolumeLoads.remove(volumeId);
    }
  }
}
/**
 * Return the id of the least-loaded volume, or REMOTE if no volume is tracked.
 *
 * Volume id conventions:
 *   volume of a host                            : 0 ~ n
 *   compressed task, Amazon S3, unknown volume  : -1
 *   remote task                                 : -2
 */
public int getLowestVolumeId() {
  Map.Entry<Integer, Integer> volumeEntry = null;

  for (Map.Entry<Integer, Integer> entry : diskVolumeLoads.entrySet()) {
    if (volumeEntry == null || volumeEntry.getValue() >= entry.getValue()) {
      volumeEntry = entry;
    }
  }

  if (volumeEntry != null) {
    return volumeEntry.getKey();
  } else {
    return REMOTE;
  }
}
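// Worked example (illustrative, not from the original source): if diskVolumeLoads
// currently holds {0 -> 2, 1 -> 1, REMOTE -> 3}, getLowestVolumeId() returns 1,
// since volume 1 carries the fewest running tasks; ties resolve to the entry
// iterated last. If no volume is tracked at all, it falls back to REMOTE (-2).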
public int getVolumeConcurrency(int volumeId) {
  Integer size = diskVolumeLoads.get(volumeId);
  if (size == null) {
    return 0;
  } else {
    return size;
  }
}
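/*
 * Illustrative sketch of the assumed call pattern (not part of the original class).
 * The variable names and calling context are hypothetical; getAndRemove() and
 * decreaseConcurrency() are private, so this assumes the caller is the enclosing
 * scheduler class.
 *
 *   hostVolumeMapping.addTaskAttempt(volumeId, taskAttempt);          // queue an unassigned attempt
 *   int target = hostVolumeMapping.getLowestVolumeId();               // least-loaded volume, or REMOTE
 *   TaskAttemptId assigned = hostVolumeMapping.getAndRemove(target);  // dequeue and bump the volume load
 *   if (assigned != null) {
 *     // launch the attempt; when it finishes, release the slot:
 *     hostVolumeMapping.decreaseConcurrency(target);
 *   }
 */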