public SubQueryCompletedEvent(final ExecutionBlockId executionBlockId, SubQueryState finalState) {
  super(executionBlockId.getQueryId(), QueryEventType.SUBQUERY_COMPLETED);
  this.executionBlockId = executionBlockId;
  this.finalState = finalState;
}
@Override
public void run() {
  LOG.info("Start TajoWorkerAllocationThread");
  CallFuture<TajoMasterProtocol.WorkerResourceAllocationResponse> callBack =
      new CallFuture<TajoMasterProtocol.WorkerResourceAllocationResponse>();

  int requiredMemoryMBSlot = 512;  // TODO
  int requiredDiskSlots = 1;       // TODO
  TajoMasterProtocol.WorkerResourceAllocationRequest request =
      TajoMasterProtocol.WorkerResourceAllocationRequest.newBuilder()
          .setMemoryMBSlots(requiredMemoryMBSlot)
          .setDiskSlots(requiredDiskSlots)
          .setNumWorks(event.getRequiredNum())
          .setExecutionBlockId(event.getExecutionBlockId().getProto())
          .build();

  // Ask the TajoMaster to allocate worker resources for this execution block.
  RpcConnectionPool connPool = RpcConnectionPool.getPool(queryTaskContext.getConf());
  NettyClientBase tmClient = null;
  try {
    tmClient = connPool.getConnection(
        queryTaskContext.getQueryMasterContext().getWorkerContext().getTajoMasterAddress(),
        TajoMasterProtocol.class, true);
    TajoMasterProtocol.TajoMasterProtocolService masterClientService = tmClient.getStub();
    masterClientService.allocateWorkerResources(null, request, callBack);
  } catch (Exception e) {
    connPool.closeConnection(tmClient);
    tmClient = null;
    LOG.error(e.getMessage(), e);
  } finally {
    connPool.releaseConnection(tmClient);
  }

  // Wait for the asynchronous response, retrying until a worker resource
  // becomes available or this thread is stopped.
  TajoMasterProtocol.WorkerResourceAllocationResponse response = null;
  while (!stopped.get()) {
    try {
      response = callBack.get(3, TimeUnit.SECONDS);
      break;
    } catch (InterruptedException e) {
      if (stopped.get()) {
        return;
      }
    } catch (TimeoutException e) {
      LOG.info("No available worker resource for " + event.getExecutionBlockId());
      continue;
    }
  }

  int numAllocatedWorkers = 0;
  if (response != null) {
    List<TajoMasterProtocol.WorkerAllocatedResource> workerHosts =
        response.getWorkerAllocatedResourceList();
    ExecutionBlockId executionBlockId = event.getExecutionBlockId();

    // Wrap each allocated worker resource in a TajoWorkerContainer.
    List<Container> containers = new ArrayList<Container>();
    for (TajoMasterProtocol.WorkerAllocatedResource eachWorker : workerHosts) {
      TajoWorkerContainer container = new TajoWorkerContainer();
      NodeIdPBImpl nodeId = new NodeIdPBImpl();
      nodeId.setHost(eachWorker.getWorkerHost());
      nodeId.setPort(eachWorker.getPeerRpcPort());

      TajoWorkerContainerId containerId = new TajoWorkerContainerId();
      containerId.setApplicationAttemptId(
          ApplicationIdUtils.createApplicationAttemptId(executionBlockId.getQueryId()));
      containerId.setId(containerIdSeq.incrementAndGet());

      container.setId(containerId);
      container.setNodeId(nodeId);

      WorkerResource workerResource = new WorkerResource();
      workerResource.setAllocatedHost(nodeId.getHost());
      workerResource.setPeerRpcPort(nodeId.getPort());
      workerResource.setQueryMasterPort(eachWorker.getQueryMasterPort());
      workerResource.setPullServerPort(eachWorker.getWorkerPullServerPort());
      workerResource.setMemoryMBSlots(requiredMemoryMBSlot);
      workerResource.setDiskSlots(requiredDiskSlots);

      container.setWorkerResource(workerResource);
      containers.add(container);
    }

    SubQueryState state = queryTaskContext.getSubQuery(executionBlockId).getState();
    if (!SubQuery.isRunningState(state)) {
      // The sub query is no longer running; hand the allocated resources back.
      List<WorkerResource> workerResources = new ArrayList<WorkerResource>();
      for (Container eachContainer : containers) {
        workerResources.add(((TajoWorkerContainer) eachContainer).getWorkerResource());
      }
      try {
        TajoContainerProxy.releaseWorkerResource(queryTaskContext, executionBlockId, workerResources);
      } catch (Exception e) {
        LOG.error(e.getMessage(), e);
      }
      return;
    }

    if (workerHosts.size() > 0) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("SubQueryContainerAllocationEvent fire:" + executionBlockId);
      }
      queryTaskContext.getEventHandler().handle(
          new SubQueryContainerAllocationEvent(executionBlockId, containers));
    }
    numAllocatedWorkers += workerHosts.size();
  }

  if (event.getRequiredNum() > numAllocatedWorkers) {
    // Fewer workers than requested were allocated; re-request the shortfall.
    ContainerAllocationEvent shortRequestEvent = new ContainerAllocationEvent(
        event.getType(), event.getExecutionBlockId(), event.getPriority(),
        event.getResource(),
        event.getRequiredNum() - numAllocatedWorkers,
        event.isLeafQuery(), event.getProgress());
    queryTaskContext.getEventHandler().handle(shortRequestEvent);
  }
  LOG.info("Stop TajoWorkerAllocationThread");
}
@Test
public void testCreateHashFetchURL() throws Exception {
  QueryId q1 = TestTajoIds.createQueryId(1315890136000L, 2);
  String hostName = "tajo1";
  int port = 1234;
  ExecutionBlockId sid = new ExecutionBlockId(q1, 2);
  int numPartition = 10;

  // Build 1,000 intermediate entries spread evenly over the partitions,
  // each reporting a volume of 10.
  Map<Integer, List<IntermediateEntry>> intermediateEntries =
      new HashMap<Integer, List<IntermediateEntry>>();
  for (int i = 0; i < numPartition; i++) {
    intermediateEntries.put(i, new ArrayList<IntermediateEntry>());
  }
  for (int i = 0; i < 1000; i++) {
    int partitionId = i % numPartition;
    IntermediateEntry entry = new IntermediateEntry(i, 0, partitionId, new Task.PullHost(hostName, port));
    entry.setEbId(sid);
    entry.setVolume(10);
    intermediateEntries.get(partitionId).add(entry);
  }

  Map<Integer, Map<ExecutionBlockId, List<IntermediateEntry>>> hashEntries =
      new HashMap<Integer, Map<ExecutionBlockId, List<IntermediateEntry>>>();

  for (Map.Entry<Integer, List<IntermediateEntry>> eachEntry : intermediateEntries.entrySet()) {
    FetchImpl fetch = new FetchImpl(new Task.PullHost(hostName, port), ShuffleType.HASH_SHUFFLE,
        sid, eachEntry.getKey(), eachEntry.getValue());
    fetch.setName(sid.toString());

    // The fetch must survive a round trip through its protobuf form.
    FetchProto proto = fetch.getProto();
    fetch = new FetchImpl(proto);
    assertEquals(proto, fetch.getProto());

    Map<ExecutionBlockId, List<IntermediateEntry>> ebEntries =
        new HashMap<ExecutionBlockId, List<IntermediateEntry>>();
    ebEntries.put(sid, eachEntry.getValue());
    hashEntries.put(eachEntry.getKey(), ebEntries);

    List<URI> uris = fetch.getURIs();
    assertEquals(1, uris.size());   // In hash shuffle, the fetcher returns only one URI per partition.

    URI uri = uris.get(0);
    final Map<String, List<String>> params = new QueryStringDecoder(uri).parameters();
    assertEquals(eachEntry.getKey().toString(), params.get("p").get(0));
    assertEquals("h", params.get("type").get(0));
    assertEquals("" + sid.getId(), params.get("sid").get(0));
  }

  // Merging by pull host should collapse each partition's 100 entries
  // (volume 10 each) into a single entry with volume 1,000.
  Map<Integer, Map<ExecutionBlockId, List<IntermediateEntry>>> mergedHashEntries =
      Repartitioner.mergeIntermediateByPullHost(hashEntries);

  assertEquals(numPartition, mergedHashEntries.size());
  for (int i = 0; i < numPartition; i++) {
    Map<ExecutionBlockId, List<IntermediateEntry>> eachEntry = mergedHashEntries.get(i);
    assertEquals(1, eachEntry.size());
    List<IntermediateEntry> interEntry = eachEntry.get(sid);
    assertEquals(1, interEntry.size());
    assertEquals(1000, interEntry.get(0).getVolume());
  }
}