private void assignEndpoints(
    Collection<DrillbitEndpoint> allNodes,
    PlanningSet planningSet,
    int globalMaxWidth,
    int maxWidthPerEndpoint)
    throws PhysicalOperatorSetupException {
  // First we determine the amount of parallelization for a fragment. This will be between 1 and
  // maxWidth, based on cost. (Later it could also be based on cluster operation.) Then we decide
  // endpoints based on affinity (later this could be based on endpoint load).
  for (Wrapper wrapper : planningSet) {
    Stats stats = wrapper.getStats();

    // Figure out the width.
    int width = Math.min(stats.getMaxWidth(), globalMaxWidth);
    float diskCost = stats.getDiskCost();
    // logger.debug("Frag max width: {} and diskCost: {}", stats.getMaxWidth(), diskCost);

    // TODO: right now we'll just assume that each task is cost 1, so we'll set the breadth at the
    // lesser of the number of tasks or the maximum width of the fragment.
    if (diskCost < width) {
      // width = (int) diskCost;
    }

    width = Math.min(width, maxWidthPerEndpoint * allNodes.size());
    if (width < 1) {
      width = 1;
    }
    // logger.debug("Setting width {} on fragment {}", width, wrapper);
    wrapper.setWidth(width);

    // Figure out endpoint assignments. This also informs the exchanges about their respective
    // endpoints.
    wrapper.assignEndpoints(allNodes);
  }
}
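
// The width logic above boils down to clamping the fragment's own maximum width by the global
// limit and by the aggregate per-endpoint limit, with a floor of one. The hypothetical helper
// below is only an illustrative restatement of that arithmetic (it is not part of the original
// class). For example, fragmentMaxWidth=20, globalMaxWidth=10, maxWidthPerEndpoint=2 and 3 nodes
// yields min(min(20, 10), 2 * 3) = 6.
static int sketchWidth(
    int fragmentMaxWidth, int globalMaxWidth, int maxWidthPerEndpoint, int nodeCount) {
  int width = Math.min(fragmentMaxWidth, globalMaxWidth);
  width = Math.min(width, maxWidthPerEndpoint * nodeCount);
  return Math.max(width, 1);
}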
private QueryWorkUnit generateWorkUnit(
    DrillbitEndpoint foremanNode,
    QueryId queryId,
    PhysicalPlanReader reader,
    Fragment rootNode,
    PlanningSet planningSet)
    throws ExecutionSetupException {

  List<PlanFragment> fragments = Lists.newArrayList();

  PlanFragment rootFragment = null;
  FragmentRoot rootOperator = null;

  long queryStartTime = System.currentTimeMillis();

  // Now we generate all the individual plan fragments and associated assignments. Note, we need
  // all endpoints assigned before we can materialize, so we start a new loop here rather than
  // reusing the previous one.
  for (Wrapper wrapper : planningSet) {
    Fragment node = wrapper.getNode();
    Stats stats = node.getStats();
    final PhysicalOperator physicalOperatorRoot = node.getRoot();
    boolean isRootNode = rootNode == node;

    if (isRootNode && wrapper.getWidth() != 1) {
      throw new FragmentSetupException(
          String.format(
              "Failure while trying to setup fragment. The root fragment must always have a parallelization of one. In the current case, the width was set to %d.",
              wrapper.getWidth()));
    }

    // A fragment is a leaf if it doesn't rely on any other exchanges.
    boolean isLeafFragment = node.getReceivingExchangePairs().size() == 0;

    // Create a minor fragment for each unit of width of the major fragment.
    for (int minorFragmentId = 0; minorFragmentId < wrapper.getWidth(); minorFragmentId++) {
      IndexedFragmentNode iNode = new IndexedFragmentNode(minorFragmentId, wrapper);
      PhysicalOperator op = physicalOperatorRoot.accept(materializer, iNode);
      Preconditions.checkArgument(op instanceof FragmentRoot);
      FragmentRoot root = (FragmentRoot) op;

      // Get the plan as JSON.
      String plan;
      try {
        plan = reader.writeJson(root);
      } catch (JsonProcessingException e) {
        throw new FragmentSetupException(
            "Failure while trying to convert fragment into json.", e);
      }

      FragmentHandle handle = FragmentHandle //
          .newBuilder() //
          .setMajorFragmentId(wrapper.getMajorFragmentId()) //
          .setMinorFragmentId(minorFragmentId) //
          .setQueryId(queryId) //
          .build();

      PlanFragment fragment = PlanFragment.newBuilder() //
          .setCpuCost(stats.getCpuCost()) //
          .setDiskCost(stats.getDiskCost()) //
          .setForeman(foremanNode) //
          .setMemoryCost(stats.getMemoryCost()) //
          .setNetworkCost(stats.getNetworkCost()) //
          .setFragmentJson(plan) //
          .setHandle(handle) //
          .setAssignment(wrapper.getAssignedEndpoint(minorFragmentId)) //
          .setLeafFragment(isLeafFragment) //
          .setQueryStartTime(queryStartTime) //
          .build();

      if (isRootNode) {
        logger.debug("Root fragment:\n {}", fragment);
        rootFragment = fragment;
        rootOperator = root;
      } else {
        logger.debug("Remote fragment:\n {}", fragment);
        fragments.add(fragment);
      }
    }
  }

  return new QueryWorkUnit(rootOperator, rootFragment, fragments);
}
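
// Sketch for clarity, not part of the original class: generateWorkUnit() emits exactly one
// PlanFragment per unit of width assigned by assignEndpoints(), so the total number of minor
// fragments (root plus remote) equals the sum of the widths in the planning set. The helper
// below is a hypothetical restatement of that relationship.
static int sketchTotalFragmentCount(PlanningSet planningSet) {
  int total = 0;
  for (Wrapper wrapper : planningSet) {
    total += wrapper.getWidth();
  }
  return total;
}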