@Override
public double getRows() {
  // REVIEW jvs 30-May-2005: I just pulled this out of a hat.
  // Heuristic: start from the first input's row count and assume each
  // remaining input removes half of its rows, never going below zero.
  double dRows = RelMetadataQuery.getRowCount(inputs.get(0));
  for (int i = 1; i < inputs.size(); i++) {
    dRows -= 0.5 * RelMetadataQuery.getRowCount(inputs.get(i));
  }
  if (dRows < 0) {
    dRows = 0;
  }
  return dRows;
}
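To see the heuristic in numbers, here is a minimal standalone sketch (the row counts are made-up values, and estimateRows is an illustrative helper, not part of the original class):

// Standalone sketch of the row-count heuristic above: start from the
// first input's estimate and assume each further input removes half of
// its rows, clamped at zero.
public final class MinusRowCountSketch {
  static double estimateRows(double... inputRowCounts) {
    double rows = inputRowCounts[0];
    for (int i = 1; i < inputRowCounts.length; i++) {
      rows -= 0.5 * inputRowCounts[i];
    }
    return Math.max(rows, 0);
  }

  public static void main(String[] args) {
    // 100 rows minus inputs of 30 and 20: 100 - 15 - 10 = 75
    System.out.println(estimateRows(100, 30, 20)); // 75.0
    // The clamp prevents negative estimates: 10 - 50 -> 0
    System.out.println(estimateRows(10, 100));     // 0.0
  }
}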
public MapJoinStreamingRelation getStreamingSide() {
  RelMetadataQuery mq = RelMetadataQuery.instance();
  Double leftInputSize = mq.memory(left);
  Double rightInputSize = mq.memory(right);
  if (leftInputSize == null && rightInputSize == null) {
    return MapJoinStreamingRelation.NONE;
  } else if (leftInputSize != null
      && (rightInputSize == null || (leftInputSize < rightInputSize))) {
    // Left side is smaller (or right is unknown): build on left, stream right.
    return MapJoinStreamingRelation.RIGHT_RELATION;
  } else if (rightInputSize != null
      && (leftInputSize == null || (rightInputSize <= leftInputSize))) {
    // Right side is smaller or equal (or left is unknown): build on right, stream left.
    return MapJoinStreamingRelation.LEFT_RELATION;
  }
  return MapJoinStreamingRelation.NONE;
}
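The null handling and the tie-break are the subtle parts; below is a minimal standalone sketch of just the selection logic, with a stand-in enum and plain Double sizes instead of the Hive types:

// Stand-in for the Hive enum; illustrates only the selection logic.
enum Side { NONE, LEFT_RELATION, RIGHT_RELATION }

final class StreamingSideSketch {
  // Mirrors getStreamingSide(): stream the relation opposite the smaller
  // known input; an unknown (null) size loses the comparison, and two
  // unknowns yield NONE. Ties stream the left relation.
  static Side pick(Double leftSize, Double rightSize) {
    if (leftSize == null && rightSize == null) {
      return Side.NONE;
    } else if (leftSize != null && (rightSize == null || leftSize < rightSize)) {
      return Side.RIGHT_RELATION;
    } else if (rightSize != null && (leftSize == null || rightSize <= leftSize)) {
      return Side.LEFT_RELATION;
    }
    return Side.NONE;
  }

  public static void main(String[] args) {
    System.out.println(pick(10.0, 20.0)); // RIGHT_RELATION (left smaller, build left)
    System.out.println(pick(20.0, 20.0)); // LEFT_RELATION (tie streams left)
    System.out.println(pick(null, 5.0));  // LEFT_RELATION (only right size known)
    System.out.println(pick(null, null)); // NONE
  }
}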
/**
 * Given a list of predicates to push down, this method returns the set of predicates that still
 * need to be pushed. A predicate still needs to be pushed if 1) its String representation is not
 * included in the input set of predicates to exclude, and 2) it is not already in the subtree
 * rooted at the input node. This method updates the set of predicates to exclude with the String
 * representation of the predicates in the output and in the subtree.
 *
 * @param predicatesToExclude String representation of predicates that should be excluded
 * @param inp root of the subtree
 * @param predsToPushDown candidate predicates to push down through the subtree
 * @return list of predicates to push down
 */
public static ImmutableList<RexNode> getPredsNotPushedAlready(
    Set<String> predicatesToExclude, RelNode inp, List<RexNode> predsToPushDown) {
  // Bail out if there is nothing to push
  if (predsToPushDown.isEmpty()) {
    return ImmutableList.of();
  }
  // Build a map so each predicate is converted to a String only once; this also drops
  // predicates whose String representation is already in the exclude set.
  Map<String, RexNode> stringToRexNode = Maps.newLinkedHashMap();
  for (RexNode r : predsToPushDown) {
    String rexNodeString = r.toString();
    if (predicatesToExclude.add(rexNodeString)) {
      stringToRexNode.put(rexNodeString, r);
    }
  }
  if (stringToRexNode.isEmpty()) {
    return ImmutableList.of();
  }
  // Finally exclude preds that are already in the subtree as given by the metadata provider.
  // Note: this is the last step, trying to avoid the expensive call to the metadata provider
  // if possible
  Set<String> predicatesInSubtree = Sets.newHashSet();
  for (RexNode pred
      : RelMetadataQuery.instance().getPulledUpPredicates(inp).pulledUpPredicates) {
    predicatesInSubtree.add(pred.toString());
    predicatesInSubtree.addAll(Lists.transform(RelOptUtil.conjunctions(pred), REX_STR_FN));
  }
  final ImmutableList.Builder<RexNode> newConjuncts = ImmutableList.builder();
  for (Entry<String, RexNode> e : stringToRexNode.entrySet()) {
    if (predicatesInSubtree.add(e.getKey())) {
      newConjuncts.add(e.getValue());
    }
  }
  predicatesToExclude.addAll(predicatesInSubtree);
  return newConjuncts.build();
}
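At its core the method is an order-preserving, string-keyed dedup against two exclusion sets; the following simplified sketch uses plain Strings in place of RexNodes (the class, names, and helper are illustrative only):

import java.util.*;

final class PredDedupSketch {
  // Simplified analogue of getPredsNotPushedAlready: keep only candidates
  // that are in neither the exclude set nor the "already in subtree" set,
  // preserving input order, and record every candidate seen in the exclude
  // set so later calls skip it.
  static List<String> notPushedAlready(Set<String> exclude,
                                       Set<String> inSubtree,
                                       List<String> candidates) {
    List<String> out = new ArrayList<>();
    for (String c : candidates) {
      // Set.add returns false for duplicates, so this both tests and
      // updates the exclude set in one step.
      if (exclude.add(c) && !inSubtree.contains(c)) {
        out.add(c);
      }
    }
    exclude.addAll(inSubtree);
    return out;
  }

  public static void main(String[] args) {
    Set<String> exclude = new HashSet<>(List.of("a = 1"));
    Set<String> inSubtree = Set.of("b > 2");
    System.out.println(notPushedAlready(exclude, inSubtree,
        List.of("a = 1", "b > 2", "c < 3"))); // [c < 3]
  }
}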
@Override
public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) {
  // We always "build" the
  double rowCount = mq.getRowCount(this);
  return planner.getCostFactory().makeCost(rowCount, 0, 0);
}
@Override
public RelOptCost computeSelfCost(RelOptPlanner planner) {
  if (PrelUtil.getSettings(getCluster()).useDefaultCosting()) {
    // We use multiplier 0.05 for TopN operator, and 0.1 for Sort, to make TopN a preferred
    // choice.
    return super.computeSelfCost(planner).multiplyBy(.1);
  }
  RelNode child = this.getInput();
  double inputRows = RelMetadataQuery.getRowCount(child);
  // int rowWidth = child.getRowType().getPrecision();
  int numSortFields = this.collation.getFieldCollations().size();
  double cpuCost = DrillCostBase.COMPARE_CPU_COST * numSortFields * inputRows
      * (Math.log(inputRows) / Math.log(2));
  // assume in-memory for now until we enforce operator-level memory constraints
  double diskIOCost = 0;
  // TODO: use rowWidth instead of avgFieldWidth * numFields
  // avgFieldWidth * numFields * inputRows
  double numFields = this.getRowType().getFieldCount();
  long fieldWidth = PrelUtil.getPlannerSettings(planner)
      .getOptions()
      .getOption(ExecConstants.AVERAGE_FIELD_WIDTH_KEY)
      .num_val;
  double memCost = fieldWidth * numFields * inputRows;
  DrillCostFactory costFactory = (DrillCostFactory) planner.getCostFactory();
  return costFactory.makeCost(inputRows, cpuCost, diskIOCost, 0, memCost);
}
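The CPU term is the standard comparison-sort bound, numSortFields * n * log2(n) comparisons scaled by a per-comparison cost; here is a worked sketch with made-up constants (the values below are assumptions, not Drill's actual defaults):

final class SortCostSketch {
  public static void main(String[] args) {
    double compareCpuCost = 1.0; // stand-in for DrillCostBase.COMPARE_CPU_COST
    int numSortFields = 2;
    double inputRows = 1_000_000;

    // k * n * log2(n) comparisons, as in computeSelfCost above
    double cpuCost = compareCpuCost * numSortFields * inputRows
        * (Math.log(inputRows) / Math.log(2));
    System.out.printf("cpu: %.0f%n", cpuCost); // ~39.9M comparison units

    // Memory term: avg field width * field count * rows
    long fieldWidth = 8;  // stand-in for the AVERAGE_FIELD_WIDTH_KEY option
    double numFields = 5;
    double memCost = fieldWidth * numFields * inputRows;
    System.out.printf("mem: %.0f bytes%n", memCost); // 40M
  }
}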
public ImmutableBitSet getSortedInputs() throws CalciteSemanticException {
  ImmutableBitSet.Builder sortedInputsBuilder = ImmutableBitSet.builder();
  JoinPredicateInfo joinPredInfo =
      HiveCalciteUtil.JoinPredicateInfo.constructJoinPredicateInfo(this);
  List<ImmutableIntList> joinKeysInChildren = new ArrayList<ImmutableIntList>();
  joinKeysInChildren.add(
      ImmutableIntList.copyOf(joinPredInfo.getProjsFromLeftPartOfJoinKeysInChildSchema()));
  joinKeysInChildren.add(
      ImmutableIntList.copyOf(joinPredInfo.getProjsFromRightPartOfJoinKeysInChildSchema()));
  for (int i = 0; i < this.getInputs().size(); i++) {
    boolean correctOrderFound = RelCollations.contains(
        RelMetadataQuery.instance().collations(this.getInputs().get(i)),
        joinKeysInChildren.get(i));
    if (correctOrderFound) {
      sortedInputsBuilder.set(i);
    }
  }
  return sortedInputsBuilder.build();
}
public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) {
  double dRows = mq.getRowCount(this);
  double dCpu = mq.getRowCount(getInput()) * program.getExprCount();
  double dIo = 0;
  return planner.getCostFactory().makeCost(dRows, dCpu, dIo);
}
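Read numerically, the formula charges one CPU unit per input row per expression in the program, with zero IO; a tiny sketch with made-up numbers:

final class CalcCostSketch {
  public static void main(String[] args) {
    double outputRows = 400;   // mq.getRowCount(this), after any filtering
    double inputRows = 1_000;  // mq.getRowCount(getInput())
    int exprCount = 5;         // program.getExprCount()

    // CPU is charged per input row per expression; IO is zero because
    // Calc operates on rows that are already in memory.
    double cpu = inputRows * exprCount; // 5000.0
    System.out.println("rows=" + outputRows + " cpu=" + cpu + " io=0");
  }
}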
/** Model cost of join as size of inputs. */
@Override
public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) {
  return mq.getNonCumulativeCost(this);
}