@Override
public RelOptCost computeSelfCost(RelOptPlanner planner) {
  if (PrelUtil.getSettings(getCluster()).useDefaultCosting()) {
    // We use multiplier 0.05 for TopN operator, and 0.1 for Sort, to make TopN a
    // preferred choice.
    return super.computeSelfCost(planner).multiplyBy(.1);
  }

  RelNode child = this.getInput();
  double inputRows = RelMetadataQuery.getRowCount(child);
  // int rowWidth = child.getRowType().getPrecision();
  int numSortFields = this.collation.getFieldCollations().size();
  double cpuCost = DrillCostBase.COMPARE_CPU_COST * numSortFields
      * inputRows * (Math.log(inputRows) / Math.log(2));
  double diskIOCost = 0; // assume in-memory for now until we enforce operator-level memory constraints

  // TODO: use rowWidth instead of avgFieldWidth * numFields
  // avgFieldWidth * numFields * inputRows
  double numFields = this.getRowType().getFieldCount();
  long fieldWidth = PrelUtil.getPlannerSettings(planner)
      .getOptions()
      .getOption(ExecConstants.AVERAGE_FIELD_WIDTH_KEY)
      .num_val;
  double memCost = fieldWidth * numFields * inputRows;

  DrillCostFactory costFactory = (DrillCostFactory) planner.getCostFactory();
  return costFactory.makeCost(inputRows, cpuCost, diskIOCost, 0, memCost);
}
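// For intuition: the CPU term above is the standard comparison-sort bound,
// numSortFields * inputRows * log2(inputRows), scaled by a per-comparison unit cost.
// Below is a minimal, self-contained sketch of that formula; the COMPARE_CPU_COST
// value is an assumed placeholder for illustration, not Drill's actual constant.
public class SortCpuCostSketch {
  static final double COMPARE_CPU_COST = 4.0; // assumed illustrative value

  static double sortCpuCost(double inputRows, int numSortFields) {
    // n * log2(n) comparisons per sort key, each charged the unit comparison cost
    return COMPARE_CPU_COST * numSortFields * inputRows * (Math.log(inputRows) / Math.log(2));
  }

  public static void main(String[] args) {
    // 1M rows, 2 sort keys: 2 * 1e6 * log2(1e6) ~= 4e7 comparisons, times the unit cost
    System.out.println(sortCpuCost(1_000_000, 2)); // ~1.59e8
  }
}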
@Override
public RelOptCost computeSelfCost(RelOptPlanner planner) {
  if (PrelUtil.getSettings(getCluster()).useDefaultCosting()) {
    return super.computeSelfCost(planner).multiplyBy(.1);
  }

  RelNode child = this.getChild();
  double inputRows = RelMetadataQuery.getRowCount(child);
  int rowWidth = child.getRowType().getFieldCount() * DrillCostBase.AVG_FIELD_WIDTH;

  // hash each incoming row on the distribution fields
  double hashCpuCost = DrillCostBase.HASH_CPU_COST * inputRows * distFields.size();
  // per-row selection-vector remover cost
  double svrCpuCost = DrillCostBase.SVR_CPU_COST * inputRows;
  // merging the sorted streams from numEndPoints senders costs log2(numEndPoints)
  // comparisons per row
  double mergeCpuCost = DrillCostBase.COMPARE_CPU_COST * inputRows
      * (Math.log(numEndPoints) / Math.log(2));
  // every byte of every row is shipped across the network
  double networkCost = DrillCostBase.BYTE_NETWORK_COST * inputRows * rowWidth;

  DrillCostFactory costFactory = (DrillCostFactory) planner.getCostFactory();
  return costFactory.makeCost(inputRows, hashCpuCost + svrCpuCost + mergeCpuCost, 0, networkCost);
}
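// For intuition: the hash exchange charges CPU for hashing each row on the distribution
// keys, for selection-vector removal, and for merging log2(numEndPoints) sorted streams,
// while the network term charges every byte of every row shipped. The sketch below plugs
// sample numbers into the same formula; all constants here are assumed placeholders for
// illustration, not Drill's actual values.
public class ExchangeCostSketch {
  static final double HASH_CPU_COST = 8.0;      // assumed
  static final double SVR_CPU_COST = 1.0;       // assumed
  static final double COMPARE_CPU_COST = 4.0;   // assumed
  static final double BYTE_NETWORK_COST = 16.0; // assumed
  static final int AVG_FIELD_WIDTH = 8;         // assumed bytes per field

  public static void main(String[] args) {
    double inputRows = 1_000_000;
    int numDistFields = 1;               // distribution (hash) keys
    int numEndPoints = 4;                // receiving fragments
    int rowWidth = 10 * AVG_FIELD_WIDTH; // 10 fields per row

    double hashCpu = HASH_CPU_COST * inputRows * numDistFields;
    double svrCpu = SVR_CPU_COST * inputRows;
    double mergeCpu = COMPARE_CPU_COST * inputRows * (Math.log(numEndPoints) / Math.log(2));
    double network = BYTE_NETWORK_COST * inputRows * rowWidth;

    // The network term dominates: it scales with row width, while the CPU terms do not.
    System.out.printf("cpu=%.3e network=%.3e%n", hashCpu + svrCpu + mergeCpu, network);
  }
}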
// TODO: this method is the same as the one for ScanPrel...eventually we should consolidate
// this and a few other methods in a common base class which would be extended
// by both logical and physical rels.
@Override
public RelOptCost computeSelfCost(final RelOptPlanner planner) {
  final ScanStats stats = groupScan.getScanStats(settings);
  int columnCount = getRowType().getFieldCount();
  double ioCost = 0;
  boolean isStarQuery = Iterables.tryFind(getRowType().getFieldNames(), new Predicate<String>() {
    @Override
    public boolean apply(String input) {
      return Preconditions.checkNotNull(input).equals("*");
    }
  }).isPresent();

  if (isStarQuery) {
    columnCount = STAR_COLUMN_COST;
  }

  // double rowCount = RelMetadataQuery.getRowCount(this);
  double rowCount = stats.getRecordCount();
  if (rowCount < 1) {
    rowCount = 1;
  }

  if (PrelUtil.getSettings(getCluster()).useDefaultCosting()) {
    return planner.getCostFactory()
        .makeCost(rowCount * columnCount, stats.getCpuCost(), stats.getDiskCost());
  }

  // For now, assume cpu cost is proportional to row count.
  double cpuCost = rowCount * columnCount;

  // Even though the scan reads from disk, all currently generated plans need to read the
  // same amount of data, so keeping the disk io cost 0 is ok for now. In the future we
  // might consider alternative scans that go against projections, or different compression
  // schemes, etc. that affect the amount of data read; such alternatives would affect both
  // cpu and io cost.
  DrillCostFactory costFactory = (DrillCostFactory) planner.getCostFactory();
  return costFactory.makeCost(rowCount, cpuCost, ioCost, 0);
}
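// For intuition: the scan cost substitutes a fixed, large column count (STAR_COLUMN_COST)
// for a "*" projection, since the real width is unknown, and floors the row count at 1 so
// costs stay comparable. A minimal sketch of that logic; the STAR_COLUMN_COST value is an
// assumed placeholder, not Drill's actual constant.
import java.util.List;

public class ScanCpuCostSketch {
  static final int STAR_COLUMN_COST = 100; // assumed illustrative value

  static double scanCpuCost(double recordCount, List<String> fieldNames) {
    // A "*" projection hides the real column count, so charge a fixed penalty instead.
    int columnCount = fieldNames.contains("*") ? STAR_COLUMN_COST : fieldNames.size();
    double rowCount = Math.max(recordCount, 1); // floor at 1
    return rowCount * columnCount;              // cpu cost proportional to cells scanned
  }

  public static void main(String[] args) {
    System.out.println(scanCpuCost(50_000, List.of("a", "b", "c"))); // 150000.0
    System.out.println(scanCpuCost(50_000, List.of("*")));           // 5000000.0 (star penalty)
  }
}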