private RelNode convertToRel(SqlNode node) throws RelConversionException {
  final RelNode convertedNode = planner.convert(node);
  final RelMetadataProvider provider = convertedNode.getCluster().getMetadataProvider();

  // Register the RelMetadataProvider with the HepPlanner.
  final List<RelMetadataProvider> list = Lists.newArrayList(provider);
  hepPlanner.registerMetadataProviders(list);
  final RelMetadataProvider cachingMetaDataProvider =
      new CachingRelMetadataProvider(ChainedRelMetadataProvider.of(list), hepPlanner);
  convertedNode.accept(new MetaDataProviderModifier(cachingMetaDataProvider));

  // The HepPlanner is used here only for window function planning.
  hepPlanner.setRoot(convertedNode);
  RelNode rel = hepPlanner.findBestExp();

  rel.accept(new MetaDataProviderModifier(provider));
  return rel;
}
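/*
 * Illustrative sketch (an assumption, not the original file's implementation): one
 * plausible shape for the MetaDataProviderModifier shuttle used above, assuming it
 * simply installs the given metadata provider on the cluster of every node it visits.
 * It relies only on Calcite's RelShuttleImpl and RelOptCluster.setMetadataProvider.
 */
// import org.apache.calcite.rel.RelNode;
// import org.apache.calcite.rel.RelShuttleImpl;
// import org.apache.calcite.rel.core.TableScan;
// import org.apache.calcite.rel.metadata.RelMetadataProvider;
public static class MetadataProviderShuttle extends RelShuttleImpl {
  private final RelMetadataProvider provider;

  public MetadataProviderShuttle(RelMetadataProvider provider) {
    this.provider = provider;
  }

  @Override
  public RelNode visit(TableScan scan) {
    // Leaf nodes: swap the provider on the scan's cluster.
    scan.getCluster().setMetadataProvider(provider);
    return super.visit(scan);
  }

  @Override
  protected RelNode visitChild(RelNode parent, int i, RelNode child) {
    // Recurse into children first, then update the parent's cluster.
    child.accept(this);
    parent.getCluster().setMetadataProvider(provider);
    return parent;
  }
}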
private TypedSqlNode validateNode(SqlNode sqlNode)
    throws ValidationException, RelConversionException, ForemanSetupException {
  TypedSqlNode typedSqlNode = planner.validateAndGetType(sqlNode);
  SqlNode sqlNodeValidated = typedSqlNode.getSqlNode();

  // Check whether any unsupported functionality is used.
  UnsupportedOperatorsVisitor visitor = UnsupportedOperatorsVisitor.createVisitor(context);
  try {
    sqlNodeValidated.accept(visitor);
  } catch (UnsupportedOperationException ex) {
    // If the exception is due to unsupported functionality, convert it to a
    // user-facing exception; otherwise rethrow so higher-level logic can handle it.
    visitor.convertException();
    throw ex;
  }

  return typedSqlNode;
}
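/*
 * Illustrative sketch (an assumption, not Drill's actual UnsupportedOperatorsVisitor):
 * a visitor of this style can extend Calcite's SqlShuttle and throw when it meets an
 * operator the engine cannot execute. The real visitor defers the user-facing message
 * to convertException(); this sketch throws directly to show the traversal technique.
 */
// import org.apache.calcite.sql.SqlCall;
// import org.apache.calcite.sql.SqlKind;
// import org.apache.calcite.sql.SqlNode;
// import org.apache.calcite.sql.util.SqlShuttle;
static class UnsupportedOpCheck extends SqlShuttle {
  @Override
  public SqlNode visit(SqlCall call) {
    // Hypothetical example: reject INTERSECT if the engine does not implement it.
    if (call.getKind() == SqlKind.INTERSECT) {
      throw new UnsupportedOperationException("INTERSECT is not supported");
    }
    return super.visit(call);
  }
}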
private RelNode preprocessNode(RelNode rel) throws SqlUnsupportedException {
  /*
   * Traverse the tree to do the following pre-processing tasks:
   *
   * 1. Replace the convert_from / convert_to functions with their actual implementations.
   *    E.g., convert_from(EXPR, 'JSON') is converted to convert_fromjson(EXPR).
   *    TODO: Ideally all function rewrites would move here instead of DrillOptiq.
   *
   * 2. Check whether the tree contains unsupported functions; throw
   *    SqlUnsupportedException if it does.
   */
  PreProcessLogicalRel visitor = PreProcessLogicalRel.createVisitor(
      planner.getTypeFactory(),
      context.getDrillOperatorTable());
  try {
    rel = rel.accept(visitor);
  } catch (UnsupportedOperationException ex) {
    visitor.convertException();
    throw ex;
  }

  return rel;
}
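/*
 * Illustrative sketch (an assumption; the real PreProcessLogicalRel is more involved)
 * of the convert_from rewrite idea: a Calcite RexShuttle can spot a call such as
 * convert_from(EXPR, 'JSON') and rebuild it as convert_fromjson(EXPR). The abstract
 * lookupFunction() hook below is hypothetical, standing in for the operator-table
 * lookup a concrete subclass would supply.
 */
// import org.apache.calcite.rex.RexBuilder;
// import org.apache.calcite.rex.RexCall;
// import org.apache.calcite.rex.RexLiteral;
// import org.apache.calcite.rex.RexNode;
// import org.apache.calcite.rex.RexShuttle;
// import org.apache.calcite.sql.SqlOperator;
abstract static class ConvertFromRewriter extends RexShuttle {
  private final RexBuilder rexBuilder;

  ConvertFromRewriter(RexBuilder rexBuilder) {
    this.rexBuilder = rexBuilder;
  }

  @Override
  public RexNode visitCall(RexCall call) {
    if ("convert_from".equalsIgnoreCase(call.getOperator().getName())
        && call.getOperands().size() == 2
        && call.getOperands().get(1) instanceof RexLiteral) {
      // e.g. "JSON" -> rebuild as convert_fromjson(EXPR).
      String encoding = RexLiteral.stringValue(call.getOperands().get(1));
      return rexBuilder.makeCall(
          lookupFunction("convert_from" + encoding.toLowerCase()),
          call.getOperands().get(0));
    }
    return super.visitCall(call);
  }

  /** Hypothetical hook: resolve e.g. "convert_fromjson" in the operator table. */
  protected abstract SqlOperator lookupFunction(String name);
}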
/**
 * Do logical planning using both the VolcanoPlanner and the LOPT HepPlanner
 * (for heuristic join ordering).
 *
 * @param relNode the relational expression to optimize
 * @return the optimized logical plan with join order chosen by the LOPT pass
 * @throws RelConversionException
 * @throws SqlUnsupportedException
 */
private RelNode logicalPlanningVolcanoAndLopt(RelNode relNode)
    throws RelConversionException, SqlUnsupportedException {
  final RelNode convertedRelNode = planner.transform(
      DrillSqlWorker.LOGICAL_CONVERT_RULES,
      relNode.getTraitSet().plus(DrillRel.DRILL_LOGICAL),
      relNode);
  log("VolCalciteRel", convertedRelNode, logger);

  final RelNode loptNode = getLoptJoinOrderTree(
      convertedRelNode,
      DrillJoinRel.class,
      DrillRelFactories.DRILL_LOGICAL_JOIN_FACTORY,
      DrillRelFactories.DRILL_LOGICAL_FILTER_FACTORY,
      DrillRelFactories.DRILL_LOGICAL_PROJECT_FACTORY);
  log("HepCalciteRel", loptNode, logger);

  return loptNode;
}
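/*
 * Illustrative sketch (an assumption) of how a getLoptJoinOrderTree-style pass can be
 * assembled with stock Calcite: JoinToMultiJoinRule collapses the join tree into a
 * MultiJoin, then LoptOptimizeJoinRule picks a join order, all inside a HepPlanner.
 * Rule construction details vary across Calcite versions; as the method above shows,
 * Drill passes its own join/filter/project factories rather than using INSTANCE.
 */
// import org.apache.calcite.plan.hep.HepMatchOrder;
// import org.apache.calcite.plan.hep.HepPlanner;
// import org.apache.calcite.plan.hep.HepProgram;
// import org.apache.calcite.plan.hep.HepProgramBuilder;
// import org.apache.calcite.rel.RelNode;
// import org.apache.calcite.rel.rules.JoinToMultiJoinRule;
// import org.apache.calcite.rel.rules.LoptOptimizeJoinRule;
static RelNode optimizeJoinOrder(RelNode root) {
  final HepProgram program = new HepProgramBuilder()
      .addMatchOrder(HepMatchOrder.BOTTOM_UP)
      .addRuleInstance(JoinToMultiJoinRule.INSTANCE)   // collapse joins into a MultiJoin
      .addRuleInstance(LoptOptimizeJoinRule.INSTANCE)  // choose a join order heuristically
      .build();
  final HepPlanner hepPlanner = new HepPlanner(program);
  hepPlanner.setRoot(root);
  return hepPlanner.findBestExp();
}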
private RelNode logicalPlanningVolcano(RelNode relNode)
    throws RelConversionException, SqlUnsupportedException {
  return planner.transform(
      DrillSqlWorker.LOGICAL_RULES,
      relNode.getTraitSet().plus(DrillRel.DRILL_LOGICAL),
      relNode);
}
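/*
 * Illustrative usage sketch (an assumption): a caller could pick between the pure
 * Volcano path and the Volcano + LOPT path based on a planner setting. The
 * isHepOptEnabled() accessor below is a hypothetical name standing in for whatever
 * option actually gates the Hep join-ordering pass.
 */
private RelNode logicalPlanning(RelNode relNode)
    throws RelConversionException, SqlUnsupportedException {
  if (context.getPlannerSettings().isHepOptEnabled()) { // hypothetical option accessor
    return logicalPlanningVolcanoAndLopt(relNode);      // Volcano + LOPT join ordering
  }
  return logicalPlanningVolcano(relNode);               // Volcano only
}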
protected Prel convertToPrel(RelNode drel) throws RelConversionException, SqlUnsupportedException {
  Preconditions.checkArgument(drel.getConvention() == DrillRel.DRILL_LOGICAL);
  RelTraitSet traits = drel.getTraitSet()
      .plus(Prel.DRILL_PHYSICAL)
      .plus(DrillDistributionTrait.SINGLETON);
  Prel phyRelNode;
  try {
    final RelNode relNode = planner.transform(DrillSqlWorker.PHYSICAL_MEM_RULES, traits, drel);
    phyRelNode = (Prel) relNode.accept(new PrelFinalizer());
  } catch (RelOptPlanner.CannotPlanException ex) {
    logger.error(ex.getMessage());
    if (JoinUtils.checkCartesianJoin(drel, new ArrayList<Integer>(), new ArrayList<Integer>())) {
      throw new UnsupportedRelOperatorException(
          "This query cannot be planned possibly due to either a cartesian join or an inequality join");
    } else {
      throw ex;
    }
  }

  OptionManager queryOptions = context.getOptions();

  if (context.getPlannerSettings().isMemoryEstimationEnabled()
      && !MemoryEstimationVisitor.enoughMemory(
          phyRelNode, queryOptions, context.getActiveEndpoints().size())) {
    log("Not enough memory for this plan", phyRelNode, logger);
    logger.debug("Re-planning without hash operations.");

    queryOptions.setOption(OptionValue.createBoolean(
        OptionValue.OptionType.QUERY, PlannerSettings.HASHJOIN.getOptionName(), false));
    queryOptions.setOption(OptionValue.createBoolean(
        OptionValue.OptionType.QUERY, PlannerSettings.HASHAGG.getOptionName(), false));

    try {
      final RelNode relNode = planner.transform(DrillSqlWorker.PHYSICAL_MEM_RULES, traits, drel);
      phyRelNode = (Prel) relNode.accept(new PrelFinalizer());
    } catch (RelOptPlanner.CannotPlanException ex) {
      logger.error(ex.getMessage());
      if (JoinUtils.checkCartesianJoin(drel, new ArrayList<Integer>(), new ArrayList<Integer>())) {
        throw new UnsupportedRelOperatorException(
            "This query cannot be planned possibly due to either a cartesian join or an inequality join");
      } else {
        throw ex;
      }
    }
  }

  /* The order of the following transformations is important. */

  /*
   * 0.) For a "select * from ... join" query, we need to insert a project on top of each
   * scan and a top project just under the screen operator. The project on top of a scan
   * renames * to T1*, while the top project renames T1* back to * before the final result
   * is output. Only the top project allows duplicate columns, since the user could
   * explicitly ask for duplicate columns (select *, col, *). The remaining projects remove
   * duplicate columns when we generate the POP in JSON format.
   */
  phyRelNode = StarColumnConverter.insertRenameProject(phyRelNode);

  /*
   * 1.) A join might cause naming conflicts between its left and right children.
   * In that case, we have to insert a project to rename the conflicting names.
   */
  phyRelNode = JoinPrelRenameVisitor.insertRenameProject(phyRelNode);

  /*
   * 1.1) Swap left/right for an INNER hash join if the left's row count is less than
   * (1 + margin) times the right's row count. We want the smaller dataset on the right
   * side, since the hash table is built on the right side.
   */
  if (context.getPlannerSettings().isHashJoinSwapEnabled()) {
    phyRelNode = SwapHashJoinVisitor.swapHashJoin(phyRelNode,
        new Double(context.getPlannerSettings().getHashJoinSwapMarginFactor()));
  }

  /*
   * 1.2) Break up all expressions with complex outputs into their own project operations.
   */
  phyRelNode = ((Prel) phyRelNode).accept(
      new SplitUpComplexExpressions(
          planner.getTypeFactory(),
          context.getDrillOperatorTable(),
          context.getPlannerSettings().functionImplementationRegistry),
      null);

  /*
   * 1.3) Projections that contain a reference to flatten are rewritten as Flatten
   * operators followed by a Project.
   */
  phyRelNode = ((Prel) phyRelNode).accept(
      new RewriteProjectToFlatten(planner.getTypeFactory(), context.getDrillOperatorTable()),
      null);

  /*
   * 2.) Since our operators work via names rather than indices, we have to make sure to
   * reorder any output before we return data to the user, as we may have accidentally
   * shuffled things. This adds a trivial project to reorder columns prior to output.
   */
  phyRelNode = FinalColumnReorderer.addFinalColumnOrdering(phyRelNode);

  /*
   * 3.) If two fragments are both estimated to have a parallelization width of one,
   * remove the exchange separating them.
   */
  phyRelNode = ExcessiveExchangeIdentifier.removeExcessiveEchanges(phyRelNode, targetSliceSize);

  /* 4.)
   * Add a ProducerConsumer after each scan if the option is set.
   * Use the configured queueSize.
   */
  /* DRILL-1617 Disabling ProducerConsumer as it produces incorrect results
  if (context.getOptions().getOption(PlannerSettings.PRODUCER_CONSUMER.getOptionName()).bool_val) {
    long queueSize = context.getOptions().getOption(PlannerSettings.PRODUCER_CONSUMER_QUEUE_SIZE.getOptionName()).num_val;
    phyRelNode = ProducerConsumerPrelVisitor.addProducerConsumerToScans(phyRelNode, (int) queueSize);
  }
  */

  /* 5.)
   * If the client does not support complex types (Map, Repeated),
   * insert a project that converts them to JSON.
   */
  if (!context.getSession().isSupportComplexTypes()) {
    logger.debug("Client does not support complex types, add ComplexToJson operator.");
    phyRelNode = ComplexToJsonPrelVisitor.addComplexToJsonPrel(phyRelNode);
  }

  /* 6.)
   * Insert LocalExchange (mux and/or demux) nodes.
   */
  phyRelNode = InsertLocalExchangeVisitor.insertLocalExchanges(phyRelNode, queryOptions);

  /* 7.)
   * Next, we add any required selection vector removers given the supported encodings of
   * each operator. This will ultimately move to a new trait, but we're managing it here
   * for now to avoid introducing new issues in planning before the next release.
   */
  phyRelNode = SelectionVectorPrelVisitor.addSelectionRemoversWhereNecessary(phyRelNode);

  /* 8.)
   * Finally, make sure that no rels are repeated. This could happen when querying the
   * same table twice, as Optiq may canonicalize these.
   */
  phyRelNode = RelUniqifier.uniqifyGraph(phyRelNode);

  return phyRelNode;
}
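/*
 * Illustrative usage sketch (not part of the original file): how the methods above
 * compose into a single planning pass for one query. The planQuery name is hypothetical,
 * and the SqlNode argument stands in for the output of whatever parse step precedes
 * validation.
 */
Prel planQuery(SqlNode parsed) throws Exception {
  final TypedSqlNode validated = validateNode(parsed);           // validate + unsupported-op check
  final RelNode rel = convertToRel(validated.getSqlNode());      // SqlNode -> RelNode (window fns via Hep)
  final RelNode preprocessed = preprocessNode(rel);              // convert_from rewrites, support checks
  final RelNode logical = logicalPlanningVolcano(preprocessed);  // to Drill logical convention
  return convertToPrel(logical);                                 // to Drill physical plan
}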