private void applyParameterMappings(Database catalog_db) { ParameterMappingsSet mappings = new ParameterMappingsSet(); // Load ParameterMappingsSet from file if (m_paramMappingsFile != null) { try { mappings.load(m_paramMappingsFile, catalog_db); } catch (IOException ex) { String msg = "Failed to load ParameterMappingsSet file '" + m_paramMappingsFile + "'"; throw new RuntimeException(msg, ex); } } // Build ParameterMappingsSet from user-provided inputs else { for (String procName : m_paramMappings.keySet()) { Procedure catalog_proc = catalog_db.getProcedures().getIgnoreCase(procName); assert (catalog_proc != null) : "Invalid Procedure name for ParameterMappings '" + procName + "'"; for (Integer procParamIdx : m_paramMappings.get(procName).keySet()) { ProcParameter catalog_procParam = catalog_proc.getParameters().get(procParamIdx.intValue()); assert (catalog_procParam != null) : "Invalid ProcParameter for '" + procName + "' at offset " + procParamIdx; Pair<String, Integer> stmtPair = m_paramMappings.get(procName).get(procParamIdx); assert (stmtPair != null); Statement catalog_stmt = catalog_proc.getStatements().getIgnoreCase(stmtPair.getFirst()); assert (catalog_stmt != null) : "Invalid Statement name '" + stmtPair.getFirst() + "' for ParameterMappings " + "for Procedure '" + procName + "'"; StmtParameter catalog_stmtParam = catalog_stmt.getParameters().get(stmtPair.getSecond().intValue()); assert (catalog_stmtParam != null) : "Invalid StmtParameter for '" + catalog_stmt.fullName() + "' at offset " + stmtPair.getSecond(); // HACK: This assumes that the ProcParameter is not an array // and that we want to map the first invocation of the Statement // directly to the ProcParameter. ParameterMapping pm = new ParameterMapping(catalog_stmt, 0, catalog_stmtParam, catalog_procParam, 0, 1.0); mappings.add(pm); } // FOR (ProcParameter) } // FOR (Procedure) } // Apply it! ParametersUtil.applyParameterMappings(catalog_db, mappings); }
protected Statement getStatement(Database catalog_db, Procedure catalog_proc, String stmt_name) { assertNotNull(catalog_db); assertNotNull(catalog_proc); Statement catalog_stmt = catalog_proc.getStatements().get(stmt_name); assert (catalog_stmt != null) : "Failed to retrieve Statement '" + stmt_name + "' from Procedure '" + catalog_proc.getName() + "'"; return (catalog_stmt); }
public Statement getStatementById(int stmtId) { // HACK: The first call will actually build the cache if (this.stmtIdXref.isEmpty()) { synchronized (this.stmtIdXref) { if (this.stmtIdXref.isEmpty()) { for (Procedure catalog_proc : this.procedures.values()) { for (Statement catalog_stmt : catalog_proc.getStatements().values()) { this.stmtIdXref.put(catalog_stmt.getId(), catalog_stmt); } // FOR (stmt) } // FOR (proc) } } // SYNCH } return (this.stmtIdXref.get(stmtId)); }
static String generateStatementsTable(Procedure procedure) { StringBuilder sb = new StringBuilder(); sb.append( " <table class='table tableL2 table-condensed'>\n <thead><tr>" + "<th><span style='white-space: nowrap;'>Statement Name</span></th>" + "<th>Statement SQL</th>" + "<th>Params</th>" + "<th>R/W</th>" + "<th>Attributes</th>" + "</tr></thead>\n <tbody>\n"); for (Statement statement : procedure.getStatements()) { sb.append(genrateStatementRow(procedure, statement)); } sb.append(" </tbody>\n </table>\n"); return sb.toString(); }
private void applyPrefetchableFlags(Database catalog_db) { for (Procedure catalog_proc : catalog_db.getProcedures()) { boolean proc_prefetchable = false; for (Statement statement : catalog_proc.getStatements()) { boolean stmt_prefetchable = true; for (StmtParameter stmtParam : statement.getParameters()) { if (stmtParam.getProcparameter() == null) { stmt_prefetchable = false; break; } } // FOR (StmtParameter) if (stmt_prefetchable) { statement.setPrefetchable(true); proc_prefetchable = true; } } // FOR (Statement) if (proc_prefetchable) { catalog_proc.setPrefetchable(true); } } // FOR (Procedure) }
private void initPlanFragments() { Set<PlanFragment> allFrags = new HashSet<PlanFragment>(); for (Procedure proc : database.getProcedures()) { for (Statement stmt : proc.getStatements()) { allFrags.clear(); allFrags.addAll(stmt.getFragments()); allFrags.addAll(stmt.getMs_fragments()); for (PlanFragment frag : allFrags) { Collection<Table> tables = CatalogUtil.getReferencedTables(frag); int tableIds[] = new int[tables.size()]; int i = 0; for (Table tbl : tables) { tableIds[i++] = tbl.getRelativeIndex(); } // FOR if (frag.getReadonly()) { this.fragmentReadTables.put(Long.valueOf(frag.getId()), tableIds); } else { this.fragmentWriteTables.put(Long.valueOf(frag.getId()), tableIds); } } // FOR (frag) } // FOR (stmt) } // FOR (proc) }
static void compileSingleStmtProcedure( VoltCompiler compiler, HSQLInterface hsql, DatabaseEstimates estimates, Catalog catalog, Database db, ProcedureDescriptor procedureDescriptor) throws VoltCompiler.VoltCompilerException { final String className = procedureDescriptor.m_className; if (className.indexOf('@') != -1) { throw compiler.new VoltCompilerException("User procedure names can't contain \"@\"."); } // get the short name of the class (no package if a user procedure) // use the Table.<builtin> name (allowing the period) if builtin. String shortName = className; if (procedureDescriptor.m_builtInStmt == false) { String[] parts = className.split("\\."); shortName = parts[parts.length - 1]; } // add an entry to the catalog (using the full className) final Procedure procedure = db.getProcedures().add(shortName); for (String groupName : procedureDescriptor.m_authGroups) { final Group group = db.getGroups().get(groupName); if (group == null) { throw compiler .new VoltCompilerException( "Procedure " + className + " allows access by a role " + groupName + " that does not exist"); } final GroupRef groupRef = procedure.getAuthgroups().add(groupName); groupRef.setGroup(group); } procedure.setClassname(className); // sysprocs don't use the procedure compiler procedure.setSystemproc(false); procedure.setDefaultproc(procedureDescriptor.m_builtInStmt); procedure.setHasjava(false); // get the annotation // first try to get one that has been passed from the compiler ProcInfoData info = compiler.getProcInfoOverride(shortName); // then check for the usual one in the class itself // and create a ProcInfo.Data instance for it if (info == null) { info = new ProcInfoData(); if (procedureDescriptor.m_partitionString != null) { info.partitionInfo = procedureDescriptor.m_partitionString; info.singlePartition = true; } } assert (info != null); // ADD THE STATEMENT // add the statement to the catalog Statement catalogStmt = procedure.getStatements().add(VoltDB.ANON_STMT_NAME); // compile the statement StatementPartitioning partitioning = info.singlePartition ? StatementPartitioning.forceSP() : StatementPartitioning.forceMP(); // default to FASTER detmode because stmt procs can't feed read output into writes StatementCompiler.compileFromSqlTextAndUpdateCatalog( compiler, hsql, catalog, db, estimates, catalogStmt, procedureDescriptor.m_singleStmt, procedureDescriptor.m_joinOrder, DeterminismMode.FASTER, partitioning); // if the single stmt is not read only, then the proc is not read only boolean procHasWriteStmts = (catalogStmt.getReadonly() == false); // set the read onlyness of a proc procedure.setReadonly(procHasWriteStmts == false); int seqs = catalogStmt.getSeqscancount(); procedure.setHasseqscans(seqs > 0); // set procedure parameter types CatalogMap<ProcParameter> params = procedure.getParameters(); CatalogMap<StmtParameter> stmtParams = catalogStmt.getParameters(); // set the procedure parameter types from the statement parameter types int paramCount = 0; for (StmtParameter stmtParam : CatalogUtil.getSortedCatalogItems(stmtParams, "index")) { // name each parameter "param1", "param2", etc... 
ProcParameter procParam = params.add("param" + String.valueOf(paramCount)); procParam.setIndex(stmtParam.getIndex()); procParam.setIsarray(stmtParam.getIsarray()); procParam.setType(stmtParam.getJavatype()); paramCount++; } // parse the procinfo procedure.setSinglepartition(info.singlePartition); if (info.singlePartition) { parsePartitionInfo(compiler, db, procedure, info.partitionInfo); if (procedure.getPartitionparameter() >= params.size()) { String msg = "PartitionInfo parameter not a valid parameter for procedure: " + procedure.getClassname(); throw compiler.new VoltCompilerException(msg); } // TODO: The planner does not currently validate that a single-statement plan declared as // single-partition correctly uses // the designated parameter as a partitioning filter, maybe some day. // In theory, the PartitioningForStatement would confirm the use of (only) a parameter as a // partition key -- // or if the partition key was determined to be some other hard-coded constant (expression?) // it might display a warning // message that the passed parameter is assumed to be equal to that constant (expression). } else { if (partitioning.getCountOfIndependentlyPartitionedTables() == 1) { AbstractExpression statementPartitionExpression = partitioning.singlePartitioningExpressionForReport(); if (statementPartitionExpression != null) { // The planner has uncovered an overlooked opportunity to run the statement SP. String msg = null; if (statementPartitionExpression instanceof ParameterValueExpression) { msg = "This procedure would benefit from setting the attribute 'partitioninfo=" + partitioning.getFullColumnName() + ":" + ((ParameterValueExpression) statementPartitionExpression).getParameterIndex() + "'"; } else { String valueDescription = null; Object partitionValue = partitioning.getInferredPartitioningValue(); if (partitionValue == null) { // Statement partitioned on a runtime constant. This is likely to be cryptic, but // hopefully gets the idea across. valueDescription = "of " + statementPartitionExpression.explain(""); } else { valueDescription = partitionValue.toString(); // A simple constant value COULD have been a parameter. } msg = "This procedure would benefit from adding a parameter to be passed the value " + valueDescription + " and setting the attribute 'partitioninfo=" + partitioning.getFullColumnName() + ":" + paramCount + "'"; } compiler.addWarn(msg); } } } }
static void compileJavaProcedure( VoltCompiler compiler, HSQLInterface hsql, DatabaseEstimates estimates, Catalog catalog, Database db, ProcedureDescriptor procedureDescriptor, InMemoryJarfile jarOutput) throws VoltCompiler.VoltCompilerException { final String className = procedureDescriptor.m_className; final Language lang = procedureDescriptor.m_language; // Load the class given the class name Class<?> procClass = procedureDescriptor.m_class; // get the short name of the class (no package) String shortName = deriveShortProcedureName(className); // add an entry to the catalog final Procedure procedure = db.getProcedures().add(shortName); for (String groupName : procedureDescriptor.m_authGroups) { final Group group = db.getGroups().get(groupName); if (group == null) { throw compiler .new VoltCompilerException( "Procedure " + className + " allows access by a role " + groupName + " that does not exist"); } final GroupRef groupRef = procedure.getAuthgroups().add(groupName); groupRef.setGroup(group); } procedure.setClassname(className); // sysprocs don't use the procedure compiler procedure.setSystemproc(false); procedure.setDefaultproc(procedureDescriptor.m_builtInStmt); procedure.setHasjava(true); procedure.setLanguage(lang.name()); ProcedureAnnotation pa = (ProcedureAnnotation) procedure.getAnnotation(); if (pa == null) { pa = new ProcedureAnnotation(); procedure.setAnnotation(pa); } if (procedureDescriptor.m_scriptImpl != null) { // This is a Groovy or other Java derived procedure and we need to add an annotation with // the script to the Procedure element in the Catalog pa.scriptImpl = procedureDescriptor.m_scriptImpl; } // get the annotation // first try to get one that has been passed from the compiler ProcInfoData info = compiler.getProcInfoOverride(shortName); // check if partition info was set in ddl ProcInfoData ddlInfo = null; if (procedureDescriptor.m_partitionString != null && !procedureDescriptor.m_partitionString.trim().isEmpty()) { ddlInfo = new ProcInfoData(); ddlInfo.partitionInfo = procedureDescriptor.m_partitionString; ddlInfo.singlePartition = true; } // then check for the usual one in the class itself // and create a ProcInfo.Data instance for it if (info == null) { info = new ProcInfoData(); ProcInfo annotationInfo = procClass.getAnnotation(ProcInfo.class); // error out if partition info is present in both ddl and annotation if (annotationInfo != null) { if (ddlInfo != null) { String msg = "Procedure: " + shortName + " has partition properties defined both in "; msg += "class \"" + className + "\" and in the schema definition file(s)"; throw compiler.new VoltCompilerException(msg); } // Prevent AutoGenerated DDL from including PARTITION PROCEDURE for this procedure. 
pa.classAnnotated = true; info.partitionInfo = annotationInfo.partitionInfo(); info.singlePartition = annotationInfo.singlePartition(); } else if (ddlInfo != null) { info = ddlInfo; } } else { pa.classAnnotated = true; } assert (info != null); // make sure multi-partition implies no partitioning info if (info.singlePartition == false) { if ((info.partitionInfo != null) && (info.partitionInfo.length() > 0)) { String msg = "Procedure: " + shortName + " is annotated as multi-partition"; msg += " but partitionInfo has non-empty value: \"" + info.partitionInfo + "\""; throw compiler.new VoltCompilerException(msg); } } // track if there are any writer statements and/or sequential scans and/or an overlooked common // partitioning parameter boolean procHasWriteStmts = false; boolean procHasSeqScans = false; // procWantsCommonPartitioning == true but commonPartitionExpression == null signifies a proc // for which the planner was requested to attempt to find an SP plan, but that was not possible // -- it had a replicated write or it had one or more partitioned reads that were not all // filtered by the same partition key value -- so it was planned as an MP proc. boolean procWantsCommonPartitioning = true; AbstractExpression commonPartitionExpression = null; String exampleSPstatement = null; Object exampleSPvalue = null; // iterate through the fields and get valid sql statements Map<String, Object> fields = lang.accept(procedureIntrospector(compiler), procClass); // determine if proc is read or read-write by checking if the proc contains any write sql stmts boolean readWrite = false; for (Object field : fields.values()) { if (!(field instanceof SQLStmt)) continue; SQLStmt stmt = (SQLStmt) field; QueryType qtype = QueryType.getFromSQL(stmt.getText()); if (!qtype.isReadOnly()) { readWrite = true; break; } } // default to FASTER determinism mode, which may favor non-deterministic plans // but if it's a read-write proc, use a SAFER planning mode wrt determinism. final DeterminismMode detMode = readWrite ? DeterminismMode.SAFER : DeterminismMode.FASTER; for (Entry<String, Object> entry : fields.entrySet()) { if (!(entry.getValue() instanceof SQLStmt)) continue; String stmtName = entry.getKey(); SQLStmt stmt = (SQLStmt) entry.getValue(); // add the statement to the catalog Statement catalogStmt = procedure.getStatements().add(stmtName); // compile the statement StatementPartitioning partitioning = info.singlePartition ? StatementPartitioning.forceSP() : StatementPartitioning.forceMP(); boolean cacheHit = StatementCompiler.compileFromSqlTextAndUpdateCatalog( compiler, hsql, catalog, db, estimates, catalogStmt, stmt.getText(), stmt.getJoinOrder(), detMode, partitioning); // if this was a cache hit or specified single, don't worry about figuring out more // partitioning if (partitioning.wasSpecifiedAsSingle() || cacheHit) { procWantsCommonPartitioning = false; // Don't try to infer what's already been asserted. // The planner does not currently attempt to second-guess a plan declared as // single-partition, maybe some day. // In theory, the PartitioningForStatement would confirm the use of (only) a parameter as a // partition key -- // or if the partition key was determined to be some other constant (expression?) it might // display an informational // message that the passed parameter is assumed to be equal to the hard-coded partition key // constant (expression). // Validate any inferred statement partitioning given the statement's possible usage, until // a contradiction is found. 
} else if (procWantsCommonPartitioning) { // Only consider statements that are capable of running SP with a partitioning parameter // that does not seem to // conflict with the partitioning of prior statements. if (partitioning.getCountOfIndependentlyPartitionedTables() == 1) { AbstractExpression statementPartitionExpression = partitioning.singlePartitioningExpressionForReport(); if (statementPartitionExpression != null) { if (commonPartitionExpression == null) { commonPartitionExpression = statementPartitionExpression; exampleSPstatement = stmt.getText(); exampleSPvalue = partitioning.getInferredPartitioningValue(); } else if (commonPartitionExpression.equals(statementPartitionExpression) || (statementPartitionExpression instanceof ParameterValueExpression && commonPartitionExpression instanceof ParameterValueExpression)) { // Any constant used for partitioning would have to be the same for all statements, // but // any statement parameter used for partitioning MIGHT come from the same proc // parameter as // any other statement's parameter used for partitioning. } else { procWantsCommonPartitioning = false; // appears to be different partitioning for different statements } } else { // There is a statement with a partitioned table whose partitioning column is // not equality filtered with a constant or param. Abandon all hope. procWantsCommonPartitioning = false; } // Usually, replicated-only statements in a mix with others have no effect on the MP/SP // decision } else if (partitioning.getCountOfPartitionedTables() == 0) { // but SP is strictly forbidden for DML, to maintain the consistency of the replicated // data. if (partitioning.getIsReplicatedTableDML()) { procWantsCommonPartitioning = false; } } else { // There is a statement with a partitioned table whose partitioning column is // not equality filtered with a constant or param. Abandon all hope. procWantsCommonPartitioning = false; } } // if a single stmt is not read only, then the proc is not read only if (catalogStmt.getReadonly() == false) { procHasWriteStmts = true; } if (catalogStmt.getSeqscancount() > 0) { procHasSeqScans = true; } } // MIGHT the planner have uncovered an overlooked opportunity to run all statements SP? if (procWantsCommonPartitioning && (commonPartitionExpression != null)) { String msg = null; if (commonPartitionExpression instanceof ParameterValueExpression) { msg = "This procedure might benefit from an @ProcInfo annotation designating parameter " + ((ParameterValueExpression) commonPartitionExpression).getParameterIndex() + " of statement '" + exampleSPstatement + "'"; } else { String valueDescription = null; if (exampleSPvalue == null) { // Statements partitioned on a runtime constant. This is likely to be cryptic, but // hopefully gets the idea across. valueDescription = "of " + commonPartitionExpression.explain(""); } else { valueDescription = exampleSPvalue.toString(); // A simple constant value COULD have been a parameter. 
} msg = "This procedure might benefit from an @ProcInfo annotation referencing an added parameter passed the value " + valueDescription; } compiler.addInfo(msg); } // set the read onlyness of a proc procedure.setReadonly(procHasWriteStmts == false); procedure.setHasseqscans(procHasSeqScans); for (Statement catalogStmt : procedure.getStatements()) { if (catalogStmt.getIscontentdeterministic() == false) { String potentialErrMsg = "Procedure " + shortName + " has a statement with a non-deterministic result - statement: \"" + catalogStmt.getSqltext() + "\" , reason: " + catalogStmt.getNondeterminismdetail(); // throw compiler.new VoltCompilerException(potentialErrMsg); compiler.addWarn(potentialErrMsg); } else if (catalogStmt.getIsorderdeterministic() == false) { String warnMsg; if (procHasWriteStmts) { String rwPotentialErrMsg = "Procedure " + shortName + " is RW and has a statement whose result has a non-deterministic ordering - statement: \"" + catalogStmt.getSqltext() + "\", reason: " + catalogStmt.getNondeterminismdetail(); // throw compiler.new VoltCompilerException(rwPotentialErrMsg); warnMsg = rwPotentialErrMsg; } else { warnMsg = "Procedure " + shortName + " has a statement with a non-deterministic result - statement: \"" + catalogStmt.getSqltext() + "\", reason: " + catalogStmt.getNondeterminismdetail(); } compiler.addWarn(warnMsg); } } // set procedure parameter types CatalogMap<ProcParameter> params = procedure.getParameters(); Class<?>[] paramTypes = lang.accept(procedureEntryPointParametersTypeExtractor, fields); for (int i = 0; i < paramTypes.length; i++) { Class<?> cls = paramTypes[i]; ProcParameter param = params.add(String.valueOf(i)); param.setIndex(i); // handle the case where the param is an array if (cls.isArray()) { param.setIsarray(true); cls = cls.getComponentType(); } else param.setIsarray(false); // boxed types are not supported parameters at this time if ((cls == Long.class) || (cls == Integer.class) || (cls == Short.class) || (cls == Byte.class) || (cls == Double.class) || (cls == Character.class) || (cls == Boolean.class)) { String msg = "Procedure: " + shortName + " has a parameter with a boxed type: "; msg += cls.getSimpleName(); msg += ". Replace this parameter with the corresponding primitive type and the procedure may compile."; throw compiler.new VoltCompilerException(msg); } else if ((cls == Float.class) || (cls == float.class)) { String msg = "Procedure: " + shortName + " has a parameter with type: "; msg += cls.getSimpleName(); msg += ". 
Replace this parameter type with double and the procedure may compile."; throw compiler.new VoltCompilerException(msg); } VoltType type; try { type = VoltType.typeFromClass(cls); } catch (VoltTypeException e) { // handle the case where the type is invalid String msg = "Procedure: " + shortName + " has a parameter with invalid type: "; msg += cls.getSimpleName(); throw compiler.new VoltCompilerException(msg); } catch (RuntimeException e) { String msg = "Procedure: " + shortName + " unexpectedly failed a check on a parameter of type: "; msg += cls.getSimpleName(); msg += " with error: "; msg += e.toString(); throw compiler.new VoltCompilerException(msg); } param.setType(type.getValue()); } // parse the procinfo procedure.setSinglepartition(info.singlePartition); if (info.singlePartition) { parsePartitionInfo(compiler, db, procedure, info.partitionInfo); if (procedure.getPartitionparameter() >= paramTypes.length) { String msg = "PartitionInfo parameter not a valid parameter for procedure: " + procedure.getClassname(); throw compiler.new VoltCompilerException(msg); } // check the type of partition parameter meets our high standards Class<?> partitionType = paramTypes[procedure.getPartitionparameter()]; Class<?>[] validPartitionClzzes = { Long.class, Integer.class, Short.class, Byte.class, long.class, int.class, short.class, byte.class, String.class, byte[].class }; boolean found = false; for (Class<?> candidate : validPartitionClzzes) { if (partitionType == candidate) found = true; } if (!found) { String msg = "PartitionInfo parameter must be a String or Number for procedure: " + procedure.getClassname(); throw compiler.new VoltCompilerException(msg); } VoltType columnType = VoltType.get((byte) procedure.getPartitioncolumn().getType()); VoltType paramType = VoltType.typeFromClass(partitionType); if (!columnType.canExactlyRepresentAnyValueOf(paramType)) { String msg = "Type mismatch between partition column and partition parameter for procedure " + procedure.getClassname() + " may cause overflow or loss of precision.\nPartition column is type " + columnType + " and partition parameter is type " + paramType; throw compiler.new VoltCompilerException(msg); } else if (!paramType.canExactlyRepresentAnyValueOf(columnType)) { String msg = "Type mismatch between partition column and partition parameter for procedure " + procedure.getClassname() + " does not allow the full range of partition key values.\nPartition column is type " + columnType + " and partition parameter is type " + paramType; compiler.addWarn(msg); } } // put the compiled code for this procedure into the jarfile // need to find the outermost ancestor class for the procedure in the event // that it's actually an inner (or inner inner...) class. // addClassToJar recursively adds all the children, which should include this // class Class<?> ancestor = procClass; while (ancestor.getEnclosingClass() != null) { ancestor = ancestor.getEnclosingClass(); } compiler.addClassToJar(jarOutput, ancestor); }
@SuppressWarnings("deprecation") @Override public void setUp() throws IOException, InterruptedException { VoltDB.instance().readBuildInfo("Test"); // compile a catalog String testDir = BuildDirectoryUtils.getBuildDirectoryPath(); String catalogJar = testDir + File.separator + JAR; TPCCProjectBuilder pb = new TPCCProjectBuilder(); pb.addDefaultSchema(); pb.addDefaultPartitioning(); pb.addProcedures(MultiSiteSelect.class, InsertNewOrder.class); pb.compile(catalogJar, 2, 0); // load a catalog byte[] bytes = CatalogUtil.toBytes(new File(catalogJar)); String serializedCatalog = CatalogUtil.loadCatalogFromJar(bytes, null); // create the catalog (that will be passed to the ClientInterface catalog = new Catalog(); catalog.execute(serializedCatalog); // update the catalog with the data from the deployment file String pathToDeployment = pb.getPathToDeployment(); assertTrue(CatalogUtil.compileDeploymentAndGetCRC(catalog, pathToDeployment, true) >= 0); cluster = catalog.getClusters().get("cluster"); CatalogMap<Procedure> procedures = cluster.getDatabases().get("database").getProcedures(); Procedure insertProc = procedures.get("InsertNewOrder"); assert (insertProc != null); selectProc = procedures.get("MultiSiteSelect"); assert (selectProc != null); // Each EE needs its own thread for correct initialization. final AtomicReference<ExecutionEngine> site1Reference = new AtomicReference<ExecutionEngine>(); final byte configBytes[] = LegacyHashinator.getConfigureBytes(2); Thread site1Thread = new Thread() { @Override public void run() { site1Reference.set( new ExecutionEngineJNI( cluster.getRelativeIndex(), 1, 0, 0, "", 100, HashinatorType.LEGACY, configBytes)); } }; site1Thread.start(); site1Thread.join(); final AtomicReference<ExecutionEngine> site2Reference = new AtomicReference<ExecutionEngine>(); Thread site2Thread = new Thread() { @Override public void run() { site2Reference.set( new ExecutionEngineJNI( cluster.getRelativeIndex(), 2, 1, 0, "", 100, HashinatorType.LEGACY, configBytes)); } }; site2Thread.start(); site2Thread.join(); // create two EEs site1 = new ExecutionSite(0); // site 0 ee1 = site1Reference.get(); ee1.loadCatalog(0, catalog.serialize()); site2 = new ExecutionSite(1); // site 1 ee2 = site2Reference.get(); ee2.loadCatalog(0, catalog.serialize()); // cache some plan fragments selectStmt = selectProc.getStatements().get("selectAll"); assert (selectStmt != null); int i = 0; // this kinda assumes the right order for (PlanFragment f : selectStmt.getFragments()) { if (i == 0) selectTopFrag = f; else selectBottomFrag = f; i++; } assert (selectTopFrag != null); assert (selectBottomFrag != null); if (selectTopFrag.getHasdependencies() == false) { PlanFragment temp = selectTopFrag; selectTopFrag = selectBottomFrag; selectBottomFrag = temp; } // get the insert frag Statement insertStmt = insertProc.getStatements().get("insert"); assert (insertStmt != null); for (PlanFragment f : insertStmt.getFragments()) insertFrag = f; // populate plan cache ActivePlanRepository.clear(); ActivePlanRepository.addFragmentForTest( CatalogUtil.getUniqueIdForFragment(selectBottomFrag), Encoder.base64Decode(selectBottomFrag.getPlannodetree())); ActivePlanRepository.addFragmentForTest( CatalogUtil.getUniqueIdForFragment(selectTopFrag), Encoder.base64Decode(selectTopFrag.getPlannodetree())); ActivePlanRepository.addFragmentForTest( CatalogUtil.getUniqueIdForFragment(insertFrag), Encoder.base64Decode(insertFrag.getPlannodetree())); // insert some data ParameterSet params = ParameterSet.fromArrayNoCopy(1L, 1L, 1L); 
VoltTable[] results = ee2.executePlanFragments( 1, new long[] {CatalogUtil.getUniqueIdForFragment(insertFrag)}, null, new ParameterSet[] {params}, 1, 0, 42, Long.MAX_VALUE); assert (results.length == 1); assert (results[0].asScalarLong() == 1L); params = ParameterSet.fromArrayNoCopy(2L, 2L, 2L); results = ee1.executePlanFragments( 1, new long[] {CatalogUtil.getUniqueIdForFragment(insertFrag)}, null, new ParameterSet[] {params}, 2, 1, 42, Long.MAX_VALUE); assert (results.length == 1); assert (results[0].asScalarLong() == 1L); }
public MultiPartitionParticipantTxnState( Mailbox mbox, ExecutionSite site, TransactionInfoBaseMessage notice) { super(mbox, site, notice); m_hsId = site.getSiteId(); m_nonCoordinatingSites = null; m_isCoordinator = false; m_context = site.m_context; // Check to make sure we are the coordinator; it is possible to get an initiate task // where we aren't the coordinator because we are a replica of the coordinator. if (notice instanceof InitiateTaskMessage) { // keep this around for DR purposes m_invocation = ((InitiateTaskMessage) notice).getStoredProcedureInvocation(); // Determine if mismatched results are okay. if (m_invocation != null) { String procName = m_invocation.getProcName(); if (procName.startsWith("@AdHoc")) { // For now the best we can do with ad hoc is to always allow mismatched results. // We don't know if it's non-deterministic or not. But the main use case for // being lenient is "SELECT * FROM TABLE LIMIT n", typically run as ad hoc. m_allowMismatchedResults = true; } else { // Walk through the statements to see if any are non-deterministic. if (m_context != null && m_context.procedures != null) { Procedure proc = m_context.procedures.get(procName); if (proc != null) { CatalogMap<Statement> stmts = proc.getStatements(); if (stmts != null) { for (Statement stmt : stmts) { if (!stmt.getIscontentdeterministic() || !stmt.getIsorderdeterministic()) { m_allowMismatchedResults = true; break; } } } } } } } if (notice.getCoordinatorHSId() == m_hsId) { m_isCoordinator = true; m_task = (InitiateTaskMessage) notice; m_durabilityFlag = m_task.getDurabilityFlagIfItExists(); SiteTracker tracker = site.getSiteTracker(); m_readyWorkUnits.add( new WorkUnit(tracker, m_task, null, m_hsId, null, false, m_allowMismatchedResults)); /* * ENG-3374: Use the same set of non-coordinator sites the * initiator sent out the participant notices to, so that when * the coordinator sends out the fragment works all participants * will get them. * * During rejoin, the initiator's site tracker and the * coordinator's site tracker may not be consistent for a brief * period of time. So can't rely on the site tracker to tell the * coordinator which sites to send work to. */ m_nonCoordinatingSites = m_task.getNonCoordinatorSites(); } else { m_durabilityFlag = ((InitiateTaskMessage) notice).getDurabilityFlagIfItExists(); m_task = null; } } else { m_task = null; m_durabilityFlag = null; m_invocation = null; } }
protected void reflect() { // fill in the sql for single statement procs if (m_catProc.getHasjava() == false) { try { Map<String, Field> stmtMap = ProcedureCompiler.getValidSQLStmts(null, m_procedureName, m_procedure.getClass(), true); Field f = stmtMap.get(VoltDB.ANON_STMT_NAME); assert (f != null); SQLStmt stmt = (SQLStmt) f.get(m_procedure); Statement statement = m_catProc.getStatements().get(VoltDB.ANON_STMT_NAME); stmt.sqlText = statement.getSqltext().getBytes(VoltDB.UTF8ENCODING); m_cachedSingleStmt.stmt = stmt; int numParams = m_catProc.getParameters().size(); m_paramTypes = new Class<?>[numParams]; m_paramTypeIsPrimitive = new boolean[numParams]; m_paramTypeIsArray = new boolean[numParams]; m_paramTypeComponentType = new Class<?>[numParams]; for (ProcParameter param : m_catProc.getParameters()) { VoltType type = VoltType.get((byte) param.getType()); if (type == VoltType.INTEGER) { type = VoltType.BIGINT; } else if (type == VoltType.SMALLINT) { type = VoltType.BIGINT; } else if (type == VoltType.TINYINT) { type = VoltType.BIGINT; } else if (type == VoltType.NUMERIC) { type = VoltType.FLOAT; } m_paramTypes[param.getIndex()] = type.classFromType(); m_paramTypeIsPrimitive[param.getIndex()] = m_paramTypes[param.getIndex()].isPrimitive(); m_paramTypeIsArray[param.getIndex()] = param.getIsarray(); assert (m_paramTypeIsArray[param.getIndex()] == false); m_paramTypeComponentType[param.getIndex()] = null; // rtb: what is broken (ambiguous?) that is being patched here? // hack to fixup varbinary support for statement procedures if (m_paramTypes[param.getIndex()] == byte[].class) { m_paramTypeComponentType[param.getIndex()] = byte.class; m_paramTypeIsArray[param.getIndex()] = true; } } } catch (Exception e) { // shouldn't throw anything outside of the compiler e.printStackTrace(); } } else { // parse the java run method Method[] methods = m_procedure.getClass().getDeclaredMethods(); for (final Method m : methods) { String name = m.getName(); if (name.equals("run")) { if (Modifier.isPublic(m.getModifiers()) == false) continue; m_procMethod = m; m_paramTypes = m.getParameterTypes(); int tempParamTypesLength = m_paramTypes.length; m_paramTypeIsPrimitive = new boolean[tempParamTypesLength]; m_paramTypeIsArray = new boolean[tempParamTypesLength]; m_paramTypeComponentType = new Class<?>[tempParamTypesLength]; for (int ii = 0; ii < tempParamTypesLength; ii++) { m_paramTypeIsPrimitive[ii] = m_paramTypes[ii].isPrimitive(); m_paramTypeIsArray[ii] = m_paramTypes[ii].isArray(); m_paramTypeComponentType[ii] = m_paramTypes[ii].getComponentType(); } } } if (m_procMethod == null) { throw new RuntimeException( "No \"run\" method found in: " + m_procedure.getClass().getName()); } } // iterate through the fields and deal with sql statements Map<String, Field> stmtMap = null; try { stmtMap = ProcedureCompiler.getValidSQLStmts(null, m_procedureName, m_procedure.getClass(), true); } catch (Exception e1) { // shouldn't throw anything outside of the compiler e1.printStackTrace(); return; } Field[] fields = new Field[stmtMap.size()]; int index = 0; for (Field f : stmtMap.values()) { fields[index++] = f; } for (final Field f : fields) { String name = f.getName(); Statement s = m_catProc.getStatements().get(name); if (s != null) { try { /* * Cache all the information we need about the statements in this stored * procedure locally instead of pulling them from the catalog on * a regular basis. 
*/ SQLStmt stmt = (SQLStmt) f.get(m_procedure); // done in a static method in an abstract class so users don't call it initSQLStmt(stmt, s); } catch (IllegalArgumentException e) { e.printStackTrace(); } catch (IllegalAccessException e) { e.printStackTrace(); } // LOG.fine("Found statement " + name); } } }
public ClientResponseImpl call(Object... paramListIn) { // verify per-txn state has been reset assert (m_statusCode == ClientResponse.UNINITIALIZED_APP_STATUS_CODE); assert (m_statusString == null); assert (m_cachedRNG == null); // reset the hash of results m_inputCRC.reset(); // use local var to avoid warnings about reassigning method argument Object[] paramList = paramListIn; ClientResponseImpl retval = null; // assert no sql is queued assert (m_batch.size() == 0); try { m_statsCollector.beginProcedure(); byte status = ClientResponse.SUCCESS; VoltTable[] results = null; // inject sysproc execution context as the first parameter. if (isSystemProcedure()) { final Object[] combinedParams = new Object[paramList.length + 1]; combinedParams[0] = m_systemProcedureContext; for (int i = 0; i < paramList.length; ++i) combinedParams[i + 1] = paramList[i]; // swap the lists. paramList = combinedParams; } if (paramList.length != m_paramTypes.length) { m_statsCollector.endProcedure(false, true, null, null); String msg = "PROCEDURE " + m_procedureName + " EXPECTS " + String.valueOf(m_paramTypes.length) + " PARAMS, BUT RECEIVED " + String.valueOf(paramList.length); status = ClientResponse.GRACEFUL_FAILURE; return getErrorResponse(status, msg, null); } for (int i = 0; i < m_paramTypes.length; i++) { try { paramList[i] = ParameterConverter.tryToMakeCompatible( m_paramTypeIsPrimitive[i], m_paramTypeIsArray[i], m_paramTypes[i], m_paramTypeComponentType[i], paramList[i]); } catch (Exception e) { m_statsCollector.endProcedure(false, true, null, null); String msg = "PROCEDURE " + m_procedureName + " TYPE ERROR FOR PARAMETER " + i + ": " + e.toString(); status = ClientResponse.GRACEFUL_FAILURE; return getErrorResponse(status, msg, null); } } boolean error = false; boolean abort = false; // run a regular java class if (m_catProc.getHasjava()) { try { if (log.isTraceEnabled()) { log.trace( "invoking... procMethod=" + m_procMethod.getName() + ", class=" + getClass().getName()); } try { Object rawResult = m_procMethod.invoke(m_procedure, paramList); results = getResultsFromRawResults(rawResult); } catch (IllegalAccessException e) { // If reflection fails, invoke the same error handling that other exceptions do throw new InvocationTargetException(e); } log.trace("invoked"); } catch (InvocationTargetException itex) { // itex.printStackTrace(); Throwable ex = itex.getCause(); if (ex instanceof VoltAbortException && !(ex instanceof EEException)) { abort = true; } else { error = true; } if (ex instanceof Error) { m_statsCollector.endProcedure(false, true, null, null); throw (Error) ex; } retval = getErrorResponse(ex); } } // single statement only work // (this could be made faster, but with less code re-use) else { assert (m_catProc.getStatements().size() == 1); try { m_cachedSingleStmt.params = getCleanParams(m_cachedSingleStmt.stmt, paramList); if (getHsqlBackendIfExists() != null) { // HSQL handling CatalogMap<StmtParameter> sparamsMap = m_cachedSingleStmt.stmt.catStmt.getParameters(); List<StmtParameter> sparams = CatalogUtil.getSortedCatalogItems(sparamsMap, "index"); VoltTable table = getHsqlBackendIfExists() .runSQLWithSubstitutions( m_cachedSingleStmt.stmt, m_cachedSingleStmt.params, sparams); results = new VoltTable[] {table}; } else { m_batch.add(m_cachedSingleStmt); results = voltExecuteSQL(true); } } catch (SerializableException ex) { retval = getErrorResponse(ex); } } // Record statistics for procedure call. StoredProcedureInvocation invoc = (m_txnState != null ? 
m_txnState.getInvocation() : null); ParameterSet paramSet = (invoc != null ? invoc.getParams() : null); m_statsCollector.endProcedure(abort, error, results, paramSet); // don't leave empty handed if (results == null) results = new VoltTable[0]; if (retval == null) retval = new ClientResponseImpl(status, m_statusCode, m_statusString, results, null); int hash = (int) m_inputCRC.getValue(); if ((retval.getStatus() == ClientResponse.SUCCESS) && (hash != 0)) { retval.setHash(hash); } if ((m_txnState != null) && // may be null for tests (m_txnState.getInvocation() != null) && (m_txnState.getInvocation().getType() == ProcedureInvocationType.REPLICATED)) { retval.convertResultsToHashForDeterminism(); } } finally { // finally at the call(..) scope to ensure params can be // garbage collected and that the queue will be empty for // the next call m_batch.clear(); // reset other per-txn state m_txnState = null; m_statusCode = ClientResponse.UNINITIALIZED_APP_STATUS_CODE; m_statusString = null; m_cachedRNG = null; m_cachedSingleStmt.params = null; m_cachedSingleStmt.expectation = null; m_seenFinalBatch = false; } return retval; }
/** * Get some embeddable HTML of some generic catalog/application stats that is drawn on the first * page of the report. */ static String getStatsHTML(Database db, ArrayList<Feedback> warnings) { StringBuilder sb = new StringBuilder(); sb.append("<table class='table table-condensed'>\n"); // count things int indexes = 0, views = 0, statements = 0; int partitionedTables = 0, replicatedTables = 0; int partitionedProcs = 0, replicatedProcs = 0; int readProcs = 0, writeProcs = 0; for (Table t : db.getTables()) { if (t.getMaterializer() != null) { views++; } else { if (t.getIsreplicated()) { replicatedTables++; } else { partitionedTables++; } } indexes += t.getIndexes().size(); } for (Procedure p : db.getProcedures()) { // skip auto-generated crud procs if (p.getDefaultproc()) { continue; } if (p.getSinglepartition()) { partitionedProcs++; } else { replicatedProcs++; } if (p.getReadonly()) { readProcs++; } else { writeProcs++; } statements += p.getStatements().size(); } // version sb.append("<tr><td>Compiled by VoltDB Version</td><td>"); sb.append(VoltDB.instance().getVersionString()).append("</td></tr>\n"); // timestamp sb.append("<tr><td>Compiled on</td><td>"); SimpleDateFormat sdf = new SimpleDateFormat("EEE, d MMM yyyy HH:mm:ss z"); sb.append(sdf.format(m_timestamp)).append("</td></tr>\n"); // tables sb.append("<tr><td>Table Count</td><td>"); sb.append( String.format( "%d (%d partitioned / %d replicated)", partitionedTables + replicatedTables, partitionedTables, replicatedTables)); sb.append("</td></tr>\n"); // views sb.append("<tr><td>Materialized View Count</td><td>").append(views).append("</td></tr>\n"); // indexes sb.append("<tr><td>Index Count</td><td>").append(indexes).append("</td></tr>\n"); // procedures sb.append("<tr><td>Procedure Count</td><td>"); sb.append( String.format( "%d (%d partitioned / %d replicated) (%d read-only / %d read-write)", partitionedProcs + replicatedProcs, partitionedProcs, replicatedProcs, readProcs, writeProcs)); sb.append("</td></tr>\n"); // statements sb.append("<tr><td>SQL Statement Count</td><td>").append(statements).append("</td></tr>\n"); sb.append("</table>\n\n"); // warnings, add warning section if any if (warnings.size() > 0) { sb.append("<h4>Warnings</h4>"); sb.append("<table class='table table-condensed'>\n"); for (Feedback warning : warnings) { String procName = warning.getFileName().replace(".class", ""); String nameLink = ""; // not a warning during compiling procedures, must from the schema if (procName.compareToIgnoreCase("null") == 0) { String schemaName = ""; String warningMsg = warning.getMessage().toLowerCase(); if (warningMsg.contains("table ")) { int begin = warningMsg.indexOf("table ") + 6; int end = (warningMsg.substring(begin)).indexOf(" "); schemaName = warningMsg.substring(begin, begin + end); } nameLink = "<a href='#s-" + schemaName + "'>" + schemaName.toUpperCase() + "</a>"; } else { nameLink = "<a href='#p-" + procName.toLowerCase() + "'>" + procName + "</a>"; } sb.append("<tr><td>") .append(nameLink) .append("</td><td>") .append(warning.getMessage()) .append("</td></tr>\n"); } sb.append("").append("</table>\n").append("</td></tr>\n"); } return sb.toString(); }
static String generateProcedureRow(Procedure procedure) { StringBuilder sb = new StringBuilder(); sb.append("<tr class='primaryrow'>"); // column 1: procedure name String anchor = procedure.getTypeName().toLowerCase(); sb.append( "<td style='white-space: nowrap'><i id='p-" + anchor + "--icon' class='icon-chevron-right'></i> <a href='#p-"); sb.append(anchor).append("' id='p-").append(anchor).append("' class='togglex'>"); sb.append(procedure.getTypeName()); sb.append("</a></td>"); // column 2: parameter types sb.append("<td>"); List<ProcParameter> params = CatalogUtil.getSortedCatalogItems(procedure.getParameters(), "index"); List<String> paramTypes = new ArrayList<String>(); for (ProcParameter param : params) { String paramType = VoltType.get((byte) param.getType()).name(); if (param.getIsarray()) { paramType += "[]"; } paramTypes.add(paramType); } if (paramTypes.size() == 0) { sb.append("<i>None</i>"); } sb.append(StringUtils.join(paramTypes, ", ")); sb.append("</td>"); // column 3: partitioning sb.append("<td>"); if (procedure.getSinglepartition()) { tag(sb, "success", "Single"); } else { tag(sb, "warning", "Multi"); } sb.append("</td>"); // column 4: read/write sb.append("<td>"); if (procedure.getReadonly()) { tag(sb, "success", "Read"); } else { tag(sb, "warning", "Write"); } sb.append("</td>"); // column 5: access sb.append("<td>"); List<String> groupNames = new ArrayList<String>(); for (GroupRef groupRef : procedure.getAuthgroups()) { groupNames.add(groupRef.getGroup().getTypeName()); } if (groupNames.size() == 0) { sb.append("<i>None</i>"); } sb.append(StringUtils.join(groupNames, ", ")); sb.append("</td>"); // column 6: attributes sb.append("<td>"); if (procedure.getHasjava()) { tag(sb, "info", "Java"); } else { tag(sb, null, "Single-Stmt"); } boolean isND = false; int scanCount = 0; for (Statement stmt : procedure.getStatements()) { scanCount += stmt.getSeqscancount(); if (!stmt.getIscontentdeterministic() || !stmt.getIsorderdeterministic()) { isND = true; // mark the procedure as having a non-deterministic statement } } if (isND) { tag(sb, "inverse", "Determinism"); } if (scanCount > 0) { tag(sb, "important", "Scans"); } sb.append("</td>"); sb.append("</tr>\n"); // BUILD THE DROPDOWN FOR THE STATEMENT/DETAIL TABLE sb.append( "<tr class='tablesorter-childRow'><td class='invert' colspan='6' id='p-" + procedure.getTypeName().toLowerCase() + "--dropdown'>\n"); // output partitioning parameter info if (procedure.getSinglepartition()) { String pTable = procedure.getPartitioncolumn().getParent().getTypeName(); String pColumn = procedure.getPartitioncolumn().getTypeName(); int pIndex = procedure.getPartitionparameter(); sb.append( String.format( "<p>Partitioned on parameter %d which maps to column %s" + " of table <a class='invert' href='#s-%s'>%s</a>.</p>", pIndex, pColumn, pTable, pTable)); } // output what schema this interacts with ProcedureAnnotation annotation = (ProcedureAnnotation) procedure.getAnnotation(); if (annotation != null) { // make sure tables appear in only one category annotation.tablesRead.removeAll(annotation.tablesUpdated); if (annotation.tablesRead.size() > 0) { sb.append("<p>Read-only access to tables: "); List<String> tables = new ArrayList<String>(); for (Table table : annotation.tablesRead) { tables.add("<a href='#s-" + table.getTypeName() + "'>" + table.getTypeName() + "</a>"); } sb.append(StringUtils.join(tables, ", ")); sb.append("</p>"); } if (annotation.tablesUpdated.size() > 0) { sb.append("<p>Read/Write access to tables: "); List<String> tables = new ArrayList<String>(); for (Table table : 
annotation.tablesUpdated) { tables.add("<a href='#s-" + table.getTypeName() + "'>" + table.getTypeName() + "</a>"); } sb.append(StringUtils.join(tables, ", ")); sb.append("</p>"); } if (annotation.indexesUsed.size() > 0) { sb.append("<p>Uses indexes: "); List<String> indexes = new ArrayList<String>(); for (Index index : annotation.indexesUsed) { Table table = (Table) index.getParent(); indexes.add( "<a href='#s-" + table.getTypeName() + "-" + index.getTypeName() + "'>" + index.getTypeName() + "</a>"); } sb.append(StringUtils.join(indexes, ", ")); sb.append("</p>"); } } sb.append(generateStatementsTable(procedure)); sb.append("</td></tr>\n"); return sb.toString(); }
/** * Compile and cache the statement and plan and return the final plan graph. * * @param sql * @param paramCount */ public List<AbstractPlanNode> compile( String sql, int paramCount, String joinOrder, Object partitionParameter, boolean inferSP, boolean lockInSP) { Statement catalogStmt = proc.getStatements().add("stmt-" + String.valueOf(compileCounter++)); catalogStmt.setSqltext(sql); catalogStmt.setSinglepartition(partitionParameter != null); catalogStmt.setBatched(false); catalogStmt.setParamnum(paramCount); // determine the type of the query QueryType qtype = QueryType.SELECT; catalogStmt.setReadonly(true); if (sql.toLowerCase().startsWith("insert")) { qtype = QueryType.INSERT; catalogStmt.setReadonly(false); } if (sql.toLowerCase().startsWith("update")) { qtype = QueryType.UPDATE; catalogStmt.setReadonly(false); } if (sql.toLowerCase().startsWith("delete")) { qtype = QueryType.DELETE; catalogStmt.setReadonly(false); } catalogStmt.setQuerytype(qtype.getValue()); // name will look like "basename-stmt-#" String name = catalogStmt.getParent().getTypeName() + "-" + catalogStmt.getTypeName(); DatabaseEstimates estimates = new DatabaseEstimates(); TrivialCostModel costModel = new TrivialCostModel(); PartitioningForStatement partitioning = new PartitioningForStatement(partitionParameter, inferSP, lockInSP); QueryPlanner planner = new QueryPlanner( catalogStmt.getSqltext(), catalogStmt.getTypeName(), catalogStmt.getParent().getTypeName(), catalog.getClusters().get("cluster"), db, partitioning, hsql, estimates, false, StatementCompiler.DEFAULT_MAX_JOIN_TABLES, costModel, null, joinOrder); CompiledPlan plan = null; planner.parse(); plan = planner.plan(); assert (plan != null); // Input Parameters // We will need to update the system catalogs with this new information // If this is an adhoc query then there won't be any parameters for (int i = 0; i < plan.parameters.length; ++i) { StmtParameter catalogParam = catalogStmt.getParameters().add(String.valueOf(i)); catalogParam.setJavatype(plan.parameters[i].getValue()); catalogParam.setIndex(i); } // Output Columns int index = 0; for (SchemaColumn col : plan.columns.getColumns()) { Column catColumn = catalogStmt.getOutput_columns().add(String.valueOf(index)); catColumn.setNullable(false); catColumn.setIndex(index); catColumn.setName(col.getColumnName()); catColumn.setType(col.getType().getValue()); catColumn.setSize(col.getSize()); index++; } List<PlanNodeList> nodeLists = new ArrayList<PlanNodeList>(); nodeLists.add(new PlanNodeList(plan.rootPlanGraph)); if (plan.subPlanGraph != null) { nodeLists.add(new PlanNodeList(plan.subPlanGraph)); } // Store the list of parameters types and indexes in the plan node list. List<Pair<Integer, VoltType>> parameters = nodeLists.get(0).getParameters(); for (int i = 0; i < plan.parameters.length; ++i) { Pair<Integer, VoltType> parameter = new Pair<Integer, VoltType>(i, plan.parameters[i]); parameters.add(parameter); } // Now update our catalog information // HACK: We're using the node_tree's hashCode() as it's name. It would be really // nice if the Catalog code give us an guid without needing a name first... 
String json = null; try { JSONObject jobj = new JSONObject(nodeLists.get(0).toJSONString()); json = jobj.toString(4); } catch (JSONException e2) { // TODO Auto-generated catch block e2.printStackTrace(); System.exit(-1); } // // We then stick a serialized version of PlanNodeTree into a PlanFragment // try { BuildDirectoryUtils.writeFile("statement-plans", name + "_json.txt", json); BuildDirectoryUtils.writeFile( "statement-plans", name + ".dot", nodeLists.get(0).toDOTString("name")); } catch (Exception e) { e.printStackTrace(); } List<AbstractPlanNode> plannodes = new ArrayList<AbstractPlanNode>(); for (PlanNodeList nodeList : nodeLists) { plannodes.add(nodeList.getRootPlanNode()); } m_currentPlan = plan; return plannodes; }
@Override public void process(Pair<TransactionTrace, Integer> p) { assert (p != null); final TransactionTrace txn_trace = p.getFirst(); final int i = p.getSecond(); // Interval final int txn_weight = (use_txn_weights ? txn_trace.getWeight() : 1); final String proc_key = CatalogKey.createKey(CatalogUtil.DEFAULT_DATABASE_NAME, txn_trace.getCatalogItemName()); // Terrible Hack: Assume that we are using the SingleSitedCostModel // and that // it will return fixed values based on whether the txn is // single-partitioned or not SingleSitedCostModel singlesited_cost_model = (SingleSitedCostModel) cost_models[i]; total_interval_txns[i] += txn_weight; total_interval_queries[i] += (txn_trace.getQueryCount() * txn_weight); histogram_procs.put(proc_key, txn_weight); try { singlesited_cost_model.estimateTransactionCost(catalogContext, workload, filter, txn_trace); TransactionCacheEntry txn_entry = singlesited_cost_model.getTransactionCacheEntry(txn_trace); assert (txn_entry != null) : "No txn entry for " + txn_trace; Collection<Integer> partitions = txn_entry.getTouchedPartitions(); // If the txn runs on only one partition, then the cost is // nothing if (txn_entry.isSinglePartitioned()) { singlepartition_ctrs[i] += txn_weight; if (!partitions.isEmpty()) { assert (txn_entry.getAllTouchedPartitionsHistogram().getValueCount() == 1) : txn_entry + " says it was single-partitioned but the partition count says otherwise:\n" + txn_entry.debug(); singlepartition_with_partitions_ctrs[i] += txn_weight; } histogram_sp_procs.put(proc_key, txn_weight); // If the txn runs on multiple partitions, then the cost // is... // XXX 2010-06-28: The number of partitions that the txn // touches divided by the total number of partitions // XXX 2010-07-02: The histogram for the total number of // partitions touched by all of the queries // in the transaction. 
This ensures that txns with just one // multi-partition query // isn't weighted the same as a txn with many // multi-partition queries } else { assert (!partitions.isEmpty()) : "No touched partitions for " + txn_trace; if (partitions.size() == 1 && txn_entry.getExecutionPartition() != HStoreConstants.NULL_PARTITION_ID) { assert (CollectionUtil.first(partitions) != txn_entry.getExecutionPartition()) : txn_entry.debug(); exec_mismatch_ctrs[i] += txn_weight; partitions_touched[i] += txn_weight; } else { assert (partitions.size() > 1) : String.format( "%s is not marked as single-partition but it only touches one partition\n%s", txn_trace, txn_entry.debug()); } partitions_touched[i] += (partitions.size() * txn_weight); // Txns multipartition_ctrs[i] += txn_weight; histogram_mp_procs.put(proc_key, txn_weight); } Integer base_partition = txn_entry.getExecutionPartition(); if (base_partition != null) { exec_histogram[i].put(base_partition, txn_weight); } else { exec_histogram[i].put(all_partitions, txn_weight); } if (debug.val) { // && // txn_trace.getCatalogItemName().equalsIgnoreCase("DeleteCallForwarding")) // { Procedure catalog_proc = txn_trace.getCatalogItem(catalogContext.database); Map<String, Object> inner = new LinkedHashMap<String, Object>(); for (Statement catalog_stmt : catalog_proc.getStatements()) { inner.put(catalog_stmt.fullName(), CatalogUtil.getReferencedTables(catalog_stmt)); } Map<String, Object> m = new LinkedHashMap<String, Object>(); m.put(txn_trace.toString(), null); m.put("Interval", i); m.put("Single-Partition", txn_entry.isSinglePartitioned()); m.put("Base Partition", base_partition); m.put("Touched Partitions", partitions); m.put(catalog_proc.fullName(), inner); LOG.debug(StringUtil.formatMaps(m)); } // We need to keep a count of the number txns that didn't have // all of its queries estimated // completely so that we can update the access histograms down // below for entropy calculations // Note that this is at the txn level, not the query level. if (!txn_entry.isComplete()) { incomplete_txn_ctrs[i] += txn_weight; tmp_missingPartitions.clear(); tmp_missingPartitions.addAll(all_partitions); tmp_missingPartitions.removeAll(txn_entry.getTouchedPartitions()); // Update the histogram for this interval to keep track of // how many times we need to // increase the partition access histogram incomplete_txn_histogram[i].put(tmp_missingPartitions, txn_weight); if (trace.val) { Map<String, Object> m = new LinkedHashMap<String, Object>(); m.put(String.format("Marking %s as incomplete in interval #%d", txn_trace, i), null); m.put("Examined Queries", txn_entry.getExaminedQueryCount()); m.put("Total Queries", txn_entry.getTotalQueryCount()); m.put("Touched Partitions", txn_entry.getTouchedPartitions()); m.put("Missing Partitions", tmp_missingPartitions); LOG.trace(StringUtil.formatMaps(m)); } } } catch (Exception ex) { CatalogUtil.saveCatalog(catalogContext.catalog, CatalogUtil.CATALOG_FILENAME); throw new RuntimeException( "Failed to estimate cost for " + txn_trace.getCatalogItemName() + " at interval " + i, ex); } }