private static void buildCatalog() throws IOException {
    // build a real catalog
    File cat = File.createTempFile("temp-log-reinitiator", "catalog");
    cat.deleteOnExit();

    VoltProjectBuilder builder = new VoltProjectBuilder();
    String schema = "create table A (i integer not null, primary key (i));";
    builder.addLiteralSchema(schema);
    builder.addPartitionInfo("A", "i");
    builder.addStmtProcedure("hello", "select * from A where i = ?", "A.i: 0");

    if (!builder.compile(cat.getAbsolutePath())) {
        throw new IOException("Failed to compile the test catalog");
    }

    byte[] bytes = CatalogUtil.toBytes(cat);
    String serializedCat = CatalogUtil.loadCatalogFromJar(bytes, null);
    assertNotNull(serializedCat);
    Catalog catalog = new Catalog();
    catalog.execute(serializedCat);

    String deploymentPath = builder.getPathToDeployment();
    CatalogUtil.compileDeploymentAndGetCRC(catalog, deploymentPath, true);

    m_context = new CatalogContext(0, 0, catalog, bytes, 0, 0, 0);
    TheHashinator.initialize(LegacyHashinator.class, LegacyHashinator.getConfigureBytes(3));
}
@Override
public void run() {
    CatalogAndIds catalogStuff = null;
    do {
        try {
            catalogStuff = CatalogUtil.getCatalogFromZK(m_rvdb.getHostMessenger().getZK());
        } catch (org.apache.zookeeper_voltpatches.KeeperException.NoNodeException e) {
            // The catalog has not been published to ZK yet; keep polling.
        } catch (Exception e) {
            VoltDB.crashLocalVoltDB(
                    "System was interrupted while waiting for a catalog.", false, null);
        }
    } while (catalogStuff == null || catalogStuff.catalogBytes.length == 0);

    String serializedCatalog = null;
    byte[] catalogJarBytes = catalogStuff.catalogBytes;
    try {
        Pair<InMemoryJarfile, String> loadResults =
                CatalogUtil.loadAndUpgradeCatalogFromJar(catalogStuff.catalogBytes);
        serializedCatalog =
                CatalogUtil.getSerializedCatalogStringFromJar(loadResults.getFirst());
        catalogJarBytes = loadResults.getFirst().getFullJarBytes();
    } catch (IOException e) {
        VoltDB.crashLocalVoltDB("Unable to load catalog", false, e);
    }

    if ((serializedCatalog == null) || (serializedCatalog.length() == 0)) {
        VoltDB.crashLocalVoltDB("Catalog loading failure", false, null);
    }

    /* N.B. node recovery requires discovering the current catalog version. */
    Catalog catalog = new Catalog();
    catalog.execute(serializedCatalog);
    serializedCatalog = null;

    // This is where we compile the real catalog and create the runtime
    // catalog context. To validate the deployment we compiled and created
    // a starter context which used a placeholder catalog.
    // Note: if this fails it will print an error first.
    String result = CatalogUtil.compileDeployment(catalog, m_deployment, false);
    if (result != null) {
        hostLog.fatal(result);
        VoltDB.crashLocalVoltDB(result);
    }

    try {
        m_rvdb.m_catalogContext = new CatalogContext(
                catalogStuff.txnId,
                catalogStuff.uniqueId,
                catalog,
                catalogJarBytes,
                // Our starter catalog has set the deployment stuff, just yoink it out for now
                m_rvdb.m_catalogContext.getDeploymentBytes(),
                catalogStuff.version,
                -1);
    } catch (Exception e) {
        VoltDB.crashLocalVoltDB("Error agreeing on starting catalog version", true, e);
    }
}
public MockVoltDB(int clientPort, int adminPort, int httpPort, int drPort) {
    try {
        JSONObject obj = new JSONObject();
        JSONArray jsonArray = new JSONArray();
        jsonArray.put("127.0.0.1");
        obj.put("interfaces", jsonArray);
        obj.put("clientPort", clientPort);
        obj.put("adminPort", adminPort);
        obj.put("httpPort", httpPort);
        obj.put("drPort", drPort);
        m_localMetadata = obj.toString(4);

        m_catalog = new Catalog();
        m_catalog.execute("add / clusters " + m_clusterName);
        m_catalog.execute("add " + m_catalog.getClusters().get(m_clusterName).getPath()
                + " databases " + m_databaseName);
        Cluster cluster = m_catalog.getClusters().get(m_clusterName);
        assert (cluster != null);
        // Set a sane default for TestMessaging (at least)
        cluster.setHeartbeattimeout(10000);

        try {
            m_hostMessenger.start();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        VoltZK.createPersistentZKNodes(m_hostMessenger.getZK());
        m_hostMessenger.getZK().create(
                VoltZK.cluster_metadata + "/" + m_hostMessenger.getHostId(),
                getLocalMetadata().getBytes("UTF-8"),
                Ids.OPEN_ACL_UNSAFE,
                CreateMode.EPHEMERAL);

        m_hostMessenger.generateMailboxId(
                m_hostMessenger.getHSIdForLocalSite(HostMessenger.STATS_SITE_ID));
        m_statsAgent = new StatsAgent();
        m_statsAgent.registerMailbox(m_hostMessenger,
                m_hostMessenger.getHSIdForLocalSite(HostMessenger.STATS_SITE_ID));

        for (MailboxType type : MailboxType.values()) {
            m_mailboxMap.put(type, new LinkedList<MailboxNodeContent>());
        }
        m_mailboxMap.get(MailboxType.StatsAgent).add(new MailboxNodeContent(
                m_hostMessenger.getHSIdForLocalSite(HostMessenger.STATS_SITE_ID), null));
        m_siteTracker = new SiteTracker(m_hostId, m_mailboxMap);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
/**
 * Loads the schema at ddlurl and sets up a VoltCompiler / HSQL instance.
 *
 * @param ddlurl URL of the schema/DDL file.
 * @param basename Unique string; JSON plans are written to disk as
 *        [basename]-stmt-#_json.txt.
 * @throws Exception
 */
public PlannerTestAideDeCamp(URL ddlurl, String basename) throws Exception {
    catalog = new Catalog();
    catalog.execute("add / clusters cluster");
    catalog.execute("add /clusters[cluster] databases database");
    db = catalog.getClusters().get("cluster").getDatabases().get("database");
    proc = db.getProcedures().add(basename);

    String schemaPath = URLDecoder.decode(ddlurl.getPath(), "UTF-8");

    VoltCompiler compiler = new VoltCompiler();
    hsql = HSQLInterface.loadHsqldb();
    // hsql.runDDLFile(schemaPath);
    VoltDDLElementTracker partitionMap = new VoltDDLElementTracker(compiler);
    DDLCompiler ddl_compiler = new DDLCompiler(compiler, hsql, partitionMap, db);
    ddl_compiler.loadSchema(schemaPath);
    ddl_compiler.compileToCatalog(catalog, db);
}
public synchronized void killSite(long siteId) {
    m_catalog = m_catalog.deepCopy();
    for (List<MailboxNodeContent> lmnc : m_mailboxMap.values()) {
        Iterator<MailboxNodeContent> iter = lmnc.iterator();
        while (iter.hasNext()) {
            if (iter.next().HSId == siteId) {
                iter.remove();
            }
        }
    }
    m_siteTracker = new SiteTracker(m_hostId, m_mailboxMap);
}
void compileXMLRootNode(ProjectType project) throws VoltCompilerException {
    m_catalog = new Catalog();
    temporaryCatalogInit();

    SecurityType security = project.getSecurity();
    if (security != null) {
        m_catalog.getClusters().get("cluster").setSecurityenabled(security.isEnabled());
    }

    DatabaseType database = project.getDatabase();
    if (database != null) {
        compileDatabaseNode(database);
    }
}
/** Generate the HTML catalog report from a newly compiled VoltDB catalog */
public static String report(Catalog catalog, ArrayList<Feedback> warnings) throws IOException {
    // asynchronously get platform properties
    new Thread() {
        @Override
        public void run() {
            PlatformProperties.getPlatformProperties();
        }
    }.start();

    URL url = Resources.getResource(ReportMaker.class, "template.html");
    String contents = Resources.toString(url, Charsets.UTF_8);

    Cluster cluster = catalog.getClusters().get("cluster");
    assert (cluster != null);
    Database db = cluster.getDatabases().get("database");
    assert (db != null);

    String statsData = getStatsHTML(db, warnings);
    contents = contents.replace("##STATS##", statsData);

    String schemaData = generateSchemaTable(db.getTables(), db.getConnectors());
    contents = contents.replace("##SCHEMA##", schemaData);

    String procData = generateProceduresTable(db.getProcedures());
    contents = contents.replace("##PROCS##", procData);

    DatabaseSizes sizes = CatalogSizing.getCatalogSizes(db);

    String sizeData = generateSizeTable(sizes);
    contents = contents.replace("##SIZES##", sizeData);

    String sizeSummary = generateSizeSummary(sizes);
    contents = contents.replace("##SIZESUMMARY##", sizeSummary);

    String platformData = PlatformProperties.getPlatformProperties().toHTML();
    contents = contents.replace("##PLATFORM##", platformData);

    contents = contents.replace("##VERSION##", VoltDB.instance().getVersionString());

    DateFormat df = new SimpleDateFormat("d MMM yyyy HH:mm:ss z");
    contents = contents.replace("##TIMESTAMP##", df.format(m_timestamp));

    String msg = Encoder.hexEncode(VoltDB.instance().getVersionString() + ","
            + System.currentTimeMillis());
    contents = contents.replace("get.py?a=KEY&", String.format("get.py?a=%s&", msg));

    return contents;
}
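// Hedged usage sketch (assumption, not part of the original source): one way
// the generated report might be persisted. The method name writeReport and
// the output file are illustrative; it reuses the Guava classes already used
// above (Resources, Charsets).
public static void writeReport(Catalog catalog, ArrayList<Feedback> warnings, File outFile)
        throws IOException {
    String html = report(catalog, warnings);
    // Write the rendered HTML to disk, e.g. next to the compiled catalog jar.
    com.google.common.io.Files.write(html, outFile, Charsets.UTF_8);
}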
protected void setUp(AbstractProjectBuilder projectBuilder, boolean force) throws Exception {
    super.setUp();
    is_first = (is_first == null);
    this.last_type = ProjectType.TEST;
    if (force == false) {
        catalog = project_catalogs.get(this.last_type);
        catalog_db = project_databases.get(this.last_type);
        p_estimator = project_p_estimators.get(this.last_type);
    }
    if (catalog == null || force) {
        String catalogJar = new File(projectBuilder.getJarName(true)).getAbsolutePath();
        try {
            boolean status = projectBuilder.compile(catalogJar);
            assert (status);
        } catch (Exception ex) {
            throw new RuntimeException("Failed to create " + projectBuilder.getProjectName()
                    + " catalog [" + catalogJar + "]", ex);
        }

        catalog = new Catalog();
        try {
            // read in the catalog
            String serializedCatalog =
                    JarReader.readFileFromJarfile(catalogJar, CatalogUtil.CATALOG_FILENAME);
            // create the catalog (that will be passed to the ClientInterface)
            catalog.execute(serializedCatalog);
        } catch (Exception ex) {
            throw new RuntimeException("Failed to load " + projectBuilder.getProjectName()
                    + " catalog [" + catalogJar + "]", ex);
        }

        this.init(this.last_type, catalog);
    }
}
public Cluster getCluster() {
    return m_catalog.getClusters().get(m_clusterName);
}
@SuppressWarnings("deprecation") @Override public void setUp() throws IOException, InterruptedException { VoltDB.instance().readBuildInfo("Test"); // compile a catalog String testDir = BuildDirectoryUtils.getBuildDirectoryPath(); String catalogJar = testDir + File.separator + JAR; TPCCProjectBuilder pb = new TPCCProjectBuilder(); pb.addDefaultSchema(); pb.addDefaultPartitioning(); pb.addProcedures(MultiSiteSelect.class, InsertNewOrder.class); pb.compile(catalogJar, 2, 0); // load a catalog byte[] bytes = CatalogUtil.toBytes(new File(catalogJar)); String serializedCatalog = CatalogUtil.loadCatalogFromJar(bytes, null); // create the catalog (that will be passed to the ClientInterface catalog = new Catalog(); catalog.execute(serializedCatalog); // update the catalog with the data from the deployment file String pathToDeployment = pb.getPathToDeployment(); assertTrue(CatalogUtil.compileDeploymentAndGetCRC(catalog, pathToDeployment, true) >= 0); cluster = catalog.getClusters().get("cluster"); CatalogMap<Procedure> procedures = cluster.getDatabases().get("database").getProcedures(); Procedure insertProc = procedures.get("InsertNewOrder"); assert (insertProc != null); selectProc = procedures.get("MultiSiteSelect"); assert (selectProc != null); // Each EE needs its own thread for correct initialization. final AtomicReference<ExecutionEngine> site1Reference = new AtomicReference<ExecutionEngine>(); final byte configBytes[] = LegacyHashinator.getConfigureBytes(2); Thread site1Thread = new Thread() { @Override public void run() { site1Reference.set( new ExecutionEngineJNI( cluster.getRelativeIndex(), 1, 0, 0, "", 100, HashinatorType.LEGACY, configBytes)); } }; site1Thread.start(); site1Thread.join(); final AtomicReference<ExecutionEngine> site2Reference = new AtomicReference<ExecutionEngine>(); Thread site2Thread = new Thread() { @Override public void run() { site2Reference.set( new ExecutionEngineJNI( cluster.getRelativeIndex(), 2, 1, 0, "", 100, HashinatorType.LEGACY, configBytes)); } }; site2Thread.start(); site2Thread.join(); // create two EEs site1 = new ExecutionSite(0); // site 0 ee1 = site1Reference.get(); ee1.loadCatalog(0, catalog.serialize()); site2 = new ExecutionSite(1); // site 1 ee2 = site2Reference.get(); ee2.loadCatalog(0, catalog.serialize()); // cache some plan fragments selectStmt = selectProc.getStatements().get("selectAll"); assert (selectStmt != null); int i = 0; // this kinda assumes the right order for (PlanFragment f : selectStmt.getFragments()) { if (i == 0) selectTopFrag = f; else selectBottomFrag = f; i++; } assert (selectTopFrag != null); assert (selectBottomFrag != null); if (selectTopFrag.getHasdependencies() == false) { PlanFragment temp = selectTopFrag; selectTopFrag = selectBottomFrag; selectBottomFrag = temp; } // get the insert frag Statement insertStmt = insertProc.getStatements().get("insert"); assert (insertStmt != null); for (PlanFragment f : insertStmt.getFragments()) insertFrag = f; // populate plan cache ActivePlanRepository.clear(); ActivePlanRepository.addFragmentForTest( CatalogUtil.getUniqueIdForFragment(selectBottomFrag), Encoder.base64Decode(selectBottomFrag.getPlannodetree())); ActivePlanRepository.addFragmentForTest( CatalogUtil.getUniqueIdForFragment(selectTopFrag), Encoder.base64Decode(selectTopFrag.getPlannodetree())); ActivePlanRepository.addFragmentForTest( CatalogUtil.getUniqueIdForFragment(insertFrag), Encoder.base64Decode(insertFrag.getPlannodetree())); // insert some data ParameterSet params = ParameterSet.fromArrayNoCopy(1L, 1L, 1L); 
VoltTable[] results = ee2.executePlanFragments( 1, new long[] {CatalogUtil.getUniqueIdForFragment(insertFrag)}, null, new ParameterSet[] {params}, 1, 0, 42, Long.MAX_VALUE); assert (results.length == 1); assert (results[0].asScalarLong() == 1L); params = ParameterSet.fromArrayNoCopy(2L, 2L, 2L); results = ee1.executePlanFragments( 1, new long[] {CatalogUtil.getUniqueIdForFragment(insertFrag)}, null, new ParameterSet[] {params}, 2, 1, 42, Long.MAX_VALUE); assert (results.length == 1); assert (results[0].asScalarLong() == 1L); }
/**
 * Initialize TheHashinator
 *
 * @param catalog A pointer to the catalog data structure.
 */
public static void initialize(Catalog catalog) {
    Cluster cluster = catalog.getClusters().get("cluster");
    partitionCount = cluster.getNum_partitions();
}
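// Illustrative sketch (assumption, not original source) contrasting the two
// initialization paths that appear in this file set: catalog-driven (above),
// and the explicit legacy-hashinator configuration used by buildCatalog()
// earlier. In practice only one would be called; both appear here purely for
// comparison.
static void initializeHashinatorExamples(Catalog catalog, int partitionCount) {
    // Derive the partition count from a compiled catalog.
    TheHashinator.initialize(catalog);
    // Or configure the legacy hashinator directly, as the tests do.
    TheHashinator.initialize(LegacyHashinator.class,
            LegacyHashinator.getConfigureBytes(partitionCount));
}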
public CatalogContext update(String pathToNewJar, String diffCommands) {
    Catalog newCatalog = catalog.deepCopy();
    newCatalog.execute(diffCommands);
    CatalogContext retval = new CatalogContext(newCatalog, pathToNewJar);
    return retval;
}
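// Hedged usage sketch (assumption, not original source). update() deep-copies
// the underlying catalog before executing the diff, so the existing context
// stays valid for in-flight work and the caller swaps in the returned context.
// The names below are illustrative.
static CatalogContext applyCatalogDiff(CatalogContext currentContext,
        String pathToNewJar, String diffCommands) {
    return currentContext.update(pathToNewJar, diffCommands);
}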
@SuppressWarnings("unchecked") public Catalog compileCatalog(final String projectFileURL, final ClusterConfig clusterConfig) { if (!clusterConfig.validate()) { addErr(clusterConfig.getErrorMsg()); return null; } // Compiler instance is reusable. Clear the cache. cachedAddedClasses.clear(); m_currentFilename = new File(projectFileURL).getName(); m_jarBuilder = new JarBuilder(this); if (m_outputStream != null) { m_outputStream.println("\n** BEGIN PROJECT COMPILE: " + m_currentFilename + " **"); } ProjectType project = null; try { JAXBContext jc = JAXBContext.newInstance("org.voltdb.compiler.projectfile"); // This schema shot the sheriff. SchemaFactory sf = SchemaFactory.newInstance(javax.xml.XMLConstants.W3C_XML_SCHEMA_NS_URI); Schema schema = sf.newSchema(this.getClass().getResource("ProjectFileSchema.xsd")); Unmarshaller unmarshaller = jc.createUnmarshaller(); // But did not shoot unmarshaller! unmarshaller.setSchema(schema); JAXBElement<ProjectType> result = (JAXBElement<ProjectType>) unmarshaller.unmarshal(new File(projectFileURL)); project = result.getValue(); } catch (JAXBException e) { // Convert some linked exceptions to more friendly errors. if (e.getLinkedException() instanceof java.io.FileNotFoundException) { addErr(e.getLinkedException().getMessage()); return null; } if (e.getLinkedException() instanceof org.xml.sax.SAXParseException) { addErr("Error schema validating project.xml file. " + e.getLinkedException().getMessage()); return null; } throw new RuntimeException(e); } catch (SAXException e) { addErr("Error schema validating project.xml file. " + e.getMessage()); return null; } try { compileXMLRootNode(project); } catch (final VoltCompilerException e) { // compilerLog.l7dlog( Level.ERROR, // LogKeys.compiler_VoltCompiler_FailedToCompileXML.name(), null); LOG.error(e.getMessage(), e); // e.printStackTrace(); return null; } assert (m_catalog != null); try { ClusterCompiler.compile(m_catalog, clusterConfig); } catch (RuntimeException e) { addErr(e.getMessage()); return null; } // Optimization: Vertical Partitioning if (m_enableVerticalPartitionOptimizations) { if (m_verticalPartitionPlanner == null) { m_verticalPartitionPlanner = new VerticalPartitionPlanner(CatalogUtil.getDatabase(m_catalog), true); } try { m_verticalPartitionPlanner.optimizeDatabase(); } catch (Exception ex) { LOG.warn("Unexpected error", ex); addErr("Failed to apply vertical partition optimizations"); } } // add epoch info to catalog final int epoch = (int) (TransactionIdManager.getEpoch() / 1000); m_catalog.getClusters().get("cluster").setLocalepoch(epoch); // done handling files m_currentFilename = null; return m_catalog; }
/**
 * Compile and cache the statement and plan and return the final plan graph.
 *
 * @param sql
 * @param paramCount
 */
public List<AbstractPlanNode> compile(String sql, int paramCount, String joinOrder,
        Object partitionParameter, boolean inferSP, boolean lockInSP) {
    Statement catalogStmt = proc.getStatements().add("stmt-" + String.valueOf(compileCounter++));
    catalogStmt.setSqltext(sql);
    catalogStmt.setSinglepartition(partitionParameter != null);
    catalogStmt.setBatched(false);
    catalogStmt.setParamnum(paramCount);

    // determine the type of the query
    QueryType qtype = QueryType.SELECT;
    catalogStmt.setReadonly(true);
    if (sql.toLowerCase().startsWith("insert")) {
        qtype = QueryType.INSERT;
        catalogStmt.setReadonly(false);
    }
    if (sql.toLowerCase().startsWith("update")) {
        qtype = QueryType.UPDATE;
        catalogStmt.setReadonly(false);
    }
    if (sql.toLowerCase().startsWith("delete")) {
        qtype = QueryType.DELETE;
        catalogStmt.setReadonly(false);
    }
    catalogStmt.setQuerytype(qtype.getValue());

    // name will look like "basename-stmt-#"
    String name = catalogStmt.getParent().getTypeName() + "-" + catalogStmt.getTypeName();

    DatabaseEstimates estimates = new DatabaseEstimates();
    TrivialCostModel costModel = new TrivialCostModel();
    PartitioningForStatement partitioning =
            new PartitioningForStatement(partitionParameter, inferSP, lockInSP);
    QueryPlanner planner = new QueryPlanner(
            catalogStmt.getSqltext(),
            catalogStmt.getTypeName(),
            catalogStmt.getParent().getTypeName(),
            catalog.getClusters().get("cluster"),
            db,
            partitioning,
            hsql,
            estimates,
            false,
            StatementCompiler.DEFAULT_MAX_JOIN_TABLES,
            costModel,
            null,
            joinOrder);

    CompiledPlan plan = null;
    planner.parse();
    plan = planner.plan();
    assert (plan != null);

    // Input Parameters
    // We will need to update the system catalogs with this new information
    // If this is an adhoc query then there won't be any parameters
    for (int i = 0; i < plan.parameters.length; ++i) {
        StmtParameter catalogParam = catalogStmt.getParameters().add(String.valueOf(i));
        catalogParam.setJavatype(plan.parameters[i].getValue());
        catalogParam.setIndex(i);
    }

    // Output Columns
    int index = 0;
    for (SchemaColumn col : plan.columns.getColumns()) {
        Column catColumn = catalogStmt.getOutput_columns().add(String.valueOf(index));
        catColumn.setNullable(false);
        catColumn.setIndex(index);
        catColumn.setName(col.getColumnName());
        catColumn.setType(col.getType().getValue());
        catColumn.setSize(col.getSize());
        index++;
    }

    List<PlanNodeList> nodeLists = new ArrayList<PlanNodeList>();
    nodeLists.add(new PlanNodeList(plan.rootPlanGraph));
    if (plan.subPlanGraph != null) {
        nodeLists.add(new PlanNodeList(plan.subPlanGraph));
    }

    // Store the list of parameter types and indexes in the plan node list.
    List<Pair<Integer, VoltType>> parameters = nodeLists.get(0).getParameters();
    for (int i = 0; i < plan.parameters.length; ++i) {
        Pair<Integer, VoltType> parameter = new Pair<Integer, VoltType>(i, plan.parameters[i]);
        parameters.add(parameter);
    }

    // Now update our catalog information
    // HACK: We're using the node_tree's hashCode() as its name. It would be really
    // nice if the Catalog code gave us a GUID without needing a name first...
    String json = null;
    try {
        JSONObject jobj = new JSONObject(nodeLists.get(0).toJSONString());
        json = jobj.toString(4);
    } catch (JSONException e2) {
        e2.printStackTrace();
        System.exit(-1);
    }

    //
    // We then stick a serialized version of PlanNodeTree into a PlanFragment
    //
    try {
        BuildDirectoryUtils.writeFile("statement-plans", name + "_json.txt", json);
        BuildDirectoryUtils.writeFile("statement-plans", name + ".dot",
                nodeLists.get(0).toDOTString("name"));
    } catch (Exception e) {
        e.printStackTrace();
    }

    List<AbstractPlanNode> plannodes = new ArrayList<AbstractPlanNode>();
    for (PlanNodeList nodeList : nodeLists) {
        plannodes.add(nodeList.getRootPlanNode());
    }

    m_currentPlan = plan;
    return plannodes;
}
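// Hedged usage sketch for the harness above (assumption, not original
// source). The DDL resource and the statement text are illustrative; the
// compile() arguments follow the signature defined above.
static List<AbstractPlanNode> planExampleStatement(URL ddlUrl) throws Exception {
    PlannerTestAideDeCamp aide = new PlannerTestAideDeCamp(ddlUrl, "example");
    // sql, paramCount, joinOrder, partitionParameter, inferSP, lockInSP
    return aide.compile("select * from A where i = ?", 1, null, null, false, false);
}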
void compileDatabaseNode(DatabaseType database) throws VoltCompilerException {
    final ArrayList<String> programs = new ArrayList<String>();
    final ArrayList<String> schemas = new ArrayList<String>();
    final ArrayList<ProcedureDescriptor> procedures = new ArrayList<ProcedureDescriptor>();
    final ArrayList<Class<?>> classDependencies = new ArrayList<Class<?>>();
    final ArrayList<String[]> partitions = new ArrayList<String[]>();

    final String databaseName = database.getName();

    // schema does not verify that the database is named "database"
    if (databaseName.equals("database") == false) {
        final String msg = "VoltDB currently requires all database elements to be named "
                + "\"database\" (found: \"" + databaseName + "\")";
        throw new VoltCompilerException(msg);
    }

    // create the database in the catalog
    m_catalog.execute("add /clusters[cluster] databases " + databaseName);
    Database db = m_catalog.getClusters().get("cluster").getDatabases().get(databaseName);

    SnapshotType snapshotSettings = database.getSnapshot();
    if (snapshotSettings != null) {
        SnapshotSchedule schedule = db.getSnapshotschedule().add("default");
        String frequency = snapshotSettings.getFrequency();
        if (!frequency.endsWith("s") && !frequency.endsWith("m") && !frequency.endsWith("h")) {
            throw new VoltCompilerException("Snapshot frequency " + frequency
                    + " needs to end with a time unit that is one of [s, m, h]"
                    + " (seconds, minutes, hours)");
        }

        int frequencyInt = 0;
        String frequencySubstring = frequency.substring(0, frequency.length() - 1);
        try {
            frequencyInt = Integer.parseInt(frequencySubstring);
        } catch (Exception e) {
            throw new VoltCompilerException("Frequency " + frequencySubstring
                    + " is not an integer");
        }

        String prefix = snapshotSettings.getPrefix();
        if (prefix == null || prefix.isEmpty()) {
            throw new VoltCompilerException("Snapshot prefix " + prefix
                    + " is not a valid prefix");
        }
        if (prefix.contains("-") || prefix.contains(",")) {
            throw new VoltCompilerException("Snapshot prefix " + prefix
                    + " cannot include , or -");
        }

        String path = snapshotSettings.getPath();
        if (path == null || path.isEmpty()) {
            throw new VoltCompilerException("Snapshot path " + path + " is not a valid path");
        }

        if (snapshotSettings.getRetain() == null) {
            throw new VoltCompilerException("Snapshot retain value not provided");
        }
        int retain = snapshotSettings.getRetain().intValue();
        if (retain < 1) {
            throw new VoltCompilerException("Snapshot retain value " + retain
                    + " is not a valid value. Must be 1 or greater.");
        }

        schedule.setFrequencyunit(frequency.substring(frequency.length() - 1, frequency.length()));
        schedule.setFrequencyvalue(frequencyInt);
        schedule.setPath(path);
        schedule.setPrefix(prefix);
        schedule.setRetain(retain);
    }

    // schemas/schema
    for (SchemasType.Schema schema : database.getSchemas().getSchema()) {
        LOG.l7dlog(Level.DEBUG, LogKeys.compiler_VoltCompiler_CatalogPath.name(),
                new Object[] {schema.getPath()}, null);
        schemas.add(schema.getPath());
    }

    // groups/group
    if (database.getGroups() != null) {
        for (GroupsType.Group group : database.getGroups().getGroup()) {
            org.voltdb.catalog.Group catGroup = db.getGroups().add(group.getName());
            catGroup.setAdhoc(group.isAdhoc());
            catGroup.setSysproc(group.isSysproc());
        }
    }

    // users/user
    if (database.getUsers() != null) {
        for (UsersType.User user : database.getUsers().getUser()) {
            org.voltdb.catalog.User catUser = db.getUsers().add(user.getName());
            catUser.setAdhoc(user.isAdhoc());
            catUser.setSysproc(user.isSysproc());
            byte passwordHash[] = extractPassword(user.getPassword());
            catUser.setShadowpassword(Encoder.hexEncode(passwordHash));

            // process the @groups comma separated list
            if (user.getGroups() != null) {
                String grouplist[] = user.getGroups().split(",");
                for (final String group : grouplist) {
                    final GroupRef groupRef = catUser.getGroups().add(group);
                    final Group catalogGroup = db.getGroups().get(group);
                    if (catalogGroup != null) {
                        groupRef.setGroup(catalogGroup);
                    }
                }
            }
        }
    }

    // procedures/procedure
    for (ProceduresType.Procedure proc : database.getProcedures().getProcedure()) {
        procedures.add(getProcedure(proc));
    }

    // classdependencies/classdependency
    if (database.getClassdependencies() != null) {
        for (Classdependency dep : database.getClassdependencies().getClassdependency()) {
            classDependencies.add(getClassDependency(dep));
        }
    }

    // partitions/table
    if (database.getPartitions() != null) {
        for (org.voltdb.compiler.projectfile.PartitionsType.Partition table :
                database.getPartitions().getPartition()) {
            partitions.add(getPartition(table));
        }
    }

    String msg = "Database \"" + databaseName + "\" ";
    // TODO: schema allows 0 procedures. Testbase relies on this.
    if (procedures.size() == 0) {
        msg += "needs at least one \"procedure\" element "
                + "(currently has " + String.valueOf(procedures.size()) + ")";
        throw new VoltCompilerException(msg);
    }

    // shutdown and make a new hsqldb
    m_hsql = HSQLInterface.loadHsqldb();

    // Actually parse and handle all the programs
    for (final String programName : programs) {
        m_catalog.execute("add " + db.getPath() + " programs " + programName);
    }

    // Actually parse and handle all the DDL
    final DDLCompiler ddlcompiler = new DDLCompiler(this, m_hsql);

    for (final String schemaPath : schemas) {
        File schemaFile = null;

        if (schemaPath.contains(".jar!")) {
            String ddlText = null;
            try {
                ddlText = JarReader.readFileFromJarfile(schemaPath);
            } catch (final Exception e) {
                throw new VoltCompilerException(e);
            }
            schemaFile = VoltProjectBuilder.writeStringToTempFile(ddlText);
        } else {
            schemaFile = new File(schemaPath);
        }

        if (!schemaFile.isAbsolute()) {
            // Resolve schemaPath relative to the database definition xml file
            schemaFile = new File(new File(m_projectFileURL).getParent(), schemaPath);
        }

        // add the file object's path to the list of files for the jar
        m_ddlFilePaths.put(schemaFile.getName(), schemaFile.getPath());

        ddlcompiler.loadSchema(schemaFile.getAbsolutePath());
    }
    ddlcompiler.compileToCatalog(m_catalog, db);

    // Actually parse and handle all the partitions
    // this needs to happen before procedures are compiled
    msg = "In database \"" + databaseName + "\", ";
    final CatalogMap<Table> tables = db.getTables();
    for (final String[] partition : partitions) {
        final String tableName = partition[0];
        final String colName = partition[1];
        final Table t = tables.getIgnoreCase(tableName);
        if (t == null) {
            msg += "\"partition\" element has unknown \"table\" attribute '" + tableName + "'";
            throw new VoltCompilerException(msg);
        }
        final Column c = t.getColumns().getIgnoreCase(colName);
        // make sure the column exists
        if (c == null) {
            msg += "\"partition\" element has unknown \"column\" attribute '" + colName + "'";
            throw new VoltCompilerException(msg);
        }
        // make sure the column is marked not-nullable
        if (c.getNullable() == true) {
            msg += "Partition column '" + tableName + "." + colName + "' is nullable. "
                    + "Partition columns must be constrained \"NOT NULL\".";
            throw new VoltCompilerException(msg);
        }
        t.setPartitioncolumn(c);
        t.setIsreplicated(false);

        // Set the destination tables of associated views non-replicated.
        // If a view's source table is replicated, then a full scan of the
        // associated view is single-sited. If the source is partitioned,
        // a full scan of the view must be distributed.
        final CatalogMap<MaterializedViewInfo> views = t.getViews();
        for (final MaterializedViewInfo mvi : views) {
            mvi.getDest().setIsreplicated(false);
        }
    }

    // add vertical partitions
    if (database.getVerticalpartitions() != null) {
        for (Verticalpartition vp : database.getVerticalpartitions().getVerticalpartition()) {
            try {
                addVerticalPartition(db, vp.getTable(), vp.getColumn(), vp.isIndexed());
            } catch (Exception ex) {
                throw new VoltCompilerException(
                        "Failed to create vertical partition for " + vp.getTable(), ex);
            }
        }
    }

    // this should reorder the tables and partitions all alphabetically
    String catData = m_catalog.serialize();
    m_catalog = new Catalog();
    m_catalog.execute(catData);
    db = m_catalog.getClusters().get("cluster").getDatabases().get(databaseName);

    // add database estimates info
    addDatabaseEstimatesInfo(m_estimates, db);
    addSystemProcsToCatalog(m_catalog, db);

    // Process and add exports and connectors to the catalog
    // Must do this before compiling procedures to deny updates
    // on append-only tables.
    if (database.getExports() != null) {
        // currently, only a single connector is allowed
        Connector conn = database.getExports().getConnector();
        compileConnector(conn, db);
    }

    // Actually parse and handle all the Procedures
    for (final ProcedureDescriptor procedureDescriptor : procedures) {
        final String procedureName = procedureDescriptor.m_className;
        m_currentFilename = procedureName.substring(procedureName.lastIndexOf('.') + 1);
        m_currentFilename += ".class";
        ProcedureCompiler.compile(this, m_hsql, m_estimates, m_catalog, db, procedureDescriptor);
    }

    // Add all the class dependencies to the output jar
    for (final Class<?> classDependency : classDependencies) {
        addClassToJar(classDependency, this);
    }

    m_hsql.close();
}
/** Initialize the catalog for one cluster */
void temporaryCatalogInit() {
    m_catalog.execute("add / clusters cluster");
    m_catalog.getClusters().get("cluster").setSecurityenabled(false);
}
public CatalogContext deepCopy() {
    return new CatalogContext(catalog.deepCopy(), jarPath);
}
/**
 * @param projectFileURL URL of the project file.
 * @param clusterConfig Object containing desired physical cluster parameters
 * @param jarOutputPath The location to write the finished JAR to.
 * @param output Where to print status/errors to, usually stdout.
 * @param procInfoOverrides Optional overridden values for procedure annotations.
 */
public boolean compile(final String projectFileURL,
        final ClusterConfig clusterConfig,
        final String jarOutputPath,
        final PrintStream output,
        final Map<String, ProcInfoData> procInfoOverrides) {
    m_hsql = null;
    m_projectFileURL = projectFileURL;
    m_jarOutputPath = jarOutputPath;
    m_outputStream = output;
    // use this map as default annotation values
    m_procInfoOverrides = procInfoOverrides;

    LOG.l7dlog(Level.DEBUG,
            LogKeys.compiler_VoltCompiler_LeaderAndHostCountAndSitesPerHost.name(),
            new Object[] {
                clusterConfig.getLeaderAddress(),
                clusterConfig.getHostCount(),
                clusterConfig.getSitesPerHost()
            }, null);

    // do all the work to get the catalog
    final Catalog catalog = compileCatalog(projectFileURL, clusterConfig);
    if (catalog == null) {
        LOG.error("VoltCompiler had " + m_errors.size() + " errors\n"
                + StringUtil.join("\n", m_errors));
        return false;
    }

    // WRITE CATALOG TO JAR HERE
    final String catalogCommands = catalog.serialize();

    byte[] catalogBytes = null;
    try {
        catalogBytes = catalogCommands.getBytes("UTF-8");
    } catch (final UnsupportedEncodingException e1) {
        addErr("Can't encode the compiled catalog file correctly");
        return false;
    }

    // Create Dtxn.Coordinator configuration for cluster
    // byte[] dtxnConfBytes = null;
    // try {
    //     dtxnConfBytes = HStoreDtxnConf.toHStoreDtxnConf(catalog).getBytes("UTF-8");
    // } catch (final Exception e1) {
    //     addErr("Can't encode the Dtxn.Coordinator configuration file correctly");
    //     return false;
    // }

    try {
        // m_jarBuilder.addEntry("dtxn.conf", dtxnConfBytes);
        m_jarBuilder.addEntry(CatalogUtil.CATALOG_FILENAME, catalogBytes);
        m_jarBuilder.addEntry("project.xml", new File(projectFileURL));
        for (final Entry<String, String> e : m_ddlFilePaths.entrySet()) {
            m_jarBuilder.addEntry(e.getKey(), new File(e.getValue()));
        }
        m_jarBuilder.writeJarToDisk(jarOutputPath);
    } catch (final VoltCompilerException e) {
        return false;
    }

    assert (!hasErrors());
    if (hasErrors()) {
        return false;
    }

    return true;
}
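// Hedged end-to-end sketch (assumption, not original source): wiring
// compile() together with a caller-supplied ClusterConfig. Passing null for
// procInfoOverrides assumes no annotation overrides are needed, since the map
// is only consulted as a source of default values above; the method and
// parameter names here are illustrative.
static boolean compileProject(ClusterConfig clusterConfig,
        String projectXmlPath, String jarOutputPath) {
    VoltCompiler compiler = new VoltCompiler();
    return compiler.compile(projectXmlPath, clusterConfig, jarOutputPath,
            System.out, null);
}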