/**
 * Log and exit if a dependency list fails an invariant.
 *
 * @param dependencyId the id of the dependency being verified
 * @param dependencies the tables received for that dependency
 */
void verifyDependencySanity(final Integer dependencyId, final List<VoltTable> dependencies) {
    if (dependencies == null) {
        hostLog.l7dlog(
            Level.FATAL,
            LogKeys.host_ExecutionSite_DependencyNotFound.name(),
            new Object[] {dependencyId},
            null);
        VoltDB.crashVoltDB();
    }

    for (final Object dependency : dependencies) {
        if (dependency == null) {
            hostLog.l7dlog(
                Level.FATAL,
                LogKeys.host_ExecutionSite_DependencyContainedNull.name(),
                new Object[] {dependencyId},
                null);
            VoltDB.crashVoltDB();
        }
        if (!(dependency instanceof VoltTable)) {
            hostLog.l7dlog(
                Level.FATAL,
                LogKeys.host_ExecutionSite_DependencyNotVoltTable.name(),
                new Object[] {dependencyId},
                null);
            VoltDB.crashVoltDB();
        }
        if (t) LOG.trace(String.format("Storing Dependency %d:\n%s", dependencyId, dependency));
    } // FOR
}
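// Illustrative sketch (not in the original source): the invariant enforced
// above, restated as a pure predicate. The real method logs a FATAL message
// and crashes the process instead of returning false; the helper name below
// is hypothetical.
static boolean dependenciesAreSane(final List<VoltTable> dependencies) {
    if (dependencies == null) return false;
    for (final Object dependency : dependencies) {
        // a null element or a non-VoltTable element violates the invariant
        // (null fails instanceof, so one test covers both cases)
        if (!(dependency instanceof VoltTable)) return false;
    }
    return true;
}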
/**
 * Given an Object value, pick a partition to store the data. Currently only String objects can be
 * hashed.
 *
 * @param value The value to hash.
 * @param partitionCount The number of partitions to choose from.
 * @return A value between 0 and partitionCount-1, hopefully pretty evenly distributed.
 */
static int hashinate(Object value, int partitionCount) {
    if (value instanceof String) {
        String string = (String) value;
        try {
            byte[] bytes = string.getBytes("UTF-8");
            int hashCode = 0;
            for (int ii = 0; ii < bytes.length; ii++) {
                hashCode = 31 * hashCode + bytes[ii];
            }
            return java.lang.Math.abs(hashCode % partitionCount);
        } catch (UnsupportedEncodingException e) {
            hostLogger.l7dlog(
                Level.FATAL,
                LogKeys.host_TheHashinator_ExceptionHashingString.name(),
                new Object[] {string},
                e);
            HStore.crashDB();
        }
    }
    hostLogger.l7dlog(
        Level.FATAL,
        LogKeys.host_TheHashinator_AttemptedToHashinateNonLongOrString.name(),
        new Object[] {value.getClass().getName()},
        null);
    HStore.crashDB();
    return -1;
}
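// Illustrative sketch (not part of the original source): hashinate() is a
// Java-style 31x accumulation over the string's UTF-8 bytes, folded into the
// partition range with abs(hash % partitionCount). The standalone copy below
// reproduces that behavior for experimentation; the class name is hypothetical.
public final class HashinateSketch {
    static int hashinate(String value, int partitionCount) {
        int hashCode = 0;
        for (byte b : value.getBytes(java.nio.charset.StandardCharsets.UTF_8)) {
            hashCode = 31 * hashCode + b;  // signed bytes, same as the original loop
        }
        return Math.abs(hashCode % partitionCount);
    }

    public static void main(String[] args) {
        // e.g. route a key to one of 8 partitions: prints a stable value in [0, 7]
        System.out.println(hashinate("customer-42", 8));
    }
}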
/** Hash a plaintext password by applying SHA-1 twice. */
private byte[] extractPassword(String password) {
    MessageDigest md = null;
    try {
        md = MessageDigest.getInstance("SHA-1");
    } catch (final NoSuchAlgorithmException e) {
        LOG.l7dlog(Level.FATAL, LogKeys.compiler_VoltCompiler_NoSuchAlgorithm.name(), e);
        System.exit(-1);
    }
    final byte[] passwordHash = md.digest(md.digest(password.getBytes()));
    return passwordHash;
}
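// Illustrative sketch (not part of the original source): the catalog stores the
// double SHA-1 of the plaintext, hex-encoded (see catUser.setShadowpassword in
// compileDatabaseNode below). This standalone equivalent shows the full
// computation; the class and method names are hypothetical.
import java.security.MessageDigest;

public final class ShadowPasswordSketch {
    static String shadowPassword(String password) throws Exception {
        MessageDigest md = MessageDigest.getInstance("SHA-1");
        // hash twice, exactly as extractPassword does
        byte[] hash = md.digest(md.digest(password.getBytes()));
        StringBuilder hex = new StringBuilder();
        for (byte b : hash) {
            hex.append(String.format("%02x", b));  // same idea as Encoder.hexEncode
        }
        return hex.toString();
    }

    public static void main(String[] args) throws Exception {
        System.out.println(shadowPassword("sekrit"));  // 40 hex chars: SHA-1 is 20 bytes
    }
}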
void compileDatabaseNode(DatabaseType database) throws VoltCompilerException {
    final ArrayList<String> programs = new ArrayList<String>();
    final ArrayList<String> schemas = new ArrayList<String>();
    final ArrayList<ProcedureDescriptor> procedures = new ArrayList<ProcedureDescriptor>();
    final ArrayList<Class<?>> classDependencies = new ArrayList<Class<?>>();
    final ArrayList<String[]> partitions = new ArrayList<String[]>();

    final String databaseName = database.getName();

    // schema does not verify that the database is named "database"
    if (!databaseName.equals("database")) {
        final String msg =
            "VoltDB currently requires all database elements to be named "
                + "\"database\" (found: \"" + databaseName + "\")";
        throw new VoltCompilerException(msg);
    }

    // create the database in the catalog
    m_catalog.execute("add /clusters[cluster] databases " + databaseName);
    Database db = m_catalog.getClusters().get("cluster").getDatabases().get(databaseName);

    SnapshotType snapshotSettings = database.getSnapshot();
    if (snapshotSettings != null) {
        SnapshotSchedule schedule = db.getSnapshotschedule().add("default");
        String frequency = snapshotSettings.getFrequency();
        if (!frequency.endsWith("s") && !frequency.endsWith("m") && !frequency.endsWith("h")) {
            throw new VoltCompilerException(
                "Snapshot frequency " + frequency
                    + " must end with a time unit, one of [s, m, h] (seconds, minutes, hours)");
        }

        int frequencyInt = 0;
        String frequencySubstring = frequency.substring(0, frequency.length() - 1);
        try {
            frequencyInt = Integer.parseInt(frequencySubstring);
        } catch (Exception e) {
            throw new VoltCompilerException("Frequency " + frequencySubstring + " is not an integer");
        }

        String prefix = snapshotSettings.getPrefix();
        if (prefix == null || prefix.isEmpty()) {
            throw new VoltCompilerException("Snapshot prefix " + prefix + " is not a valid prefix");
        }
        if (prefix.contains("-") || prefix.contains(",")) {
            throw new VoltCompilerException("Snapshot prefix " + prefix + " cannot include , or -");
        }

        String path = snapshotSettings.getPath();
        if (path == null || path.isEmpty()) {
            throw new VoltCompilerException("Snapshot path " + path + " is not a valid path");
        }

        if (snapshotSettings.getRetain() == null) {
            throw new VoltCompilerException("Snapshot retain value not provided");
        }
        int retain = snapshotSettings.getRetain().intValue();
        if (retain < 1) {
            throw new VoltCompilerException(
                "Snapshot retain value " + retain + " is not a valid value. Must be 1 or greater.");
        }

        schedule.setFrequencyunit(frequency.substring(frequency.length() - 1, frequency.length()));
        schedule.setFrequencyvalue(frequencyInt);
        schedule.setPath(path);
        schedule.setPrefix(prefix);
        schedule.setRetain(retain);
    }

    // schemas/schema
    for (SchemasType.Schema schema : database.getSchemas().getSchema()) {
        LOG.l7dlog(
            Level.DEBUG,
            LogKeys.compiler_VoltCompiler_CatalogPath.name(),
            new Object[] {schema.getPath()},
            null);
        schemas.add(schema.getPath());
    }
    // groups/group
    if (database.getGroups() != null) {
        for (GroupsType.Group group : database.getGroups().getGroup()) {
            org.voltdb.catalog.Group catGroup = db.getGroups().add(group.getName());
            catGroup.setAdhoc(group.isAdhoc());
            catGroup.setSysproc(group.isSysproc());
        }
    }

    // users/user
    if (database.getUsers() != null) {
        for (UsersType.User user : database.getUsers().getUser()) {
            org.voltdb.catalog.User catUser = db.getUsers().add(user.getName());
            catUser.setAdhoc(user.isAdhoc());
            catUser.setSysproc(user.isSysproc());
            byte[] passwordHash = extractPassword(user.getPassword());
            catUser.setShadowpassword(Encoder.hexEncode(passwordHash));

            // process the @groups comma separated list
            if (user.getGroups() != null) {
                String[] grouplist = user.getGroups().split(",");
                for (final String group : grouplist) {
                    final GroupRef groupRef = catUser.getGroups().add(group);
                    final Group catalogGroup = db.getGroups().get(group);
                    if (catalogGroup != null) {
                        groupRef.setGroup(catalogGroup);
                    }
                }
            }
        }
    }

    // procedures/procedure
    for (ProceduresType.Procedure proc : database.getProcedures().getProcedure()) {
        procedures.add(getProcedure(proc));
    }

    // classdependencies/classdependency
    if (database.getClassdependencies() != null) {
        for (Classdependency dep : database.getClassdependencies().getClassdependency()) {
            classDependencies.add(getClassDependency(dep));
        }
    }

    // partitions/table
    if (database.getPartitions() != null) {
        for (org.voltdb.compiler.projectfile.PartitionsType.Partition table :
                database.getPartitions().getPartition()) {
            partitions.add(getPartition(table));
        }
    }

    String msg = "Database \"" + databaseName + "\" ";
    // TODO: schema allows 0 procedures. Testbase relies on this.
    if (procedures.size() == 0) {
        msg += "needs at least one \"procedure\" element "
            + "(currently has " + String.valueOf(procedures.size()) + ")";
        throw new VoltCompilerException(msg);
    }

    // shutdown and make a new hsqldb
    m_hsql = HSQLInterface.loadHsqldb();

    // Actually parse and handle all the programs
    for (final String programName : programs) {
        m_catalog.execute("add " + db.getPath() + " programs " + programName);
    }

    // Actually parse and handle all the DDL
    final DDLCompiler ddlcompiler = new DDLCompiler(this, m_hsql);
    for (final String schemaPath : schemas) {
        File schemaFile = null;

        if (schemaPath.contains(".jar!")) {
            String ddlText = null;
            try {
                ddlText = JarReader.readFileFromJarfile(schemaPath);
            } catch (final Exception e) {
                throw new VoltCompilerException(e);
            }
            schemaFile = VoltProjectBuilder.writeStringToTempFile(ddlText);
        } else {
            schemaFile = new File(schemaPath);
        }

        if (!schemaFile.isAbsolute()) {
            // Resolve schemaPath relative to the database definition xml file
            schemaFile = new File(new File(m_projectFileURL).getParent(), schemaPath);
        }

        // add the file object's path to the list of files for the jar
        m_ddlFilePaths.put(schemaFile.getName(), schemaFile.getPath());

        ddlcompiler.loadSchema(schemaFile.getAbsolutePath());
    }
    ddlcompiler.compileToCatalog(m_catalog, db);

    // Actually parse and handle all the partitions
    // this needs to happen before procedures are compiled
    msg = "In database \"" + databaseName + "\", ";
    final CatalogMap<Table> tables = db.getTables();
    for (final String[] partition : partitions) {
        final String tableName = partition[0];
        final String colName = partition[1];
        final Table t = tables.getIgnoreCase(tableName);
        if (t == null) {
            msg += "\"partition\" element has unknown \"table\" attribute '" + tableName + "'";
            throw new VoltCompilerException(msg);
        }
        final Column c = t.getColumns().getIgnoreCase(colName);
        // make sure the column exists
        if (c == null) {
            msg += "\"partition\" element has unknown \"column\" attribute '" + colName + "'";
            throw new VoltCompilerException(msg);
        }
        // make sure the column is marked not-nullable
        if (c.getNullable() == true) {
            msg += "Partition column '" + tableName + "." + colName + "' is nullable. "
                + "Partition columns must be constrained \"NOT NULL\".";
            throw new VoltCompilerException(msg);
        }
        t.setPartitioncolumn(c);
        t.setIsreplicated(false);

        // Set the destination tables of associated views non-replicated.
        // If a view's source table is replicated, then a full scan of the
        // associated view is single-sited. If the source is partitioned,
        // a full scan of the view must be distributed.
        final CatalogMap<MaterializedViewInfo> views = t.getViews();
        for (final MaterializedViewInfo mvi : views) {
            mvi.getDest().setIsreplicated(false);
        }
    }

    // add vertical partitions
    if (database.getVerticalpartitions() != null) {
        for (Verticalpartition vp : database.getVerticalpartitions().getVerticalpartition()) {
            try {
                addVerticalPartition(db, vp.getTable(), vp.getColumn(), vp.isIndexed());
            } catch (Exception ex) {
                throw new VoltCompilerException(
                    "Failed to create vertical partition for " + vp.getTable(), ex);
            }
        }
    }

    // this should reorder the tables and partitions all alphabetically
    String catData = m_catalog.serialize();
    m_catalog = new Catalog();
    m_catalog.execute(catData);
    db = m_catalog.getClusters().get("cluster").getDatabases().get(databaseName);

    // add database estimates info
    addDatabaseEstimatesInfo(m_estimates, db);
    addSystemProcsToCatalog(m_catalog, db);

    // Process and add exports and connectors to the catalog
    // Must do this before compiling procedures to deny updates
    // on append-only tables.
    if (database.getExports() != null) {
        // currently, only a single connector is allowed
        Connector conn = database.getExports().getConnector();
        compileConnector(conn, db);
    }

    // Actually parse and handle all the Procedures
    for (final ProcedureDescriptor procedureDescriptor : procedures) {
        final String procedureName = procedureDescriptor.m_className;
        m_currentFilename = procedureName.substring(procedureName.lastIndexOf('.') + 1);
        m_currentFilename += ".class";
        ProcedureCompiler.compile(this, m_hsql, m_estimates, m_catalog, db, procedureDescriptor);
    }

    // Add all the class dependencies to the output jar
    for (final Class<?> classDependency : classDependencies) {
        addClassToJar(classDependency, this);
    }

    m_hsql.close();
}
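// Illustrative sketch (not part of the original source): the snapshot
// <frequency> attribute validated in compileDatabaseNode is a positive integer
// followed by a one-letter unit, e.g. "30s", "5m", or "2h". A standalone
// validator with the same rules might look like this; the method name is
// hypothetical.
static int parseSnapshotFrequencyValue(String frequency) {
    char unit = frequency.charAt(frequency.length() - 1);
    if (unit != 's' && unit != 'm' && unit != 'h') {
        throw new IllegalArgumentException(
            "Snapshot frequency must end with one of [s, m, h]: " + frequency);
    }
    // throws NumberFormatException for a non-integer value, mirroring the
    // VoltCompilerException raised by compileDatabaseNode
    return Integer.parseInt(frequency.substring(0, frequency.length() - 1));
}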
/**
 * @param projectFileURL URL of the project file.
 * @param clusterConfig Object containing desired physical cluster parameters.
 * @param jarOutputPath The location to write the finished JAR to.
 * @param output Where to print status/errors to, usually stdout.
 * @param procInfoOverrides Optional overridden values for procedure annotations.
 */
public boolean compile(
        final String projectFileURL,
        final ClusterConfig clusterConfig,
        final String jarOutputPath,
        final PrintStream output,
        final Map<String, ProcInfoData> procInfoOverrides) {
    m_hsql = null;
    m_projectFileURL = projectFileURL;
    m_jarOutputPath = jarOutputPath;
    m_outputStream = output;
    // use this map as default annotation values
    m_procInfoOverrides = procInfoOverrides;

    LOG.l7dlog(
        Level.DEBUG,
        LogKeys.compiler_VoltCompiler_LeaderAndHostCountAndSitesPerHost.name(),
        new Object[] {
            clusterConfig.getLeaderAddress(),
            clusterConfig.getHostCount(),
            clusterConfig.getSitesPerHost()
        },
        null);

    // do all the work to get the catalog
    final Catalog catalog = compileCatalog(projectFileURL, clusterConfig);
    if (catalog == null) {
        LOG.error(
            "VoltCompiler had " + m_errors.size() + " errors\n" + StringUtil.join("\n", m_errors));
        return false;
    }

    // WRITE CATALOG TO JAR HERE
    final String catalogCommands = catalog.serialize();

    byte[] catalogBytes = null;
    try {
        catalogBytes = catalogCommands.getBytes("UTF-8");
    } catch (final UnsupportedEncodingException e1) {
        addErr("Can't encode the compiled catalog file correctly");
        return false;
    }

    // Create Dtxn.Coordinator configuration for cluster
    // byte[] dtxnConfBytes = null;
    // try {
    //     dtxnConfBytes = HStoreDtxnConf.toHStoreDtxnConf(catalog).getBytes("UTF-8");
    // } catch (final Exception e1) {
    //     addErr("Can't encode the Dtxn.Coordinator configuration file correctly");
    //     return false;
    // }

    try {
        // m_jarBuilder.addEntry("dtxn.conf", dtxnConfBytes);
        m_jarBuilder.addEntry(CatalogUtil.CATALOG_FILENAME, catalogBytes);
        m_jarBuilder.addEntry("project.xml", new File(projectFileURL));
        for (final Entry<String, String> e : m_ddlFilePaths.entrySet()) {
            m_jarBuilder.addEntry(e.getKey(), new File(e.getValue()));
        }
        m_jarBuilder.writeJarToDisk(jarOutputPath);
    } catch (final VoltCompilerException e) {
        return false;
    }

    assert (!hasErrors());
    if (hasErrors()) {
        return false;
    }

    return true;
}
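// Illustrative usage sketch (not part of the original source): invoking the
// compiler against a project file. It assumes VoltCompiler has a no-arg
// constructor, and buildClusterConfig() is a hypothetical stand-in for however
// the caller assembles the leader address, host count, and sites per host.
VoltCompiler compiler = new VoltCompiler();
ClusterConfig clusterConfig = buildClusterConfig();  // hypothetical helper
boolean success = compiler.compile(
    "project.xml",   // projectFileURL
    clusterConfig,
    "catalog.jar",   // jarOutputPath
    System.out,      // status/errors go to stdout
    null);           // no procedure annotation overrides
if (!success) {
    System.err.println("catalog compilation failed; see errors above");
}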