public ClusterSchema loadClusterSchema(ObjectId id_cluster_schema, List<SlaveServer> slaveServers)
    throws KettleException {
  ClusterSchema clusterSchema = new ClusterSchema();
  RowMetaAndData row = getClusterSchema(id_cluster_schema);

  clusterSchema.setObjectId(id_cluster_schema);
  clusterSchema.setName(row.getString(KettleDatabaseRepository.FIELD_CLUSTER_NAME, null));
  clusterSchema.setBasePort(row.getString(KettleDatabaseRepository.FIELD_CLUSTER_BASE_PORT, null));
  clusterSchema.setSocketsBufferSize(
      row.getString(KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_BUFFER_SIZE, null));
  clusterSchema.setSocketsFlushInterval(
      row.getString(KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_FLUSH_INTERVAL, null));
  clusterSchema.setSocketsCompressed(
      row.getBoolean(KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_COMPRESSED, true));
  clusterSchema.setDynamic(row.getBoolean(KettleDatabaseRepository.FIELD_CLUSTER_DYNAMIC, true));

  ObjectId[] pids = repository.getClusterSlaveIDs(id_cluster_schema);
  for (int i = 0; i < pids.length; i++) {
    SlaveServer slaveServer = repository.loadSlaveServer(pids[i], null); // Load last version
    SlaveServer reference = SlaveServer.findSlaveServer(slaveServers, slaveServer.getName());
    if (reference != null) {
      clusterSchema.getSlaveServers().add(reference);
    } else {
      clusterSchema.getSlaveServers().add(slaveServer);
    }
  }

  return clusterSchema;
}
public void encodeClusterSchema(Element e, TransMeta transMeta) {
  JSONArray jsonArray = new JSONArray();
  for (int i = 0; i < transMeta.getClusterSchemas().size(); i++) {
    ClusterSchema clusterSchema = transMeta.getClusterSchemas().get(i);

    JSONObject jsonObject = new JSONObject();
    jsonObject.put("name", clusterSchema.getName());
    jsonObject.put("base_port", clusterSchema.getBasePort());
    jsonObject.put("sockets_buffer_size", clusterSchema.getSocketsBufferSize());
    jsonObject.put("sockets_flush_interval", clusterSchema.getSocketsFlushInterval());
    jsonObject.put("sockets_compressed", clusterSchema.isSocketsCompressed() ? "Y" : "N");
    jsonObject.put("dynamic", clusterSchema.isDynamic() ? "Y" : "N");

    JSONArray slaveservers = new JSONArray();
    for (int j = 0; j < clusterSchema.getSlaveServers().size(); j++) {
      SlaveServer slaveServer = clusterSchema.getSlaveServers().get(j);
      slaveservers.add(SlaveServerCodec.encode(slaveServer));
    }
    jsonObject.put("slaveservers", slaveservers);

    jsonArray.add(jsonObject);
  }
  e.setAttribute("clusterSchemas", jsonArray.toString());
}
/**
 * Add clusters in the repository to this transformation if they are not yet present.
 *
 * @param transMeta the transformation to load into
 * @param overWriteShared if an object with the same name exists, overwrite it
 * @param clusterSchemas the cluster schemas read from the repository
 */
protected void readClusters(
    TransMeta transMeta, boolean overWriteShared, List<ClusterSchema> clusterSchemas) {
  for (ClusterSchema clusterSchema : clusterSchemas) {
    if (overWriteShared || transMeta.findClusterSchema(clusterSchema.getName()) == null) {
      if (!Const.isEmpty(clusterSchema.getName())) {
        clusterSchema.shareVariablesWith(transMeta);
        transMeta.addOrReplaceClusterSchema(clusterSchema);
        if (!overWriteShared) {
          clusterSchema.setChanged(false);
        }
      }
    }
  }
}
public ClusterSchema decodeClusterSchema(
    JSONObject jsonObject, List<SlaveServer> referenceSlaveServers) {
  ClusterSchema clusterSchema = new ClusterSchema();
  clusterSchema.setName(jsonObject.optString("name"));
  clusterSchema.setBasePort(jsonObject.optString("base_port"));
  clusterSchema.setSocketsBufferSize(jsonObject.optString("sockets_buffer_size"));
  clusterSchema.setSocketsFlushInterval(jsonObject.optString("sockets_flush_interval"));
  clusterSchema.setSocketsCompressed(
      "Y".equalsIgnoreCase(jsonObject.optString("sockets_compressed")));
  clusterSchema.setDynamic("Y".equalsIgnoreCase(jsonObject.optString("dynamic")));

  ArrayList<SlaveServer> slaveServers = new ArrayList<SlaveServer>();
  JSONArray slavesNode = jsonObject.optJSONArray("slaveservers");
  if (slavesNode != null) {
    for (int i = 0; i < slavesNode.size(); i++) {
      JSONObject slaveServerJson = slavesNode.getJSONObject(i);
      SlaveServer slaveServer =
          SlaveServer.findSlaveServer(referenceSlaveServers, slaveServerJson.optString("name"));
      if (slaveServer != null) {
        slaveServers.add(slaveServer);
      }
    }
    clusterSchema.setSlaveServers(slaveServers);
  }
  return clusterSchema;
}
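/**
 * A minimal usage sketch for decodeClusterSchema(), assuming the json-lib style
 * JSONObject/JSONArray API used above and SlaveServer's default constructor and setName().
 * The payload mirrors the field names written by encodeClusterSchema(); all names and values
 * here are hypothetical examples, and this helper is illustrative, not part of the codec.
 */
private ClusterSchema exampleDecodeClusterSchema() {
  // The reference list normally comes from transMeta.getSlaveServers(); we fake one entry here.
  SlaveServer master = new SlaveServer();
  master.setName("master1");
  List<SlaveServer> referenceSlaveServers = new ArrayList<SlaveServer>();
  referenceSlaveServers.add(master);

  JSONObject payload = new JSONObject();
  payload.put("name", "local cluster");
  payload.put("base_port", "40000");
  payload.put("sockets_buffer_size", "2000");
  payload.put("sockets_flush_interval", "5000");
  payload.put("sockets_compressed", "Y");
  payload.put("dynamic", "N");

  JSONArray slaves = new JSONArray();
  JSONObject slave = new JSONObject();
  slave.put("name", "master1"); // only servers resolvable in the reference list are kept
  slaves.add(slave);
  payload.put("slaveservers", slaves);

  return decodeClusterSchema(payload, referenceSlaveServers);
}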
public void saveSharedObjects(
    final RepositoryElementInterface element, final String versionComment)
    throws KettleException {
  TransMeta transMeta = (TransMeta) element;
  // First store the databases and other depending objects in the transformation.
  //
  // Only store if the database has actually changed or doesn't have an object ID (imported)
  //
  for (DatabaseMeta databaseMeta : transMeta.getDatabases()) {
    if (databaseMeta.hasChanged() || databaseMeta.getObjectId() == null) {
      if (databaseMeta.getObjectId() == null
          || unifiedRepositoryConnectionAclService.hasAccess(
              databaseMeta.getObjectId(), RepositoryFilePermission.WRITE)) {
        repo.save(databaseMeta, versionComment, null);
      } else {
        log.logError(
            BaseMessages.getString(
                PKG,
                "PurRepository.ERROR_0004_DATABASE_UPDATE_ACCESS_DENIED",
                databaseMeta.getName()));
      }
    }
  }

  // Store the slave servers...
  //
  for (SlaveServer slaveServer : transMeta.getSlaveServers()) {
    if (slaveServer.hasChanged() || slaveServer.getObjectId() == null) {
      repo.save(slaveServer, versionComment, null);
    }
  }

  // Store the cluster schemas
  //
  for (ClusterSchema clusterSchema : transMeta.getClusterSchemas()) {
    if (clusterSchema.hasChanged() || clusterSchema.getObjectId() == null) {
      repo.save(clusterSchema, versionComment, null);
    }
  }

  // Save the partition schemas
  //
  for (PartitionSchema partitionSchema : transMeta.getPartitionSchemas()) {
    if (partitionSchema.hasChanged() || partitionSchema.getObjectId() == null) {
      repo.save(partitionSchema, versionComment, null);
    }
  }
}
protected void replaceSharedObjects(TransMeta transMeta) throws KettleException {
  replaceSharedObjects((AbstractMeta) transMeta);

  for (ClusterSchema clusterSchema : getSharedObjects(ClusterSchema.class)) {
    int index = transMeta.getClusterSchemas().indexOf(clusterSchema);
    if (index < 0) {
      transMeta.getClusterSchemas().add(clusterSchema);
    } else {
      ClusterSchema imported = transMeta.getClusterSchemas().get(index);
      // Preserve the object id so we can update without having to look up the id
      imported.setObjectId(clusterSchema.getObjectId());
      if (equals(clusterSchema, imported)
          || !getPromptResult(
              BaseMessages.getString(
                  PKG,
                  "RepositoryImporter.Dialog.ClusterSchemaExistsOverWrite.Message",
                  imported.getName()),
              BaseMessages.getString(
                  PKG,
                  "RepositoryImporter.Dialog.ConnectionExistsOverWrite.DontShowAnyMoreMessage"),
              IMPORT_ASK_ABOUT_REPLACE_CS)) {
        imported.replaceMeta(clusterSchema);
        // We didn't actually change anything
        imported.clearChanged();
      } else {
        imported.setChanged();
      }
    }
  }

  for (PartitionSchema partitionSchema : getSharedObjects(PartitionSchema.class)) {
    int index = transMeta.getPartitionSchemas().indexOf(partitionSchema);
    if (index < 0) {
      transMeta.getPartitionSchemas().add(partitionSchema);
    } else {
      PartitionSchema imported = transMeta.getPartitionSchemas().get(index);
      // Preserve the object id so we can update without having to look up the id
      imported.setObjectId(partitionSchema.getObjectId());
      if (equals(partitionSchema, imported)
          || !getPromptResult(
              BaseMessages.getString(
                  PKG,
                  "RepositoryImporter.Dialog.PartitionSchemaExistsOverWrite.Message",
                  imported.getName()),
              BaseMessages.getString(
                  PKG,
                  "RepositoryImporter.Dialog.ConnectionExistsOverWrite.DontShowAnyMoreMessage"),
              IMPORT_ASK_ABOUT_REPLACE_PS)) {
        imported.replaceMeta(partitionSchema);
        // We didn't actually change anything
        imported.clearChanged();
      } else {
        imported.setChanged();
      }
    }
  }
}
private synchronized ObjectId insertCluster(ClusterSchema clusterSchema) throws KettleException {
  if (getClusterID(clusterSchema.getName()) != null) {
    // This cluster schema name is already in use. Throw an exception.
    throw new KettleObjectExistsException(
        "Failed to create object in repository. Object ["
            + clusterSchema.getName()
            + "] already exists.");
  }

  ObjectId id = repository.connectionDelegate.getNextClusterID();

  RowMetaAndData table = new RowMetaAndData();
  table.addValue(
      new ValueMeta(
          KettleDatabaseRepository.FIELD_CLUSTER_ID_CLUSTER, ValueMetaInterface.TYPE_INTEGER),
      id);
  table.addValue(
      new ValueMeta(KettleDatabaseRepository.FIELD_CLUSTER_NAME, ValueMetaInterface.TYPE_STRING),
      clusterSchema.getName());
  table.addValue(
      new ValueMeta(
          KettleDatabaseRepository.FIELD_CLUSTER_BASE_PORT, ValueMetaInterface.TYPE_STRING),
      clusterSchema.getBasePort());
  table.addValue(
      new ValueMeta(
          KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_BUFFER_SIZE,
          ValueMetaInterface.TYPE_STRING),
      clusterSchema.getSocketsBufferSize());
  table.addValue(
      new ValueMeta(
          KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_FLUSH_INTERVAL,
          ValueMetaInterface.TYPE_STRING),
      clusterSchema.getSocketsFlushInterval());
  table.addValue(
      new ValueMeta(
          KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_COMPRESSED,
          ValueMetaInterface.TYPE_BOOLEAN),
      Boolean.valueOf(clusterSchema.isSocketsCompressed()));
  table.addValue(
      new ValueMeta(
          KettleDatabaseRepository.FIELD_CLUSTER_DYNAMIC, ValueMetaInterface.TYPE_BOOLEAN),
      Boolean.valueOf(clusterSchema.isDynamic()));

  repository
      .connectionDelegate
      .getDatabase()
      .prepareInsert(table.getRowMeta(), KettleDatabaseRepository.TABLE_R_CLUSTER);
  repository.connectionDelegate.getDatabase().setValuesInsert(table);
  repository.connectionDelegate.getDatabase().insertRow();
  repository.connectionDelegate.getDatabase().closeInsert();

  return id;
}
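/**
 * Because the name check runs before any row is written, creating a duplicate fails fast.
 * A hedged sketch: the delegate wiring and a pre-existing "cluster1" row are assumed, and this
 * helper is illustrative, not part of the original delegate.
 */
private void exampleInsertDuplicateCluster() throws KettleException {
  ClusterSchema duplicate = new ClusterSchema();
  duplicate.setName("cluster1"); // assume this name already exists in R_CLUSTER

  try {
    insertCluster(duplicate); // the name lookup hits, so no id is allocated and no row is inserted
  } catch (KettleObjectExistsException e) {
    // Expected: cluster schema names must be unique in the repository.
  }
}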
public void newClusteringSchema(TransMeta transMeta) {
  ClusterSchema clusterSchema = new ClusterSchema();

  ClusterSchemaDialog dialog =
      new ClusterSchemaDialog(
          spoon.getShell(),
          clusterSchema,
          transMeta.getClusterSchemas(),
          transMeta.getSlaveServers());

  if (dialog.open()) {
    List<ClusterSchema> clusterSchemas = transMeta.getClusterSchemas();
    if (isDuplicate(clusterSchemas, clusterSchema)) {
      new ErrorDialog(
          spoon.getShell(),
          getMessage("Spoon.Dialog.ErrorSavingCluster.Title"),
          getMessage("Spoon.Dialog.ErrorSavingCluster.Message", clusterSchema.getName()),
          new KettleException(getMessage("Spoon.Dialog.ErrorSavingCluster.NotUnique")));
      return;
    }

    clusterSchemas.add(clusterSchema);

    if (spoon.rep != null) {
      try {
        if (!spoon.rep.getSecurityProvider().isReadOnly()) {
          spoon.rep.save(clusterSchema, Const.VERSION_COMMENT_INITIAL_VERSION, null);
        } else {
          throw new KettleException(
              BaseMessages.getString(PKG, "Spoon.Dialog.Exception.ReadOnlyRepositoryUser"));
        }
      } catch (KettleException e) {
        new ErrorDialog(
            spoon.getShell(),
            getMessage("Spoon.Dialog.ErrorSavingCluster.Title"),
            getMessage("Spoon.Dialog.ErrorSavingCluster.Message", clusterSchema.getName()),
            e);
      }
    }

    spoon.refreshTree();
  }
}
public synchronized void updateCluster(ClusterSchema clusterSchema) throws KettleException {
  RowMetaAndData table = new RowMetaAndData();
  table.addValue(
      new ValueMeta(
          KettleDatabaseRepository.FIELD_CLUSTER_ID_CLUSTER, ValueMetaInterface.TYPE_INTEGER),
      clusterSchema.getObjectId());
  table.addValue(
      new ValueMeta(KettleDatabaseRepository.FIELD_CLUSTER_NAME, ValueMetaInterface.TYPE_STRING),
      clusterSchema.getName());
  table.addValue(
      new ValueMeta(
          KettleDatabaseRepository.FIELD_CLUSTER_BASE_PORT, ValueMetaInterface.TYPE_STRING),
      clusterSchema.getBasePort());
  table.addValue(
      new ValueMeta(
          KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_BUFFER_SIZE,
          ValueMetaInterface.TYPE_STRING),
      clusterSchema.getSocketsBufferSize());
  table.addValue(
      new ValueMeta(
          KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_FLUSH_INTERVAL,
          ValueMetaInterface.TYPE_STRING),
      clusterSchema.getSocketsFlushInterval());
  table.addValue(
      new ValueMeta(
          KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_COMPRESSED,
          ValueMetaInterface.TYPE_BOOLEAN),
      Boolean.valueOf(clusterSchema.isSocketsCompressed()));
  table.addValue(
      new ValueMeta(
          KettleDatabaseRepository.FIELD_CLUSTER_DYNAMIC, ValueMetaInterface.TYPE_BOOLEAN),
      Boolean.valueOf(clusterSchema.isDynamic()));

  repository.connectionDelegate.updateTableRow(
      KettleDatabaseRepository.TABLE_R_CLUSTER,
      KettleDatabaseRepository.FIELD_CLUSTER_ID_CLUSTER,
      table,
      clusterSchema.getObjectId());
}
/**
 * @return The objects that are selected in the tree; selections we can't identify (titles etc.)
 *     are skipped.
 */
public TreeSelection[] getTreeObjects(final Tree tree, Tree selectionTree, Tree coreObjectsTree) {
  List<TreeSelection> objects = new ArrayList<TreeSelection>();

  if (selectionTree != null && !selectionTree.isDisposed() && tree.equals(selectionTree)) {
    TreeItem[] selection = selectionTree.getSelection();
    for (int s = 0; s < selection.length; s++) {
      TreeItem treeItem = selection[s];
      String[] path = ConstUI.getTreeStrings(treeItem);

      TreeSelection object = null;

      switch (path.length) {
        case 0:
          break;

        case 1: // ------complete-----
          if (path[0].equals(Spoon.STRING_TRANSFORMATIONS)) { // the top level Transformations entry
            object = new TreeSelection(path[0], TransMeta.class);
          }
          if (path[0].equals(Spoon.STRING_JOBS)) { // the top level Jobs entry
            object = new TreeSelection(path[0], JobMeta.class);
          }
          break;

        case 2: // ------complete-----
          if (path[0].equals(Spoon.STRING_BUILDING_BLOCKS)) { // the top level Building blocks entry
            if (path[1].equals(Spoon.STRING_TRANS_BASE)) {
              object = new TreeSelection(path[1], PluginInterface.class);
            }
          }
          if (path[0].equals(Spoon.STRING_TRANSFORMATIONS)) { // Transformation title
            object = new TreeSelection(path[1], spoon.delegates.trans.getTransformation(path[1]));
          }
          if (path[0].equals(Spoon.STRING_JOBS)) { // Job title
            object = new TreeSelection(path[1], spoon.delegates.jobs.getJob(path[1]));
          }
          break;

        case 3: // ------complete-----
          if (path[0].equals(Spoon.STRING_TRANSFORMATIONS)) { // Transformation title
            TransMeta transMeta = spoon.delegates.trans.getTransformation(path[1]);
            if (path[2].equals(Spoon.STRING_CONNECTIONS)) {
              object = new TreeSelection(path[2], DatabaseMeta.class, transMeta);
            }
            if (path[2].equals(Spoon.STRING_STEPS)) {
              object = new TreeSelection(path[2], StepMeta.class, transMeta);
            }
            if (path[2].equals(Spoon.STRING_HOPS)) {
              object = new TreeSelection(path[2], TransHopMeta.class, transMeta);
            }
            if (path[2].equals(Spoon.STRING_PARTITIONS)) {
              object = new TreeSelection(path[2], PartitionSchema.class, transMeta);
            }
            if (path[2].equals(Spoon.STRING_SLAVES)) {
              object = new TreeSelection(path[2], SlaveServer.class, transMeta);
            }
            if (path[2].equals(Spoon.STRING_CLUSTERS)) {
              object = new TreeSelection(path[2], ClusterSchema.class, transMeta);
            }
          }
          if (path[0].equals(Spoon.STRING_JOBS)) { // Job title
            JobMeta jobMeta = spoon.delegates.jobs.getJob(path[1]);
            if (path[2].equals(Spoon.STRING_CONNECTIONS)) {
              object = new TreeSelection(path[2], DatabaseMeta.class, jobMeta);
            }
            if (path[2].equals(Spoon.STRING_JOB_ENTRIES)) {
              object = new TreeSelection(path[2], JobEntryCopy.class, jobMeta);
            }
            if (path[2].equals(Spoon.STRING_SLAVES)) {
              object = new TreeSelection(path[2], SlaveServer.class, jobMeta);
            }
          }
          break;

        case 4: // ------complete-----
          if (path[0].equals(Spoon.STRING_TRANSFORMATIONS)) { // The name of a transformation
            final TransMeta transMeta = spoon.delegates.trans.getTransformation(path[1]);
            if (transMeta != null) {
              if (path[2].equals(Spoon.STRING_CONNECTIONS)) {
                String dbName = path[3];
                DatabaseMeta databaseMeta = transMeta.findDatabase(dbName);
                if (databaseMeta != null) {
                  dbName = databaseMeta.getName();
                }
                object = new TreeSelection(dbName, databaseMeta, transMeta);
              }
              if (path[2].equals(Spoon.STRING_STEPS)) {
                object = new TreeSelection(path[3], transMeta.findStep(path[3]), transMeta);
              }
              if (path[2].equals(Spoon.STRING_HOPS)) {
                object = new TreeSelection(path[3], transMeta.findTransHop(path[3]), transMeta);
              }
              if (path[2].equals(Spoon.STRING_PARTITIONS)) {
                object =
                    new TreeSelection(path[3], transMeta.findPartitionSchema(path[3]), transMeta);
              }
              if (path[2].equals(Spoon.STRING_SLAVES)) {
                object = new TreeSelection(path[3], transMeta.findSlaveServer(path[3]), transMeta);
              }
              if (path[2].equals(Spoon.STRING_CLUSTERS)) {
                object =
                    new TreeSelection(path[3], transMeta.findClusterSchema(path[3]), transMeta);
              }
            }
          }
          if (path[0].equals(Spoon.STRING_JOBS)) { // The name of a job
            JobMeta jobMeta = spoon.delegates.jobs.getJob(path[1]);
            if (jobMeta != null && path[2].equals(Spoon.STRING_CONNECTIONS)) {
              String dbName = path[3];
              DatabaseMeta databaseMeta = jobMeta.findDatabase(dbName);
              if (databaseMeta != null) {
                dbName = databaseMeta.getName();
              }
              object = new TreeSelection(dbName, databaseMeta, jobMeta);
            }
            if (jobMeta != null && path[2].equals(Spoon.STRING_JOB_ENTRIES)) {
              object = new TreeSelection(path[3], jobMeta.findJobEntry(path[3]), jobMeta);
            }
            if (jobMeta != null && path[2].equals(Spoon.STRING_SLAVES)) {
              object = new TreeSelection(path[3], jobMeta.findSlaveServer(path[3]), jobMeta);
            }
          }
          break;

        case 5:
          if (path[0].equals(Spoon.STRING_TRANSFORMATIONS)) { // The name of a transformation
            TransMeta transMeta = spoon.delegates.trans.getTransformation(path[1]);
            if (transMeta != null && path[2].equals(Spoon.STRING_CLUSTERS)) {
              ClusterSchema clusterSchema = transMeta.findClusterSchema(path[3]);
              object =
                  new TreeSelection(
                      path[4], clusterSchema.findSlaveServer(path[4]), clusterSchema, transMeta);
            }
          }
          break;

        default:
          break;
      }

      if (object != null) {
        objects.add(object);
      }
    }
  }

  if (tree != null && coreObjectsTree != null && tree.equals(coreObjectsTree)) {
    TreeItem[] selection = coreObjectsTree.getSelection();
    for (int s = 0; s < selection.length; s++) {
      TreeItem treeItem = selection[s];
      String[] path = ConstUI.getTreeStrings(treeItem);

      TreeSelection object = null;

      switch (path.length) {
        case 0:
          break;

        case 2: // Job entries
          if (spoon.showJob) {
            PluginRegistry registry = PluginRegistry.getInstance();
            Class<? extends PluginTypeInterface> pluginType = JobEntryPluginType.class;
            PluginInterface plugin = registry.findPluginWithName(pluginType, path[1]);

            // Retry for Start
            //
            if (plugin == null) {
              if (path[1].equals(JobMeta.STRING_SPECIAL_START)) {
                plugin = registry.findPluginWithId(pluginType, JobMeta.STRING_SPECIAL);
              }
            }
            // Retry for Dummy
            //
            if (plugin == null) {
              if (path[1].equals(JobMeta.STRING_SPECIAL_DUMMY)) {
                plugin = registry.findPluginWithId(pluginType, JobMeta.STRING_SPECIAL);
              }
            }

            if (plugin != null) {
              object = new TreeSelection(path[1], plugin);
            }
          }

          if (spoon.showTrans) { // Steps
            object =
                new TreeSelection(
                    path[1],
                    PluginRegistry.getInstance()
                        .findPluginWithName(StepPluginType.class, path[1]));
          }
          break;

        default:
          break;
      }

      if (object != null) {
        objects.add(object);
      }
    }
  }

  return objects.toArray(new TreeSelection[objects.size()]);
}
public void saveClusterSchema(
    ClusterSchema clusterSchema,
    String versionComment,
    ObjectId id_transformation,
    boolean isUsedByTransformation,
    boolean overwrite)
    throws KettleException {
  ObjectId existingClusterSchemaId = getClusterID(clusterSchema.getName());
  if (existingClusterSchemaId != null) {
    clusterSchema.setObjectId(existingClusterSchemaId);
  }

  if (clusterSchema.getObjectId() == null) {
    // New cluster schema
    clusterSchema.setObjectId(insertCluster(clusterSchema));
  } else {
    // If we received a clusterSchemaId and it is different from the cluster schema we are
    // working with...
    if (existingClusterSchemaId != null
        && !clusterSchema.getObjectId().equals(existingClusterSchemaId)) {
      // A cluster with this name already exists
      if (overwrite) {
        // Proceed with save, removing the original version from the repository first
        repository.deleteClusterSchema(existingClusterSchemaId);
        updateCluster(clusterSchema);
      } else {
        throw new KettleObjectExistsException(
            "Failed to save object to repository. Object ["
                + clusterSchema.getName()
                + "] already exists.");
      }
    } else {
      // There are no naming collisions (either it is the same object or the name is unique)
      updateCluster(clusterSchema);
    }
  }

  repository.delClusterSlaves(clusterSchema.getObjectId());

  // Also save the used slave server references.
  for (int i = 0; i < clusterSchema.getSlaveServers().size(); i++) {
    SlaveServer slaveServer = clusterSchema.getSlaveServers().get(i);
    if (slaveServer.getObjectId() == null) { // oops, not yet saved!
      repository.save(
          slaveServer,
          versionComment,
          null,
          id_transformation,
          isUsedByTransformation,
          overwrite);
    }
    repository.insertClusterSlave(clusterSchema, slaveServer);
  }

  // Save a link to the transformation to keep track of the use of this cluster schema.
  // Only save it if it's really used by the transformation.
  if (isUsedByTransformation) {
    repository.insertTransformationCluster(id_transformation, clusterSchema.getObjectId());
  }
}
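/**
 * A hedged usage sketch of the save flow above, assuming a connected delegate and a valid
 * id_transformation. The first call inserts because no id or name match exists yet; the second
 * call resolves the existing id via getClusterID() and updates the same row in place. This
 * helper is illustrative, not part of the original delegate.
 */
private void exampleSaveClusterSchemaTwice(ObjectId id_transformation) throws KettleException {
  ClusterSchema schema = new ClusterSchema();
  schema.setName("cluster1");
  schema.setBasePort("40000");

  // First save: no object id yet and no row with this name, so insertCluster() runs.
  saveClusterSchema(schema, "initial version", id_transformation, true, false);

  // Second save: the name resolves to the existing id, so updateCluster() runs and the
  // slave-server links are deleted and rewritten.
  schema.setBasePort("40001");
  saveClusterSchema(schema, "changed base port", id_transformation, true, false);
}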
public void dataNodeToElement(final DataNode rootNode, final RepositoryElementInterface element)
    throws KettleException {
  TransMeta transMeta = (TransMeta) element;

  List<String> privateTransformationDatabases = null;
  // Read the private databases.
  DataNode privateDatabases = rootNode.getNode(NODE_TRANS_PRIVATE_DATABASES);
  // If the node exists, the element uses the new format; otherwise the list stays null.
  if (privateDatabases != null) {
    privateTransformationDatabases = new ArrayList<String>();
    for (DataNode privateDatabase : privateDatabases.getNodes()) {
      privateTransformationDatabases.add(privateDatabase.getName());
    }
  }
  transMeta.setPrivateTransformationDatabases(privateTransformationDatabases);

  // Read the steps...
  //
  DataNode stepsNode = rootNode.getNode(NODE_STEPS);
  for (DataNode stepNode : stepsNode.getNodes()) {
    StepMeta stepMeta = new StepMeta(new StringObjectId(stepNode.getId().toString()));
    stepMeta.setParentTransMeta(transMeta); // for tracing, retain hierarchy

    // Read the basics
    //
    stepMeta.setName(getString(stepNode, PROP_NAME));
    if (stepNode.hasProperty(PROP_DESCRIPTION)) {
      stepMeta.setDescription(getString(stepNode, PROP_DESCRIPTION));
    }
    stepMeta.setDistributes(stepNode.getProperty(PROP_STEP_DISTRIBUTE).getBoolean());
    DataProperty rowDistributionProperty = stepNode.getProperty(PROP_STEP_ROW_DISTRIBUTION);
    String rowDistributionCode =
        rowDistributionProperty == null ? null : rowDistributionProperty.getString();
    RowDistributionInterface rowDistribution =
        PluginRegistry.getInstance()
            .loadClass(
                RowDistributionPluginType.class,
                rowDistributionCode,
                RowDistributionInterface.class);
    stepMeta.setRowDistribution(rowDistribution);
    stepMeta.setDraw(stepNode.getProperty(PROP_STEP_GUI_DRAW).getBoolean());
    int copies = (int) stepNode.getProperty(PROP_STEP_COPIES).getLong();
    String copiesString =
        stepNode.getProperty(PROP_STEP_COPIES_STRING) != null
            ? stepNode.getProperty(PROP_STEP_COPIES_STRING).getString()
            : StringUtils.EMPTY;
    if (!Const.isEmpty(copiesString)) {
      stepMeta.setCopiesString(copiesString);
    } else {
      stepMeta.setCopies(copies); // for backward compatibility
    }
    int x = (int) stepNode.getProperty(PROP_STEP_GUI_LOCATION_X).getLong();
    int y = (int) stepNode.getProperty(PROP_STEP_GUI_LOCATION_Y).getLong();
    stepMeta.setLocation(x, y);

    // Load the group attributes map
    //
    AttributesMapUtil.loadAttributesMap(stepNode, stepMeta);

    String stepType = getString(stepNode, PROP_STEP_TYPE);

    // Create a new StepMetaInterface object...
    //
    PluginRegistry registry = PluginRegistry.getInstance();
    PluginInterface stepPlugin = registry.findPluginWithId(StepPluginType.class, stepType);

    StepMetaInterface stepMetaInterface = null;
    if (stepPlugin != null) {
      stepMetaInterface = (StepMetaInterface) registry.loadClass(stepPlugin);
      // revert to the default in case we loaded an alternate version
      stepType = stepPlugin.getIds()[0];
    } else {
      stepMeta.setStepMetaInterface(
          (StepMetaInterface) new MissingTrans(stepMeta.getName(), stepType));
      transMeta.addMissingTrans((MissingTrans) stepMeta.getStepMetaInterface());
    }

    stepMeta.setStepID(stepType);

    // Read the metadata from the repository too...
    //
    RepositoryProxy proxy = new RepositoryProxy(stepNode.getNode(NODE_STEP_CUSTOM));
    if (!stepMeta.isMissing()) {
      readRepCompatibleStepMeta(stepMetaInterface, proxy, null, transMeta.getDatabases());
      stepMetaInterface.readRep(proxy, transMeta.getMetaStore(), null, transMeta.getDatabases());
      stepMeta.setStepMetaInterface(stepMetaInterface);
    }

    // Get the partitioning as well...
    //
    StepPartitioningMeta stepPartitioningMeta = new StepPartitioningMeta();
    if (stepNode.hasProperty(PROP_PARTITIONING_SCHEMA)) {
      String partSchemaId =
          stepNode.getProperty(PROP_PARTITIONING_SCHEMA).getRef().getId().toString();
      String schemaName =
          repo.loadPartitionSchema(new StringObjectId(partSchemaId), null).getName();

      stepPartitioningMeta.setPartitionSchemaName(schemaName);
      String methodCode = getString(stepNode, PROP_PARTITIONING_METHOD);
      stepPartitioningMeta.setMethod(StepPartitioningMeta.getMethod(methodCode));
      if (stepPartitioningMeta.getPartitioner() != null) {
        proxy = new RepositoryProxy(stepNode.getNode(NODE_PARTITIONER_CUSTOM));
        stepPartitioningMeta.getPartitioner().loadRep(proxy, null);
      }
      stepPartitioningMeta.hasChanged(true);
    }
    stepMeta.setStepPartitioningMeta(stepPartitioningMeta);
    stepMeta
        .getStepPartitioningMeta()
        .setPartitionSchemaAfterLoading(transMeta.getPartitionSchemas());

    // Get the cluster schema name
    String clusterSchemaName = getString(stepNode, PROP_CLUSTER_SCHEMA);
    stepMeta.setClusterSchemaName(clusterSchemaName);
    if (clusterSchemaName != null && transMeta.getClusterSchemas() != null) {
      // Get the cluster schema from the given name
      for (ClusterSchema clusterSchema : transMeta.getClusterSchemas()) {
        if (clusterSchema.getName().equals(clusterSchemaName)) {
          stepMeta.setClusterSchema(clusterSchema);
          break;
        }
      }
    }

    transMeta.addStep(stepMeta);
  }

  for (DataNode stepNode : stepsNode.getNodes()) {
    ObjectId stepObjectId = new StringObjectId(stepNode.getId().toString());
    StepMeta stepMeta = StepMeta.findStep(transMeta.getSteps(), stepObjectId);

    // Also load the step error handling metadata
    //
    if (stepNode.hasProperty(PROP_STEP_ERROR_HANDLING_SOURCE_STEP)) {
      StepErrorMeta meta = new StepErrorMeta(transMeta, stepMeta);
      meta.setTargetStep(
          StepMeta.findStep(
              transMeta.getSteps(),
              stepNode.getProperty(PROP_STEP_ERROR_HANDLING_TARGET_STEP).getString()));
      meta.setEnabled(stepNode.getProperty(PROP_STEP_ERROR_HANDLING_IS_ENABLED).getBoolean());
      meta.setNrErrorsValuename(getString(stepNode, PROP_STEP_ERROR_HANDLING_NR_VALUENAME));
      meta.setErrorDescriptionsValuename(
          getString(stepNode, PROP_STEP_ERROR_HANDLING_DESCRIPTIONS_VALUENAME));
      meta.setErrorFieldsValuename(getString(stepNode, PROP_STEP_ERROR_HANDLING_FIELDS_VALUENAME));
      meta.setErrorCodesValuename(getString(stepNode, PROP_STEP_ERROR_HANDLING_CODES_VALUENAME));
      meta.setMaxErrors(getString(stepNode, PROP_STEP_ERROR_HANDLING_MAX_ERRORS));
      meta.setMaxPercentErrors(getString(stepNode, PROP_STEP_ERROR_HANDLING_MAX_PCT_ERRORS));
      meta.setMinPercentRows(getString(stepNode, PROP_STEP_ERROR_HANDLING_MIN_PCT_ROWS));
      meta.getSourceStep().setStepErrorMeta(meta); // a bit of a trick, I know.
    }
  }

  // Have all StreamValueLookups, etc. reference the correct source steps...
  //
  for (int i = 0; i < transMeta.nrSteps(); i++) {
    StepMeta stepMeta = transMeta.getStep(i);
    StepMetaInterface sii = stepMeta.getStepMetaInterface();
    if (sii != null) {
      sii.searchInfoAndTargetSteps(transMeta.getSteps());
    }
  }

  // Read the notes...
  //
  DataNode notesNode = rootNode.getNode(NODE_NOTES);
  int nrNotes = (int) notesNode.getProperty(PROP_NR_NOTES).getLong();
  for (DataNode noteNode : notesNode.getNodes()) {
    String xml = getString(noteNode, PROP_XML);
    transMeta.addNote(
        new NotePadMeta(
            XMLHandler.getSubNode(XMLHandler.loadXMLString(xml), NotePadMeta.XML_TAG)));
  }
  if (transMeta.nrNotes() != nrNotes) {
    throw new KettleException(
        "The number of notes read ["
            + transMeta.nrNotes()
            + "] was not the number we expected ["
            + nrNotes
            + "]");
  }

  // Read the hops...
  //
  DataNode hopsNode = rootNode.getNode(NODE_HOPS);
  int nrHops = (int) hopsNode.getProperty(PROP_NR_HOPS).getLong();
  for (DataNode hopNode : hopsNode.getNodes()) {
    String stepFromName = getString(hopNode, TRANS_HOP_FROM);
    String stepToName = getString(hopNode, TRANS_HOP_TO);
    boolean enabled = true;
    if (hopNode.hasProperty(TRANS_HOP_ENABLED)) {
      enabled = hopNode.getProperty(TRANS_HOP_ENABLED).getBoolean();
    }

    StepMeta stepFrom = StepMeta.findStep(transMeta.getSteps(), stepFromName);
    StepMeta stepTo = StepMeta.findStep(transMeta.getSteps(), stepToName);

    // Make sure to only accept valid hops PDI-5519
    //
    if (stepFrom != null && stepTo != null) {
      transMeta.addTransHop(new TransHopMeta(stepFrom, stepTo, enabled));
    }
  }
  if (transMeta.nrTransHops() != nrHops) {
    throw new KettleException(
        "The number of hops read ["
            + transMeta.nrTransHops()
            + "] was not the number we expected ["
            + nrHops
            + "]");
  }

  // Load the details at the end, to make sure we reference the databases correctly, etc.
  //
  loadTransformationDetails(rootNode, transMeta);

  transMeta.eraseParameters();

  DataNode paramsNode = rootNode.getNode(NODE_PARAMETERS);
  int count = (int) paramsNode.getProperty(PROP_NR_PARAMETERS).getLong();
  for (int idx = 0; idx < count; idx++) {
    DataNode paramNode = paramsNode.getNode(TRANS_PARAM_PREFIX + idx);
    String key = getString(paramNode, PARAM_KEY);
    String def = getString(paramNode, PARAM_DEFAULT);
    String desc = getString(paramNode, PARAM_DESC);
    transMeta.addParameterDefinition(key, def, desc);
  }
  transMeta.activateParameters();
}
@Override
public AbstractMeta decode(String graphXml) throws Exception {
  mxGraph graph = new mxGraph();
  mxCodec codec = new mxCodec();
  Document doc = mxUtils.parseXml(graphXml);
  codec.decode(doc.getDocumentElement(), graph.getModel());
  mxCell root = (mxCell) graph.getDefaultParent();

  TransMeta transMeta = new TransMeta();
  decodeCommRootAttr(root, transMeta);
  transMeta.setTransstatus(Const.toInt(root.getAttribute("trans_status"), -1));
  transMeta.setTransversion(root.getAttribute("trans_version"));

  if (transMeta.getRepository() != null) {
    transMeta.setSharedObjects(transMeta.getRepository().readTransSharedObjects(transMeta));
  } else {
    transMeta.setSharedObjects(transMeta.readSharedObjects());
  }
  transMeta.importFromMetaStore();

  decodeDatabases(root, transMeta);
  decodeNote(graph, transMeta);

  int count = graph.getModel().getChildCount(root);
  for (int i = 0; i < count; i++) {
    mxCell cell = (mxCell) graph.getModel().getChildAt(root, i);
    if (cell.isVertex()) {
      Element e = (Element) cell.getValue();
      if (PropsUI.TRANS_STEP_NAME.equals(e.getTagName())) {
        StepDecoder stepDecoder = (StepDecoder) PluginFactory.getBean(cell.getAttribute("ctype"));
        StepMeta stepMeta =
            stepDecoder.decodeStep(cell, transMeta.getDatabases(), transMeta.getMetaStore());
        stepMeta.setParentTransMeta(transMeta);
        if (stepMeta.isMissing()) {
          transMeta.addMissingTrans((MissingTrans) stepMeta.getStepMetaInterface());
        }

        StepMeta check = transMeta.findStep(stepMeta.getName());
        if (check != null) {
          if (!check.isShared()) {
            // Don't overwrite shared objects
            transMeta.addOrReplaceStep(stepMeta);
          } else {
            check.setDraw(stepMeta.isDrawn()); // Just keep the drawn flag and location
            check.setLocation(stepMeta.getLocation());
          }
        } else {
          transMeta.addStep(stepMeta); // simply add it.
        }
      }
    }
  }

  // Have all StreamValueLookups, etc. reference the correct source steps...
  //
  for (int i = 0; i < transMeta.nrSteps(); i++) {
    StepMeta stepMeta = transMeta.getStep(i);
    StepMetaInterface sii = stepMeta.getStepMetaInterface();
    if (sii != null) {
      sii.searchInfoAndTargetSteps(transMeta.getSteps());
    }
  }

  count = graph.getModel().getChildCount(root);
  for (int i = 0; i < count; i++) {
    mxCell cell = (mxCell) graph.getModel().getChildAt(root, i);
    if (cell.isEdge()) {
      mxCell source = (mxCell) cell.getSource();
      mxCell target = (mxCell) cell.getTarget();

      TransHopMeta hopinf = new TransHopMeta(null, null, true);
      String[] stepNames = transMeta.getStepNames();
      for (int j = 0; j < stepNames.length; j++) {
        if (stepNames[j].equalsIgnoreCase(source.getAttribute("label"))) {
          hopinf.setFromStep(transMeta.getStep(j));
        }
        if (stepNames[j].equalsIgnoreCase(target.getAttribute("label"))) {
          hopinf.setToStep(transMeta.getStep(j));
        }
      }
      transMeta.addTransHop(hopinf);
    }
  }

  JSONObject jsonObject = JSONObject.fromObject(root.getAttribute("transLogTable"));
  TransLogTable transLogTable = transMeta.getTransLogTable();
  transLogTable.setConnectionName(jsonObject.optString("connection"));
  transLogTable.setSchemaName(jsonObject.optString("schema"));
  transLogTable.setTableName(jsonObject.optString("table"));
  transLogTable.setLogSizeLimit(jsonObject.optString("size_limit_lines"));
  transLogTable.setLogInterval(jsonObject.optString("interval"));
  transLogTable.setTimeoutInDays(jsonObject.optString("timeout_days"));
  JSONArray jsonArray = jsonObject.optJSONArray("fields");
  if (jsonArray != null) {
    for (int i = 0; i < jsonArray.size(); i++) {
      JSONObject fieldJson = jsonArray.getJSONObject(i);
      String id = fieldJson.optString("id");
      LogTableField field = transLogTable.findField(id);
      // Guard the positional fallback, as the other log tables below do.
      if (field == null && i < transLogTable.getFields().size()) {
        field = transLogTable.getFields().get(i);
      }
      if (field != null) {
        field.setFieldName(fieldJson.optString("name"));
        field.setEnabled(fieldJson.optBoolean("enabled"));
        field.setSubject(
            StepMeta.findStep(transMeta.getSteps(), fieldJson.optString("subject")));
      }
    }
  }

  jsonObject = JSONObject.fromObject(root.getAttribute("stepLogTable"));
  StepLogTable stepLogTable = transMeta.getStepLogTable();
  stepLogTable.setConnectionName(jsonObject.optString("connection"));
  stepLogTable.setSchemaName(jsonObject.optString("schema"));
  stepLogTable.setTableName(jsonObject.optString("table"));
  stepLogTable.setTimeoutInDays(jsonObject.optString("timeout_days"));
  jsonArray = jsonObject.optJSONArray("fields");
  if (jsonArray != null) {
    for (int i = 0; i < jsonArray.size(); i++) {
      JSONObject fieldJson = jsonArray.getJSONObject(i);
      String id = fieldJson.optString("id");
      LogTableField field = stepLogTable.findField(id);
      if (field == null && i < stepLogTable.getFields().size()) {
        field = stepLogTable.getFields().get(i);
      }
      if (field != null) {
        field.setFieldName(fieldJson.optString("name"));
        field.setEnabled(fieldJson.optBoolean("enabled"));
      }
    }
  }

  jsonObject = JSONObject.fromObject(root.getAttribute("performanceLogTable"));
  PerformanceLogTable performanceLogTable = transMeta.getPerformanceLogTable();
  performanceLogTable.setConnectionName(jsonObject.optString("connection"));
  performanceLogTable.setSchemaName(jsonObject.optString("schema"));
  performanceLogTable.setTableName(jsonObject.optString("table"));
  performanceLogTable.setLogInterval(jsonObject.optString("interval"));
  performanceLogTable.setTimeoutInDays(jsonObject.optString("timeout_days"));
  jsonArray = jsonObject.optJSONArray("fields");
  if (jsonArray != null) {
    for (int i = 0; i < jsonArray.size(); i++) {
      JSONObject fieldJson = jsonArray.getJSONObject(i);
      String id = fieldJson.optString("id");
      LogTableField field = performanceLogTable.findField(id);
      if (field == null && i < performanceLogTable.getFields().size()) {
        field = performanceLogTable.getFields().get(i);
      }
      if (field != null) {
        field.setFieldName(fieldJson.optString("name"));
        field.setEnabled(fieldJson.optBoolean("enabled"));
      }
    }
  }

  jsonObject = JSONObject.fromObject(root.getAttribute("metricsLogTable"));
  MetricsLogTable metricsLogTable = transMeta.getMetricsLogTable();
  metricsLogTable.setConnectionName(jsonObject.optString("connection"));
  metricsLogTable.setSchemaName(jsonObject.optString("schema"));
  metricsLogTable.setTableName(jsonObject.optString("table"));
  metricsLogTable.setTimeoutInDays(jsonObject.optString("timeout_days"));
  jsonArray = jsonObject.optJSONArray("fields");
  if (jsonArray != null) {
    for (int i = 0; i < jsonArray.size(); i++) {
      JSONObject fieldJson = jsonArray.getJSONObject(i);
      String id = fieldJson.optString("id");
      LogTableField field = metricsLogTable.findField(id);
      if (field == null && i < metricsLogTable.getFields().size()) {
        field = metricsLogTable.getFields().get(i);
      }
      if (field != null) {
        field.setFieldName(fieldJson.optString("name"));
        field.setEnabled(fieldJson.optBoolean("enabled"));
      }
    }
  }

  jsonArray = JSONArray.fromObject(root.getAttribute("partitionschemas"));
  for (int i = 0; i < jsonArray.size(); i++) {
    jsonObject = jsonArray.getJSONObject(i);
    PartitionSchema partitionSchema = decodePartitionSchema(jsonObject);
    PartitionSchema check = transMeta.findPartitionSchema(partitionSchema.getName());
    if (check != null) {
      if (!check.isShared()) {
        transMeta.addOrReplacePartitionSchema(partitionSchema);
      }
    } else {
      transMeta.getPartitionSchemas().add(partitionSchema);
    }
  }

  decodeSlaveServers(root, transMeta);

  jsonArray = JSONArray.fromObject(root.getAttribute("clusterSchemas"));
  for (int i = 0; i < jsonArray.size(); i++) {
    jsonObject = jsonArray.getJSONObject(i);
    ClusterSchema clusterSchema = decodeClusterSchema(jsonObject, transMeta.getSlaveServers());
    clusterSchema.shareVariablesWith(transMeta);

    ClusterSchema check = transMeta.findClusterSchema(clusterSchema.getName());
    if (check != null) {
      if (!check.isShared()) {
        transMeta.addOrReplaceClusterSchema(clusterSchema);
      }
    } else {
      transMeta.getClusterSchemas().add(clusterSchema);
    }
  }

  for (int i = 0; i < transMeta.nrSteps(); i++) {
    transMeta.getStep(i).setClusterSchemaAfterLoading(transMeta.getClusterSchemas());
  }

  transMeta.setSizeRowset(Const.toInt(root.getAttribute("size_rowset"), Const.ROWS_IN_ROWSET));
  transMeta.setSleepTimeEmpty(
      Const.toInt(root.getAttribute("sleep_time_empty"), Const.TIMEOUT_GET_MILLIS));
  transMeta.setSleepTimeFull(
      Const.toInt(root.getAttribute("sleep_time_full"), Const.TIMEOUT_PUT_MILLIS));
  transMeta.setUsingUniqueConnections(
      "Y".equalsIgnoreCase(root.getAttribute("unique_connections")));
  transMeta.setFeedbackShown(!"N".equalsIgnoreCase(root.getAttribute("feedback_shown")));
  transMeta.setFeedbackSize(Const.toInt(root.getAttribute("feedback_size"), Const.ROWS_UPDATE));
  transMeta.setUsingThreadPriorityManagment(
      !"N".equalsIgnoreCase(root.getAttribute("using_thread_priorities")));
  transMeta.setCapturingStepPerformanceSnapShots(
      "Y".equalsIgnoreCase(root.getAttribute("capture_step_performance")));
  transMeta.setStepPerformanceCapturingDelay(
      Const.toLong(root.getAttribute("step_performance_capturing_delay"), 1000));
  transMeta.setStepPerformanceCapturingSizeLimit(
      root.getAttribute("step_performance_capturing_size_limit"));
  transMeta.setKey(XMLHandler.stringToBinary(root.getAttribute("key_for_session_key")));
  transMeta.setPrivateKey("Y".equals(root.getAttribute("is_key_private")));

  return transMeta;
}
protected boolean equals(ClusterSchema clusterSchema, ClusterSchema clusterSchema2) {
  if (!equals(clusterSchema.getName(), clusterSchema2.getName())) {
    return false;
  } else if (!equals(clusterSchema.getBasePort(), clusterSchema2.getBasePort())) {
    return false;
  } else if (!equals(
      clusterSchema.getSocketsBufferSize(), clusterSchema2.getSocketsBufferSize())) {
    return false;
  } else if (!equals(
      clusterSchema.getSocketsFlushInterval(), clusterSchema2.getSocketsFlushInterval())) {
    return false;
  } else if (!equals(clusterSchema.isSocketsCompressed(), clusterSchema2.isSocketsCompressed())) {
    return false;
  } else if (!equals(clusterSchema.isDynamic(), clusterSchema2.isDynamic())) {
    return false;
  } else if (!equals(clusterSchema.getSlaveServers(), clusterSchema2.getSlaveServers())) {
    return false;
  }
  return true;
}
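/**
 * An illustrative sketch of the field-wise comparison above, assuming the null-safe
 * equals(Object, Object) helper available in this importer. Two schemas that differ only in
 * base port compare unequal, which is what triggers the overwrite prompt in
 * replaceSharedObjects(). This helper is hypothetical, not part of the original importer.
 */
private boolean exampleCompareClusterSchemas() {
  ClusterSchema a = new ClusterSchema();
  a.setName("cluster1");
  a.setBasePort("40000");

  ClusterSchema b = new ClusterSchema();
  b.setName("cluster1");
  b.setBasePort("40001"); // only the base port differs

  return equals(a, b); // false: the importer would prompt before replacing
}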