private synchronized ObjectId insertCluster(ClusterSchema clusterSchema) throws KettleException {
  if (getClusterID(clusterSchema.getName()) != null) {
    // This cluster schema name is already in use. Throw an exception.
    throw new KettleObjectExistsException("Failed to create object in repository. Object ["
        + clusterSchema.getName() + "] already exists.");
  }

  ObjectId id = repository.connectionDelegate.getNextClusterID();

  RowMetaAndData table = new RowMetaAndData();
  table.addValue(new ValueMeta(KettleDatabaseRepository.FIELD_CLUSTER_ID_CLUSTER,
      ValueMetaInterface.TYPE_INTEGER), id);
  table.addValue(new ValueMeta(KettleDatabaseRepository.FIELD_CLUSTER_NAME,
      ValueMetaInterface.TYPE_STRING), clusterSchema.getName());
  table.addValue(new ValueMeta(KettleDatabaseRepository.FIELD_CLUSTER_BASE_PORT,
      ValueMetaInterface.TYPE_STRING), clusterSchema.getBasePort());
  table.addValue(new ValueMeta(KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_BUFFER_SIZE,
      ValueMetaInterface.TYPE_STRING), clusterSchema.getSocketsBufferSize());
  table.addValue(new ValueMeta(KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_FLUSH_INTERVAL,
      ValueMetaInterface.TYPE_STRING), clusterSchema.getSocketsFlushInterval());
  table.addValue(new ValueMeta(KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_COMPRESSED,
      ValueMetaInterface.TYPE_BOOLEAN), Boolean.valueOf(clusterSchema.isSocketsCompressed()));
  table.addValue(new ValueMeta(KettleDatabaseRepository.FIELD_CLUSTER_DYNAMIC,
      ValueMetaInterface.TYPE_BOOLEAN), Boolean.valueOf(clusterSchema.isDynamic()));

  repository.connectionDelegate.getDatabase()
      .prepareInsert(table.getRowMeta(), KettleDatabaseRepository.TABLE_R_CLUSTER);
  repository.connectionDelegate.getDatabase().setValuesInsert(table);
  repository.connectionDelegate.getDatabase().insertRow();
  repository.connectionDelegate.getDatabase().closeInsert();

  return id;
}
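// Caller sketch (hypothetical, not part of the source): insertCluster() enforces name
// uniqueness, so a caller can pre-check with getClusterID() or catch the collision
// and fall back to an in-place update via updateCluster().
ObjectId clusterId;
try {
  clusterId = insertCluster(clusterSchema);
} catch (KettleObjectExistsException e) {
  // A schema with this name already exists: adopt its id and update it in place.
  clusterSchema.setObjectId(getClusterID(clusterSchema.getName()));
  updateCluster(clusterSchema);
  clusterId = clusterSchema.getObjectId();
}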
/**
 * Add the clusters in the repository to this transformation if they are not yet present.
 *
 * @param transMeta the transformation to load into
 * @param overWriteShared if an object with the same name exists, overwrite it
 * @param clusterSchemas the cluster schemas read from the repository
 */
protected void readClusters(TransMeta transMeta, boolean overWriteShared,
    List<ClusterSchema> clusterSchemas) {
  for (ClusterSchema clusterSchema : clusterSchemas) {
    if (overWriteShared || transMeta.findClusterSchema(clusterSchema.getName()) == null) {
      if (!Const.isEmpty(clusterSchema.getName())) {
        clusterSchema.shareVariablesWith(transMeta);
        transMeta.addOrReplaceClusterSchema(clusterSchema);
        if (!overWriteShared) {
          clusterSchema.setChanged(false);
        }
      }
    }
  }
}
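// Usage sketch (hypothetical): merge freshly loaded shared schemas into a transformation
// without overwriting schemas the user already edited locally.
// readClusterSchemasFromRepository() is an assumed helper, not part of this source.
List<ClusterSchema> shared = readClusterSchemasFromRepository();
readClusters(transMeta, false, shared);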
public void encodeClusterSchema(Element e, TransMeta transMeta) {
  JSONArray jsonArray = new JSONArray();
  for (int i = 0; i < transMeta.getClusterSchemas().size(); i++) {
    ClusterSchema clusterSchema = transMeta.getClusterSchemas().get(i);
    JSONObject jsonObject = new JSONObject();
    jsonObject.put("name", clusterSchema.getName());
    jsonObject.put("base_port", clusterSchema.getBasePort());
    jsonObject.put("sockets_buffer_size", clusterSchema.getSocketsBufferSize());
    jsonObject.put("sockets_flush_interval", clusterSchema.getSocketsFlushInterval());
    jsonObject.put("sockets_compressed", clusterSchema.isSocketsCompressed() ? "Y" : "N");
    jsonObject.put("dynamic", clusterSchema.isDynamic() ? "Y" : "N");

    JSONArray slaveServers = new JSONArray();
    for (int j = 0; j < clusterSchema.getSlaveServers().size(); j++) {
      SlaveServer slaveServer = clusterSchema.getSlaveServers().get(j);
      slaveServers.add(SlaveServerCodec.encode(slaveServer));
    }
    jsonObject.put("slaveservers", slaveServers);

    jsonArray.add(jsonObject);
  }
  e.setAttribute("clusterSchemas", jsonArray.toString());
}
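// For reference, the "clusterSchemas" attribute written above carries JSON of this
// shape (field values hypothetical; the Y/N flags mirror the boolean encoding used
// by the method):
//
// [{"name": "local cluster", "base_port": "40000", "sockets_buffer_size": "2000",
//   "sockets_flush_interval": "5000", "sockets_compressed": "Y", "dynamic": "N",
//   "slaveservers": [ /* output of SlaveServerCodec.encode() per server */ ]}]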
protected void replaceSharedObjects(TransMeta transMeta) throws KettleException {
  replaceSharedObjects((AbstractMeta) transMeta);

  for (ClusterSchema clusterSchema : getSharedObjects(ClusterSchema.class)) {
    int index = transMeta.getClusterSchemas().indexOf(clusterSchema);
    if (index < 0) {
      transMeta.getClusterSchemas().add(clusterSchema);
    } else {
      ClusterSchema imported = transMeta.getClusterSchemas().get(index);
      // Preserve the object id so we can update without having to look up the id
      imported.setObjectId(clusterSchema.getObjectId());
      if (equals(clusterSchema, imported)
          || !getPromptResult(
              BaseMessages.getString(PKG,
                  "RepositoryImporter.Dialog.ClusterSchemaExistsOverWrite.Message",
                  imported.getName()),
              BaseMessages.getString(PKG,
                  "RepositoryImporter.Dialog.ConnectionExistsOverWrite.DontShowAnyMoreMessage"),
              IMPORT_ASK_ABOUT_REPLACE_CS)) {
        imported.replaceMeta(clusterSchema);
        imported.clearChanged(); // We didn't actually change anything
      } else {
        imported.setChanged();
      }
    }
  }

  for (PartitionSchema partitionSchema : getSharedObjects(PartitionSchema.class)) {
    int index = transMeta.getPartitionSchemas().indexOf(partitionSchema);
    if (index < 0) {
      transMeta.getPartitionSchemas().add(partitionSchema);
    } else {
      PartitionSchema imported = transMeta.getPartitionSchemas().get(index);
      // Preserve the object id so we can update without having to look up the id
      imported.setObjectId(partitionSchema.getObjectId());
      if (equals(partitionSchema, imported)
          || !getPromptResult(
              BaseMessages.getString(PKG,
                  "RepositoryImporter.Dialog.PartitionSchemaExistsOverWrite.Message",
                  imported.getName()),
              BaseMessages.getString(PKG,
                  "RepositoryImporter.Dialog.ConnectionExistsOverWrite.DontShowAnyMoreMessage"),
              IMPORT_ASK_ABOUT_REPLACE_PS)) {
        imported.replaceMeta(partitionSchema);
        imported.clearChanged(); // We didn't actually change anything
      } else {
        imported.setChanged();
      }
    }
  }
}
public void newClusteringSchema(TransMeta transMeta) {
  ClusterSchema clusterSchema = new ClusterSchema();

  ClusterSchemaDialog dialog = new ClusterSchemaDialog(spoon.getShell(), clusterSchema,
      transMeta.getClusterSchemas(), transMeta.getSlaveServers());

  if (dialog.open()) {
    List<ClusterSchema> clusterSchemas = transMeta.getClusterSchemas();
    if (isDuplicate(clusterSchemas, clusterSchema)) {
      new ErrorDialog(spoon.getShell(),
          getMessage("Spoon.Dialog.ErrorSavingCluster.Title"),
          getMessage("Spoon.Dialog.ErrorSavingCluster.Message", clusterSchema.getName()),
          new KettleException(getMessage("Spoon.Dialog.ErrorSavingCluster.NotUnique")));
      return;
    }

    clusterSchemas.add(clusterSchema);

    if (spoon.rep != null) {
      try {
        if (!spoon.rep.getSecurityProvider().isReadOnly()) {
          spoon.rep.save(clusterSchema, Const.VERSION_COMMENT_INITIAL_VERSION, null);
        } else {
          throw new KettleException(
              BaseMessages.getString(PKG, "Spoon.Dialog.Exception.ReadOnlyRepositoryUser"));
        }
      } catch (KettleException e) {
        new ErrorDialog(spoon.getShell(),
            getMessage("Spoon.Dialog.ErrorSavingCluster.Title"),
            getMessage("Spoon.Dialog.ErrorSavingCluster.Message", clusterSchema.getName()),
            e);
      }
    }

    spoon.refreshTree();
  }
}
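// isDuplicate() is referenced above but not shown in this excerpt; a plausible
// implementation (sketch, assumed) rejects a schema whose name matches an existing one:
private boolean isDuplicate(List<ClusterSchema> clusterSchemas, ClusterSchema candidate) {
  String name = candidate.getName();
  for (ClusterSchema schema : clusterSchemas) {
    if (schema.getName().equalsIgnoreCase(name)) {
      return true; // name collision: refuse to add the new schema
    }
  }
  return false;
}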
protected boolean equals(ClusterSchema clusterSchema, ClusterSchema clusterSchema2) {
  if (!equals(clusterSchema.getName(), clusterSchema2.getName())) {
    return false;
  } else if (!equals(clusterSchema.getBasePort(), clusterSchema2.getBasePort())) {
    return false;
  } else if (!equals(clusterSchema.getSocketsBufferSize(), clusterSchema2.getSocketsBufferSize())) {
    return false;
  } else if (!equals(clusterSchema.getSocketsFlushInterval(), clusterSchema2.getSocketsFlushInterval())) {
    return false;
  } else if (!equals(clusterSchema.isSocketsCompressed(), clusterSchema2.isSocketsCompressed())) {
    return false;
  } else if (!equals(clusterSchema.isDynamic(), clusterSchema2.isDynamic())) {
    return false;
  } else if (!equals(clusterSchema.getSlaveServers(), clusterSchema2.getSlaveServers())) {
    return false;
  }
  return true;
}
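// The pairwise checks above delegate to an overloaded null-safe equals(Object, Object)
// that is not shown in this excerpt; a minimal sketch of the assumed helper
// (the boolean arguments autobox to Boolean, so the Object overload covers them too):
protected boolean equals(Object obj1, Object obj2) {
  if (obj1 == null) {
    return obj2 == null; // two nulls compare as equal
  }
  return obj1.equals(obj2);
}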
public synchronized void updateCluster(ClusterSchema clusterSchema) throws KettleException {
  RowMetaAndData table = new RowMetaAndData();
  table.addValue(new ValueMeta(KettleDatabaseRepository.FIELD_CLUSTER_ID_CLUSTER,
      ValueMetaInterface.TYPE_INTEGER), clusterSchema.getObjectId());
  table.addValue(new ValueMeta(KettleDatabaseRepository.FIELD_CLUSTER_NAME,
      ValueMetaInterface.TYPE_STRING), clusterSchema.getName());
  table.addValue(new ValueMeta(KettleDatabaseRepository.FIELD_CLUSTER_BASE_PORT,
      ValueMetaInterface.TYPE_STRING), clusterSchema.getBasePort());
  table.addValue(new ValueMeta(KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_BUFFER_SIZE,
      ValueMetaInterface.TYPE_STRING), clusterSchema.getSocketsBufferSize());
  table.addValue(new ValueMeta(KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_FLUSH_INTERVAL,
      ValueMetaInterface.TYPE_STRING), clusterSchema.getSocketsFlushInterval());
  table.addValue(new ValueMeta(KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_COMPRESSED,
      ValueMetaInterface.TYPE_BOOLEAN), Boolean.valueOf(clusterSchema.isSocketsCompressed()));
  table.addValue(new ValueMeta(KettleDatabaseRepository.FIELD_CLUSTER_DYNAMIC,
      ValueMetaInterface.TYPE_BOOLEAN), Boolean.valueOf(clusterSchema.isDynamic()));

  repository.connectionDelegate.updateTableRow(KettleDatabaseRepository.TABLE_R_CLUSTER,
      KettleDatabaseRepository.FIELD_CLUSTER_ID_CLUSTER, table, clusterSchema.getObjectId());
}
public void saveClusterSchema(ClusterSchema clusterSchema, String versionComment,
    ObjectId id_transformation, boolean isUsedByTransformation, boolean overwrite)
    throws KettleException {

  ObjectId existingClusterSchemaId = getClusterID(clusterSchema.getName());
  if (existingClusterSchemaId != null) {
    clusterSchema.setObjectId(existingClusterSchemaId);
  }

  if (clusterSchema.getObjectId() == null) {
    // New cluster schema: insert it
    clusterSchema.setObjectId(insertCluster(clusterSchema));
  } else {
    // If we received a clusterSchemaId and it is different from the cluster schema
    // we are working with...
    if (existingClusterSchemaId != null
        && !clusterSchema.getObjectId().equals(existingClusterSchemaId)) {
      // A cluster schema with this name already exists
      if (overwrite) {
        // Proceed with the save, removing the original version from the repository first
        repository.deleteClusterSchema(existingClusterSchemaId);
        updateCluster(clusterSchema);
      } else {
        throw new KettleObjectExistsException("Failed to save object to repository. Object ["
            + clusterSchema.getName() + "] already exists.");
      }
    } else {
      // There is no naming collision (either it is the same object or the name is unique)
      updateCluster(clusterSchema);
    }
  }

  repository.delClusterSlaves(clusterSchema.getObjectId());

  // Also save the used slave server references.
  for (int i = 0; i < clusterSchema.getSlaveServers().size(); i++) {
    SlaveServer slaveServer = clusterSchema.getSlaveServers().get(i);
    if (slaveServer.getObjectId() == null) {
      // The slave server is not yet saved: save it first
      repository.save(slaveServer, versionComment, null, id_transformation,
          isUsedByTransformation, overwrite);
    }
    repository.insertClusterSlave(clusterSchema, slaveServer);
  }

  // Save a link to the transformation to keep track of the use of this cluster schema.
  // Only save it if it is really used by the transformation.
  if (isUsedByTransformation) {
    repository.insertTransformationCluster(id_transformation, clusterSchema.getObjectId());
  }
}
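// Typical invocation (values hypothetical): persist a schema that a transformation
// uses, overwriting any same-named schema already in the repository.
saveClusterSchema(clusterSchema, "Initial check-in", transformationId,
    true /* isUsedByTransformation */, true /* overwrite */);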
public void dataNodeToElement(final DataNode rootNode, final RepositoryElementInterface element)
    throws KettleException {
  TransMeta transMeta = (TransMeta) element;

  List<String> privateTransformationDatabases = null;
  // Read the private databases.
  DataNode privateDatabases = rootNode.getNode(NODE_TRANS_PRIVATE_DATABASES);
  // If this node is present, the new format is in use; unexpected nodes can be ignored.
  if (privateDatabases != null) {
    privateTransformationDatabases = new ArrayList<String>();
    for (DataNode privateDatabase : privateDatabases.getNodes()) {
      privateTransformationDatabases.add(privateDatabase.getName());
    }
  }
  transMeta.setPrivateTransformationDatabases(privateTransformationDatabases);

  // Read the steps...
  //
  DataNode stepsNode = rootNode.getNode(NODE_STEPS);
  for (DataNode stepNode : stepsNode.getNodes()) {
    StepMeta stepMeta = new StepMeta(new StringObjectId(stepNode.getId().toString()));
    stepMeta.setParentTransMeta(transMeta); // for tracing, retain hierarchy

    // Read the basics
    //
    stepMeta.setName(getString(stepNode, PROP_NAME));
    if (stepNode.hasProperty(PROP_DESCRIPTION)) {
      stepMeta.setDescription(getString(stepNode, PROP_DESCRIPTION));
    }
    stepMeta.setDistributes(stepNode.getProperty(PROP_STEP_DISTRIBUTE).getBoolean());

    DataProperty rowDistributionProperty = stepNode.getProperty(PROP_STEP_ROW_DISTRIBUTION);
    String rowDistributionCode =
        rowDistributionProperty == null ? null : rowDistributionProperty.getString();
    RowDistributionInterface rowDistribution = PluginRegistry.getInstance().loadClass(
        RowDistributionPluginType.class, rowDistributionCode, RowDistributionInterface.class);
    stepMeta.setRowDistribution(rowDistribution);

    stepMeta.setDraw(stepNode.getProperty(PROP_STEP_GUI_DRAW).getBoolean());

    int copies = (int) stepNode.getProperty(PROP_STEP_COPIES).getLong();
    String copiesString = stepNode.getProperty(PROP_STEP_COPIES_STRING) != null
        ? stepNode.getProperty(PROP_STEP_COPIES_STRING).getString()
        : StringUtils.EMPTY;
    if (!Const.isEmpty(copiesString)) {
      stepMeta.setCopiesString(copiesString);
    } else {
      stepMeta.setCopies(copies); // for backward compatibility
    }

    int x = (int) stepNode.getProperty(PROP_STEP_GUI_LOCATION_X).getLong();
    int y = (int) stepNode.getProperty(PROP_STEP_GUI_LOCATION_Y).getLong();
    stepMeta.setLocation(x, y);

    // Load the group attributes map
    //
    AttributesMapUtil.loadAttributesMap(stepNode, stepMeta);

    String stepType = getString(stepNode, PROP_STEP_TYPE);

    // Create a new StepMetaInterface object...
    //
    PluginRegistry registry = PluginRegistry.getInstance();
    PluginInterface stepPlugin = registry.findPluginWithId(StepPluginType.class, stepType);

    StepMetaInterface stepMetaInterface = null;
    if (stepPlugin != null) {
      stepMetaInterface = (StepMetaInterface) registry.loadClass(stepPlugin);
      stepType = stepPlugin.getIds()[0]; // revert to the default in case we loaded an alternate version
    } else {
      stepMeta.setStepMetaInterface(
          (StepMetaInterface) new MissingTrans(stepMeta.getName(), stepType));
      transMeta.addMissingTrans((MissingTrans) stepMeta.getStepMetaInterface());
    }

    stepMeta.setStepID(stepType);

    // Read the metadata from the repository too...
    //
    RepositoryProxy proxy = new RepositoryProxy(stepNode.getNode(NODE_STEP_CUSTOM));
    if (!stepMeta.isMissing()) {
      readRepCompatibleStepMeta(stepMetaInterface, proxy, null, transMeta.getDatabases());
      stepMetaInterface.readRep(proxy, transMeta.getMetaStore(), null, transMeta.getDatabases());
      stepMeta.setStepMetaInterface(stepMetaInterface);
    }

    // Get the partitioning as well...
    //
    StepPartitioningMeta stepPartitioningMeta = new StepPartitioningMeta();
    if (stepNode.hasProperty(PROP_PARTITIONING_SCHEMA)) {
      String partSchemaId =
          stepNode.getProperty(PROP_PARTITIONING_SCHEMA).getRef().getId().toString();
      String schemaName =
          repo.loadPartitionSchema(new StringObjectId(partSchemaId), null).getName();

      stepPartitioningMeta.setPartitionSchemaName(schemaName);
      String methodCode = getString(stepNode, PROP_PARTITIONING_METHOD);
      stepPartitioningMeta.setMethod(StepPartitioningMeta.getMethod(methodCode));
      if (stepPartitioningMeta.getPartitioner() != null) {
        proxy = new RepositoryProxy(stepNode.getNode(NODE_PARTITIONER_CUSTOM));
        stepPartitioningMeta.getPartitioner().loadRep(proxy, null);
      }
      stepPartitioningMeta.hasChanged(true);
    }
    stepMeta.setStepPartitioningMeta(stepPartitioningMeta);
    stepMeta.getStepPartitioningMeta()
        .setPartitionSchemaAfterLoading(transMeta.getPartitionSchemas());

    // Get the cluster schema name
    String clusterSchemaName = getString(stepNode, PROP_CLUSTER_SCHEMA);
    stepMeta.setClusterSchemaName(clusterSchemaName);
    if (clusterSchemaName != null && transMeta.getClusterSchemas() != null) {
      // Get the cluster schema from the given name
      for (ClusterSchema clusterSchema : transMeta.getClusterSchemas()) {
        if (clusterSchema.getName().equals(clusterSchemaName)) {
          stepMeta.setClusterSchema(clusterSchema);
          break;
        }
      }
    }

    transMeta.addStep(stepMeta);
  }

  for (DataNode stepNode : stepsNode.getNodes()) {
    ObjectId stepObjectId = new StringObjectId(stepNode.getId().toString());
    StepMeta stepMeta = StepMeta.findStep(transMeta.getSteps(), stepObjectId);

    // Also load the step error handling metadata
    //
    if (stepNode.hasProperty(PROP_STEP_ERROR_HANDLING_SOURCE_STEP)) {
      StepErrorMeta meta = new StepErrorMeta(transMeta, stepMeta);
      meta.setTargetStep(StepMeta.findStep(transMeta.getSteps(),
          stepNode.getProperty(PROP_STEP_ERROR_HANDLING_TARGET_STEP).getString()));
      meta.setEnabled(stepNode.getProperty(PROP_STEP_ERROR_HANDLING_IS_ENABLED).getBoolean());
      meta.setNrErrorsValuename(getString(stepNode, PROP_STEP_ERROR_HANDLING_NR_VALUENAME));
      meta.setErrorDescriptionsValuename(
          getString(stepNode, PROP_STEP_ERROR_HANDLING_DESCRIPTIONS_VALUENAME));
      meta.setErrorFieldsValuename(getString(stepNode, PROP_STEP_ERROR_HANDLING_FIELDS_VALUENAME));
      meta.setErrorCodesValuename(getString(stepNode, PROP_STEP_ERROR_HANDLING_CODES_VALUENAME));
      meta.setMaxErrors(getString(stepNode, PROP_STEP_ERROR_HANDLING_MAX_ERRORS));
      meta.setMaxPercentErrors(getString(stepNode, PROP_STEP_ERROR_HANDLING_MAX_PCT_ERRORS));
      meta.setMinPercentRows(getString(stepNode, PROP_STEP_ERROR_HANDLING_MIN_PCT_ROWS));
      meta.getSourceStep().setStepErrorMeta(meta); // a bit of a trick, I know.
    }
  }

  // Have all StreamValueLookups, etc. reference the correct source steps...
  //
  for (int i = 0; i < transMeta.nrSteps(); i++) {
    StepMeta stepMeta = transMeta.getStep(i);
    StepMetaInterface sii = stepMeta.getStepMetaInterface();
    if (sii != null) {
      sii.searchInfoAndTargetSteps(transMeta.getSteps());
    }
  }

  // Read the notes...
  //
  DataNode notesNode = rootNode.getNode(NODE_NOTES);
  int nrNotes = (int) notesNode.getProperty(PROP_NR_NOTES).getLong();
  for (DataNode noteNode : notesNode.getNodes()) {
    String xml = getString(noteNode, PROP_XML);
    transMeta.addNote(new NotePadMeta(
        XMLHandler.getSubNode(XMLHandler.loadXMLString(xml), NotePadMeta.XML_TAG)));
  }
  if (transMeta.nrNotes() != nrNotes) {
    throw new KettleException("The number of notes read [" + transMeta.nrNotes()
        + "] was not the number we expected [" + nrNotes + "]");
  }

  // Read the hops...
  //
  DataNode hopsNode = rootNode.getNode(NODE_HOPS);
  int nrHops = (int) hopsNode.getProperty(PROP_NR_HOPS).getLong();
  for (DataNode hopNode : hopsNode.getNodes()) {
    String stepFromName = getString(hopNode, TRANS_HOP_FROM);
    String stepToName = getString(hopNode, TRANS_HOP_TO);
    boolean enabled = true;
    if (hopNode.hasProperty(TRANS_HOP_ENABLED)) {
      enabled = hopNode.getProperty(TRANS_HOP_ENABLED).getBoolean();
    }

    StepMeta stepFrom = StepMeta.findStep(transMeta.getSteps(), stepFromName);
    StepMeta stepTo = StepMeta.findStep(transMeta.getSteps(), stepToName);

    // Make sure to only accept valid hops PDI-5519
    //
    if (stepFrom != null && stepTo != null) {
      transMeta.addTransHop(new TransHopMeta(stepFrom, stepTo, enabled));
    }
  }
  if (transMeta.nrTransHops() != nrHops) {
    throw new KettleException("The number of hops read [" + transMeta.nrTransHops()
        + "] was not the number we expected [" + nrHops + "]");
  }

  // Load the details at the end, to make sure we reference the databases correctly, etc.
  //
  loadTransformationDetails(rootNode, transMeta);

  transMeta.eraseParameters();

  DataNode paramsNode = rootNode.getNode(NODE_PARAMETERS);
  int count = (int) paramsNode.getProperty(PROP_NR_PARAMETERS).getLong();
  for (int idx = 0; idx < count; idx++) {
    DataNode paramNode = paramsNode.getNode(TRANS_PARAM_PREFIX + idx);
    String key = getString(paramNode, PARAM_KEY);
    String def = getString(paramNode, PARAM_DEFAULT);
    String desc = getString(paramNode, PARAM_DESC);
    transMeta.addParameterDefinition(key, def, desc);
  }
  transMeta.activateParameters();
}
@Override
public AbstractMeta decode(String graphXml) throws Exception {
  mxGraph graph = new mxGraph();
  mxCodec codec = new mxCodec();
  Document doc = mxUtils.parseXml(graphXml);
  codec.decode(doc.getDocumentElement(), graph.getModel());
  mxCell root = (mxCell) graph.getDefaultParent();

  TransMeta transMeta = new TransMeta();
  decodeCommRootAttr(root, transMeta);
  transMeta.setTransstatus(Const.toInt(root.getAttribute("trans_status"), -1));
  transMeta.setTransversion(root.getAttribute("trans_version"));

  if (transMeta.getRepository() != null) {
    transMeta.setSharedObjects(transMeta.getRepository().readTransSharedObjects(transMeta));
  } else {
    transMeta.setSharedObjects(transMeta.readSharedObjects());
  }
  transMeta.importFromMetaStore();

  decodeDatabases(root, transMeta);
  decodeNote(graph, transMeta);

  int count = graph.getModel().getChildCount(root);
  for (int i = 0; i < count; i++) {
    mxCell cell = (mxCell) graph.getModel().getChildAt(root, i);
    if (cell.isVertex()) {
      Element e = (Element) cell.getValue();
      if (PropsUI.TRANS_STEP_NAME.equals(e.getTagName())) {
        StepDecoder stepDecoder = (StepDecoder) PluginFactory.getBean(cell.getAttribute("ctype"));
        StepMeta stepMeta =
            stepDecoder.decodeStep(cell, transMeta.getDatabases(), transMeta.getMetaStore());
        stepMeta.setParentTransMeta(transMeta);
        if (stepMeta.isMissing()) {
          transMeta.addMissingTrans((MissingTrans) stepMeta.getStepMetaInterface());
        }

        StepMeta check = transMeta.findStep(stepMeta.getName());
        if (check != null) {
          if (!check.isShared()) {
            // Don't overwrite shared objects
            transMeta.addOrReplaceStep(stepMeta);
          } else {
            check.setDraw(stepMeta.isDrawn()); // Just keep the drawn flag and location
            check.setLocation(stepMeta.getLocation());
          }
        } else {
          transMeta.addStep(stepMeta); // simply add it.
        }
      }
    }
  }

  // Have all StreamValueLookups, etc. reference the correct source steps...
  //
  for (int i = 0; i < transMeta.nrSteps(); i++) {
    StepMeta stepMeta = transMeta.getStep(i);
    StepMetaInterface sii = stepMeta.getStepMetaInterface();
    if (sii != null) {
      sii.searchInfoAndTargetSteps(transMeta.getSteps());
    }
  }

  count = graph.getModel().getChildCount(root);
  for (int i = 0; i < count; i++) {
    mxCell cell = (mxCell) graph.getModel().getChildAt(root, i);
    if (cell.isEdge()) {
      mxCell source = (mxCell) cell.getSource();
      mxCell target = (mxCell) cell.getTarget();

      TransHopMeta hopinf = new TransHopMeta(null, null, true);
      String[] stepNames = transMeta.getStepNames();
      for (int j = 0; j < stepNames.length; j++) {
        if (stepNames[j].equalsIgnoreCase(source.getAttribute("label"))) {
          hopinf.setFromStep(transMeta.getStep(j));
        }
        if (stepNames[j].equalsIgnoreCase(target.getAttribute("label"))) {
          hopinf.setToStep(transMeta.getStep(j));
        }
      }
      transMeta.addTransHop(hopinf);
    }
  }

  JSONObject jsonObject = JSONObject.fromObject(root.getAttribute("transLogTable"));
  TransLogTable transLogTable = transMeta.getTransLogTable();
  transLogTable.setConnectionName(jsonObject.optString("connection"));
  transLogTable.setSchemaName(jsonObject.optString("schema"));
  transLogTable.setTableName(jsonObject.optString("table"));
  transLogTable.setLogSizeLimit(jsonObject.optString("size_limit_lines"));
  transLogTable.setLogInterval(jsonObject.optString("interval"));
  transLogTable.setTimeoutInDays(jsonObject.optString("timeout_days"));
  JSONArray jsonArray = jsonObject.optJSONArray("fields");
  if (jsonArray != null) {
    for (int i = 0; i < jsonArray.size(); i++) {
      JSONObject fieldJson = jsonArray.getJSONObject(i);
      String id = fieldJson.optString("id");
      LogTableField field = transLogTable.findField(id);
      // Guard the positional fallback against indexes past the default field list,
      // matching the step/performance/metrics log table blocks below.
      if (field == null && i < transLogTable.getFields().size()) {
        field = transLogTable.getFields().get(i);
      }
      if (field != null) {
        field.setFieldName(fieldJson.optString("name"));
        field.setEnabled(fieldJson.optBoolean("enabled"));
        field.setSubject(StepMeta.findStep(transMeta.getSteps(), fieldJson.optString("subject")));
      }
    }
  }

  jsonObject = JSONObject.fromObject(root.getAttribute("stepLogTable"));
  StepLogTable stepLogTable = transMeta.getStepLogTable();
  stepLogTable.setConnectionName(jsonObject.optString("connection"));
  stepLogTable.setSchemaName(jsonObject.optString("schema"));
  stepLogTable.setTableName(jsonObject.optString("table"));
  stepLogTable.setTimeoutInDays(jsonObject.optString("timeout_days"));
  jsonArray = jsonObject.optJSONArray("fields");
  if (jsonArray != null) {
    for (int i = 0; i < jsonArray.size(); i++) {
      JSONObject fieldJson = jsonArray.getJSONObject(i);
      String id = fieldJson.optString("id");
      LogTableField field = stepLogTable.findField(id);
      if (field == null && i < stepLogTable.getFields().size()) {
        field = stepLogTable.getFields().get(i);
      }
      if (field != null) {
        field.setFieldName(fieldJson.optString("name"));
        field.setEnabled(fieldJson.optBoolean("enabled"));
      }
    }
  }

  jsonObject = JSONObject.fromObject(root.getAttribute("performanceLogTable"));
  PerformanceLogTable performanceLogTable = transMeta.getPerformanceLogTable();
  performanceLogTable.setConnectionName(jsonObject.optString("connection"));
  performanceLogTable.setSchemaName(jsonObject.optString("schema"));
  performanceLogTable.setTableName(jsonObject.optString("table"));
  performanceLogTable.setLogInterval(jsonObject.optString("interval"));
  performanceLogTable.setTimeoutInDays(jsonObject.optString("timeout_days"));
  jsonArray = jsonObject.optJSONArray("fields");
  if (jsonArray != null) {
    for (int i = 0; i < jsonArray.size(); i++) {
      JSONObject fieldJson = jsonArray.getJSONObject(i);
      String id = fieldJson.optString("id");
      LogTableField field = performanceLogTable.findField(id);
      if (field == null && i < performanceLogTable.getFields().size()) {
        field = performanceLogTable.getFields().get(i);
      }
      if (field != null) {
        field.setFieldName(fieldJson.optString("name"));
        field.setEnabled(fieldJson.optBoolean("enabled"));
      }
    }
  }

  jsonObject = JSONObject.fromObject(root.getAttribute("metricsLogTable"));
  MetricsLogTable metricsLogTable = transMeta.getMetricsLogTable();
  metricsLogTable.setConnectionName(jsonObject.optString("connection"));
  metricsLogTable.setSchemaName(jsonObject.optString("schema"));
  metricsLogTable.setTableName(jsonObject.optString("table"));
  metricsLogTable.setTimeoutInDays(jsonObject.optString("timeout_days"));
  jsonArray = jsonObject.optJSONArray("fields");
  if (jsonArray != null) {
    for (int i = 0; i < jsonArray.size(); i++) {
      JSONObject fieldJson = jsonArray.getJSONObject(i);
      String id = fieldJson.optString("id");
      LogTableField field = metricsLogTable.findField(id);
      if (field == null && i < metricsLogTable.getFields().size()) {
        field = metricsLogTable.getFields().get(i);
      }
      if (field != null) {
        field.setFieldName(fieldJson.optString("name"));
        field.setEnabled(fieldJson.optBoolean("enabled"));
      }
    }
  }

  jsonArray = JSONArray.fromObject(root.getAttribute("partitionschemas"));
  for (int i = 0; i < jsonArray.size(); i++) {
    jsonObject = jsonArray.getJSONObject(i);
    PartitionSchema partitionSchema = decodePartitionSchema(jsonObject);
    PartitionSchema check = transMeta.findPartitionSchema(partitionSchema.getName());
    if (check != null) {
      if (!check.isShared()) {
        transMeta.addOrReplacePartitionSchema(partitionSchema);
      }
    } else {
      transMeta.getPartitionSchemas().add(partitionSchema);
    }
  }

  decodeSlaveServers(root, transMeta);

  jsonArray = JSONArray.fromObject(root.getAttribute("clusterSchemas"));
  for (int i = 0; i < jsonArray.size(); i++) {
    jsonObject = jsonArray.getJSONObject(i);
    ClusterSchema clusterSchema = decodeClusterSchema(jsonObject, transMeta.getSlaveServers());
    clusterSchema.shareVariablesWith(transMeta);
    ClusterSchema check = transMeta.findClusterSchema(clusterSchema.getName());
    if (check != null) {
      if (!check.isShared()) {
        transMeta.addOrReplaceClusterSchema(clusterSchema);
      }
    } else {
      transMeta.getClusterSchemas().add(clusterSchema);
    }
  }
  for (int i = 0; i < transMeta.nrSteps(); i++) {
    transMeta.getStep(i).setClusterSchemaAfterLoading(transMeta.getClusterSchemas());
  }

  transMeta.setSizeRowset(Const.toInt(root.getAttribute("size_rowset"), Const.ROWS_IN_ROWSET));
  transMeta.setSleepTimeEmpty(
      Const.toInt(root.getAttribute("sleep_time_empty"), Const.TIMEOUT_GET_MILLIS));
  transMeta.setSleepTimeFull(
      Const.toInt(root.getAttribute("sleep_time_full"), Const.TIMEOUT_PUT_MILLIS));
  transMeta.setUsingUniqueConnections(
      "Y".equalsIgnoreCase(root.getAttribute("unique_connections")));
  transMeta.setFeedbackShown(!"N".equalsIgnoreCase(root.getAttribute("feedback_shown")));
  transMeta.setFeedbackSize(Const.toInt(root.getAttribute("feedback_size"), Const.ROWS_UPDATE));
  transMeta.setUsingThreadPriorityManagment(
      !"N".equalsIgnoreCase(root.getAttribute("using_thread_priorities")));
  transMeta.setCapturingStepPerformanceSnapShots(
      "Y".equalsIgnoreCase(root.getAttribute("capture_step_performance")));
  transMeta.setStepPerformanceCapturingDelay(
      Const.toLong(root.getAttribute("step_performance_capturing_delay"), 1000));
  transMeta.setStepPerformanceCapturingSizeLimit(
      root.getAttribute("step_performance_capturing_size_limit"));
  transMeta.setKey(XMLHandler.stringToBinary(root.getAttribute("key_for_session_key")));
  transMeta.setPrivateKey("Y".equals(root.getAttribute("is_key_private")));

  return transMeta;
}
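// Round-trip sketch (the codec class name is assumed, not shown in this source):
// rebuild a TransMeta from graph XML produced by the matching encoder.
TransMetaCodec transCodec = new TransMetaCodec();
TransMeta restored = (TransMeta) transCodec.decode(graphXml);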