public ClusterSchema loadClusterSchema(ObjectId id_cluster_schema, List<SlaveServer> slaveServers)
      throws KettleException {
    // Fetch the schema row from the repository and copy its fields onto a new object.
    RowMetaAndData row = getClusterSchema(id_cluster_schema);

    ClusterSchema result = new ClusterSchema();
    result.setObjectId(id_cluster_schema);
    result.setName(row.getString(KettleDatabaseRepository.FIELD_CLUSTER_NAME, null));
    result.setBasePort(row.getString(KettleDatabaseRepository.FIELD_CLUSTER_BASE_PORT, null));
    result.setSocketsBufferSize(
        row.getString(KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_BUFFER_SIZE, null));
    result.setSocketsFlushInterval(
        row.getString(KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_FLUSH_INTERVAL, null));
    result.setSocketsCompressed(
        row.getBoolean(KettleDatabaseRepository.FIELD_CLUSTER_SOCKETS_COMPRESSED, true));
    result.setDynamic(row.getBoolean(KettleDatabaseRepository.FIELD_CLUSTER_DYNAMIC, true));

    // Attach the referenced slave servers. When a server with the same name exists in
    // the caller-supplied list, reuse that shared instance; otherwise fall back to the
    // copy freshly loaded from the repository.
    for (ObjectId slaveId : repository.getClusterSlaveIDs(id_cluster_schema)) {
      SlaveServer loaded = repository.loadSlaveServer(slaveId, null); // Load last version
      SlaveServer shared = SlaveServer.findSlaveServer(slaveServers, loaded.getName());
      result.getSlaveServers().add(shared != null ? shared : loaded);
    }

    return result;
  }
Example #2
0
  public void encodeClusterSchema(Element e, TransMeta transMeta) {
    // Serialize every cluster schema of the transformation into a JSON array and
    // store the result as the "clusterSchemas" attribute of the given element.
    JSONArray schemas = new JSONArray();
    for (ClusterSchema clusterSchema : transMeta.getClusterSchemas()) {
      JSONObject schemaJson = new JSONObject();
      schemaJson.put("name", clusterSchema.getName());
      schemaJson.put("base_port", clusterSchema.getBasePort());
      schemaJson.put("sockets_buffer_size", clusterSchema.getSocketsBufferSize());
      schemaJson.put("sockets_flush_interval", clusterSchema.getSocketsFlushInterval());
      // Boolean flags are encoded as "Y"/"N" strings.
      schemaJson.put("sockets_compressed", clusterSchema.isSocketsCompressed() ? "Y" : "N");
      schemaJson.put("dynamic", clusterSchema.isDynamic() ? "Y" : "N");

      // Each slave server reference is encoded with the dedicated codec.
      JSONArray slaveServerArray = new JSONArray();
      for (SlaveServer slaveServer : clusterSchema.getSlaveServers()) {
        slaveServerArray.add(SlaveServerCodec.encode(slaveServer));
      }
      schemaJson.put("slaveservers", slaveServerArray);

      schemas.add(schemaJson);
    }
    e.setAttribute("clusterSchemas", schemas.toString());
  }
 protected boolean equals(ClusterSchema clusterSchema, ClusterSchema clusterSchema2) {
   // Two cluster schemas are equal when every compared property matches. The
   // conjunction short-circuits on the first mismatch, preserving the evaluation
   // order of the original if/else-if chain.
   return equals(clusterSchema.getName(), clusterSchema2.getName())
       && equals(clusterSchema.getBasePort(), clusterSchema2.getBasePort())
       && equals(clusterSchema.getSocketsBufferSize(), clusterSchema2.getSocketsBufferSize())
       && equals(clusterSchema.getSocketsFlushInterval(), clusterSchema2.getSocketsFlushInterval())
       && equals(clusterSchema.isSocketsCompressed(), clusterSchema2.isSocketsCompressed())
       && equals(clusterSchema.isDynamic(), clusterSchema2.isDynamic())
       && equals(clusterSchema.getSlaveServers(), clusterSchema2.getSlaveServers());
 }
  /**
   * Persists a cluster schema to the repository, together with its slave server references.
   *
   * <p>The schema's object id is updated in place: it is resolved by name against the
   * repository, then either inserted (new) or updated (existing).
   *
   * @param clusterSchema the schema to save; its object id may be assigned/overwritten
   * @param versionComment comment forwarded when saving not-yet-persisted slave servers
   * @param id_transformation the transformation to link this schema to
   * @param isUsedByTransformation if true, a transformation-cluster link row is also written
   * @param overwrite if true, a name-colliding schema is deleted before saving
   * @throws KettleException if any repository operation fails
   */
  public void saveClusterSchema(
      ClusterSchema clusterSchema,
      String versionComment,
      ObjectId id_transformation,
      boolean isUsedByTransformation,
      boolean overwrite)
      throws KettleException {
    // Resolve the schema by name; if found, adopt the repository's id.
    ObjectId existingClusterSchemaId = getClusterID(clusterSchema.getName());
    if (existingClusterSchemaId != null) {
      clusterSchema.setObjectId(existingClusterSchemaId);
    }

    if (clusterSchema.getObjectId() == null) {
      // New Slave Server
      clusterSchema.setObjectId(insertCluster(clusterSchema));
    } else {

      // If we received a clusterSchemaId and it is different from the cluster schema we are working
      // with...
      // NOTE(review): because the object id was just set to existingClusterSchemaId above
      // whenever one was found, this inequality appears to never hold, making the collision
      // branch below unreachable — confirm against callers before relying on the
      // overwrite/exists behavior.
      if (existingClusterSchemaId != null
          && !clusterSchema.getObjectId().equals(existingClusterSchemaId)) {
        // A cluster with this name already exists
        if (overwrite) {
          // Proceed with save, removing the original version from the repository first
          repository.deleteClusterSchema(existingClusterSchemaId);
          updateCluster(clusterSchema);
        } else {
          throw new KettleObjectExistsException(
              "Failed to save object to repository. Object ["
                  + clusterSchema.getName()
                  + "] already exists.");
        }
      } else {
        // There are no naming collisions (either it is the same object or the name is unique)
        updateCluster(clusterSchema);
      }
    }

    // Replace the schema's slave links wholesale: delete all existing rows, then re-insert.
    repository.delClusterSlaves(clusterSchema.getObjectId());

    // Also save the used slave server references.
    for (int i = 0; i < clusterSchema.getSlaveServers().size(); i++) {
      SlaveServer slaveServer = clusterSchema.getSlaveServers().get(i);
      if (slaveServer.getObjectId() == null) // oops, not yet saved!
      {
        repository.save(
            slaveServer,
            versionComment,
            null,
            id_transformation,
            isUsedByTransformation,
            overwrite);
      }
      repository.insertClusterSlave(clusterSchema, slaveServer);
    }

    // Save a link to the transformation to keep track of the use of this cluster schema
    // Only save it if it's really used by the transformation
    if (isUsedByTransformation) {
      repository.insertTransformationCluster(id_transformation, clusterSchema.getObjectId());
    }
  }