public Workflow doDelete(
     Session mysession, Request myrequest, Response myresponse, Configuration myconfig, DB db) {
   boolean accesspermission =
       RequireUser.SuperAdministrator(
           text,
           mysession.get("username"),
           myconfig.get(db, "superadmin"),
           myrequest,
           myresponse,
           db.getDatabase(),
           mysession.get("database"));
   if (!accesspermission) return new Workflow(text);
   Workflow workflow = new Workflow(text);
   workflow.read(db, myrequest.getParameter("id"));
   Cms.CMSAudit(
       "action=delete workflow="
           + workflow.getTitle()
           + " - "
           + workflow.getAction()
           + " username="******"username")
           + " userid="
           + mysession.get("userid"));
   workflow.delete(db);
   return workflow;
 }
  @Override
  protected void backPressed() {
    // Move the workflow to its previous state; run the transition in a background runnable.
    final Workflow pipe = getWizard().getWorkflow();

    if (pipe.getCurrentState().getPreviousState() == null) {
      // This wizard is part of a larger wizard, so return control to it.
      // See setAdapter for details.
      workflowWizardAdapter.backPressed();
      return;
    }

    IRunnableWithProgress runnable =
        new IRunnableWithProgress() {
          public void run(IProgressMonitor monitor)
              throws InvocationTargetException, InterruptedException {

            pipe.previous(monitor);
          }
        };
    try {
      run(false, true, runnable);
    } catch (InvocationTargetException e) {
      CatalogUIPlugin.log(e.getLocalizedMessage(), e);
    } catch (InterruptedException e) {
      CatalogUIPlugin.log(e.getLocalizedMessage(), e);
    }
  }
  /**
   * Builds a decision tree model.
   *
   * @param sparkContext JavaSparkContext initialized with the application
   * @param modelID Model ID
   * @param trainingData Training data as a JavaRDD of LabeledPoints
   * @param testingData Testing data as a JavaRDD of LabeledPoints
   * @param workflow Machine learning workflow
   * @param mlModel Deployable machine learning model
   * @param includedFeatures Indices and names of the features included in the model
   * @param categoricalFeatureInfo Map from categorical feature index to its number of categories
   * @return summary of the built decision tree model
   * @throws MLModelBuilderException if an error occurs while building the model
   */
  private ModelSummary buildDecisionTreeModel(
      JavaSparkContext sparkContext,
      long modelID,
      JavaRDD<LabeledPoint> trainingData,
      JavaRDD<LabeledPoint> testingData,
      Workflow workflow,
      MLModel mlModel,
      SortedMap<Integer, String> includedFeatures,
      Map<Integer, Integer> categoricalFeatureInfo)
      throws MLModelBuilderException {
    try {
      Map<String, String> hyperParameters = workflow.getHyperParameters();
      DecisionTree decisionTree = new DecisionTree();
      DecisionTreeModel decisionTreeModel =
          decisionTree.train(
              trainingData,
              getNoOfClasses(mlModel),
              categoricalFeatureInfo,
              hyperParameters.get(MLConstants.IMPURITY),
              Integer.parseInt(hyperParameters.get(MLConstants.MAX_DEPTH)),
              Integer.parseInt(hyperParameters.get(MLConstants.MAX_BINS)));

      // remove from cache
      trainingData.unpersist();
      // add test data to cache
      testingData.cache();

      JavaPairRDD<Double, Double> predictionsAndLabels =
          decisionTree.test(decisionTreeModel, testingData).cache();
      ClassClassificationAndRegressionModelSummary classClassificationAndRegressionModelSummary =
          SparkModelUtils.getClassClassificationModelSummary(
              sparkContext, testingData, predictionsAndLabels);

      // remove from cache
      testingData.unpersist();

      mlModel.setModel(new MLDecisionTreeModel(decisionTreeModel));

      classClassificationAndRegressionModelSummary.setFeatures(
          includedFeatures.values().toArray(new String[0]));
      classClassificationAndRegressionModelSummary.setAlgorithm(
          SUPERVISED_ALGORITHM.DECISION_TREE.toString());

      MulticlassMetrics multiclassMetrics =
          getMulticlassMetrics(sparkContext, predictionsAndLabels);

      predictionsAndLabels.unpersist();

      classClassificationAndRegressionModelSummary.setMulticlassConfusionMatrix(
          getMulticlassConfusionMatrix(multiclassMetrics, mlModel));
      Double modelAccuracy = getModelAccuracy(multiclassMetrics);
      classClassificationAndRegressionModelSummary.setModelAccuracy(modelAccuracy);
      classClassificationAndRegressionModelSummary.setDatasetVersion(workflow.getDatasetVersion());

      return classClassificationAndRegressionModelSummary;
    } catch (Exception e) {
      throw new MLModelBuilderException(
          "An error occurred while building decision tree model: " + e.getMessage(), e);
    }
  }
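The trainer above pulls its settings from workflow.getHyperParameters(). A hedged sketch of the map this code expects, using the MLConstants keys referenced in the method (the values are illustrative only, not defaults from the source):

  // Illustrative hyper-parameter map for buildDecisionTreeModel.
  // Keys come from the MLConstants references above; values are examples only.
  Map<String, String> hyperParameters = new HashMap<>();
  hyperParameters.put(MLConstants.IMPURITY, "gini");
  hyperParameters.put(MLConstants.MAX_DEPTH, "5");
  hyperParameters.put(MLConstants.MAX_BINS, "32");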
Example n. 4
 private WorkflowEntry(String pageName, String title, Workflow workflow, WorkflowEntry parent) {
   this.pageName = pageName;
   this.title = title;
   this.workflow = workflow;
   this.id = workflow.nextId();
   this.parent = parent;
   if (parent != null) {
     parent.children.add(this);
   }
   workflow.register(this);
 }
  /**
   * Builds a naive bayes model.
   *
   * @param sparkContext JavaSparkContext initialized with the application
   * @param modelID Model ID
   * @param trainingData Training data as a JavaRDD of LabeledPoints
   * @param testingData Testing data as a JavaRDD of LabeledPoints
   * @param workflow Machine learning workflow
   * @param mlModel Deployable machine learning model
   * @param includedFeatures Indices and names of the features included in the model
   * @return summary of the built naive bayes model
   * @throws MLModelBuilderException if an error occurs while building the model
   */
  private ModelSummary buildNaiveBayesModel(
      JavaSparkContext sparkContext,
      long modelID,
      JavaRDD<LabeledPoint> trainingData,
      JavaRDD<LabeledPoint> testingData,
      Workflow workflow,
      MLModel mlModel,
      SortedMap<Integer, String> includedFeatures)
      throws MLModelBuilderException {
    try {
      Map<String, String> hyperParameters = workflow.getHyperParameters();
      NaiveBayesClassifier naiveBayesClassifier = new NaiveBayesClassifier();
      NaiveBayesModel naiveBayesModel =
          naiveBayesClassifier.train(
              trainingData, Double.parseDouble(hyperParameters.get(MLConstants.LAMBDA)));

      // remove from cache
      trainingData.unpersist();
      // add test data to cache
      testingData.cache();

      JavaPairRDD<Double, Double> predictionsAndLabels =
          naiveBayesClassifier.test(naiveBayesModel, testingData).cache();
      ClassClassificationAndRegressionModelSummary classClassificationAndRegressionModelSummary =
          SparkModelUtils.getClassClassificationModelSummary(
              sparkContext, testingData, predictionsAndLabels);

      // remove from cache
      testingData.unpersist();

      mlModel.setModel(new MLClassificationModel(naiveBayesModel));

      classClassificationAndRegressionModelSummary.setFeatures(
          includedFeatures.values().toArray(new String[0]));
      classClassificationAndRegressionModelSummary.setAlgorithm(
          SUPERVISED_ALGORITHM.NAIVE_BAYES.toString());

      MulticlassMetrics multiclassMetrics =
          getMulticlassMetrics(sparkContext, predictionsAndLabels);

      predictionsAndLabels.unpersist();

      classClassificationAndRegressionModelSummary.setMulticlassConfusionMatrix(
          getMulticlassConfusionMatrix(multiclassMetrics, mlModel));
      Double modelAccuracy = getModelAccuracy(multiclassMetrics);
      classClassificationAndRegressionModelSummary.setModelAccuracy(modelAccuracy);
      classClassificationAndRegressionModelSummary.setDatasetVersion(workflow.getDatasetVersion());

      return classClassificationAndRegressionModelSummary;
    } catch (Exception e) {
      throw new MLModelBuilderException(
          "An error occurred while building naive bayes model: " + e.getMessage(), e);
    }
  }
Example n. 6
  /**
   * Returns the workflow instances that correspond to the given workflow definition thing.
   *
   * @param thing the workflow definition thing
   * @return the matching workflow instances
   */
  public static List<Workflow> getWorkflowInstances(Thing thing) {
    List<Workflow> flows = new ArrayList<Workflow>();
    for (Workflow workflow : workflows.values()) {
      Thing fthing = workflow.getThing();
      if (fthing == thing) {
        flows.add(workflow);
      }
    }

    return flows;
  }
Example n. 7
  /**
   * Starts the workflow run asynchronously.
   *
   * @param name the name of the workflow
   * @return a redirect to the application index once the run has been started
   */
  @Security.Authenticated(Secured.class)
  public Result runWorkflow(String name) {
    FormDefinition form = formDefinitionForWorkflow(name);

    // Process any file uploads present in the form data first
    Http.MultipartFormData body = request().body().asMultipartFormData();

    for (Object obj : body.getFiles()) {
      Http.MultipartFormData.FilePart filePart = (Http.MultipartFormData.FilePart) obj;
      UserUpload userUpload = uploadFile(filePart);

      BasicField fileInputField = form.getField(filePart.getKey());
      fileInputField.setValue(userUpload);
    }

    // Set the form definition field values from the request data
    Map<String, String[]> data = body.asFormUrlEncoded();
    for (String key : data.keySet()) {
      BasicField field = form.getField(key);
      field.setValue(data.get(key));
    }

    // Transfer form field data to workflow settings map
    Map<String, Object> settings = new HashMap<>();

    for (BasicField field : form.fields) {
      settings.put(field.name, field.value());
    }

    settings.putAll(settingsFromConfig(form));

    // Update the workflow model object and persist to the db
    Workflow workflow = Workflow.find.where().eq("name", form.name).findUnique();

    if (workflow == null) {
      workflow = new Workflow();
    }

    workflow.name = form.name;
    workflow.title = form.title;
    workflow.yamlFile = form.yamlFile;

    workflow.save();

    // Run the workflow
    ObjectNode response = runYamlWorkflow(form.yamlFile, workflow, settings);

    return redirect(routes.Application.index());
  }
  /**
   * Runs the dialog in headless mode: the dialog stays hidden while the workflow runs.
   *
   * @param monitor the progress monitor
   * @return true if the workflow ran and completed correctly; false if it failed or the user
   *     cancelled (because user interaction was required)
   */
  public boolean runHeadless(IProgressMonitor monitor) {
    try {
      this.headless = true;
      int ticks = getWorkflowWizard().getWorkflow().getStates().length * 10;
      monitor.beginTask(Messages.WorkflowWizardDialog_importTask, ticks);
      // we must ensure that the contents of the dialog (shell) have been
      // created; this is needed for the wizard pages
      if (getShell() == null) {
        // do in ui thread
        PlatformGIS.syncInDisplayThread(
            new Runnable() {
              public void run() {
                create();
              }
            });
      }

      Workflow pipe = getWizard().getWorkflow();
      pipe.run(new SubProgressMonitor(monitor, ticks));
      final boolean[] result = new boolean[] {true};
      if (!pipe.isFinished()) {
        // show the page corresponding to the current state
        final IWizardPage page = getWizard().getPage(pipe.getCurrentState());
        if (page != null) {
          // ensure the page has a state if it is a DataPipelinePage
          if (page instanceof WorkflowWizardPage) {
            WorkflowWizardPage dpPage = (WorkflowWizardPage) page;
            if (dpPage.getState() == null) dpPage.setState(pipe.getCurrentState());
          }

          PlatformGIS.syncInDisplayThread(
              new Runnable() {
                public void run() {
                  headless = false;
                  showPage(page);
                  if (open() == Window.CANCEL) {
                    result[0] = false;
                  }
                }
              });
        }
      }

      this.headless = false;
      return result[0];
    } finally {
      monitor.done();
    }
  }
Example n. 9
  /**
   * Returns the list of things that define the current workflows.
   *
   * @return the workflow definition things
   */
  public static List<Thing> getWorkflowThings() {
    List<Thing> things = new ArrayList<Thing>();
    Map<String, String> thingMap = new HashMap<String, String>();

    for (Workflow workflow : workflows.values()) {
      Thing thing = workflow.getThing();
      String path = thing.getMetadata().getPath();
      if (thingMap.get(path) == null) {
        things.add(thing);
        thingMap.put(path, path);
      }
    }

    return things;
  }
 public Workflow getDelete(
     Session mysession, Request myrequest, Response myresponse, Configuration myconfig, DB db) {
   boolean accesspermission =
       RequireUser.SuperAdministrator(
           text,
           mysession.get("username"),
           myconfig.get(db, "superadmin"),
           myrequest,
           myresponse,
           db.getDatabase(),
           mysession.get("database"));
   if (!accesspermission) return new Workflow(text);
   Workflow workflow = new Workflow(text);
   workflow.read(db, myrequest.getParameter("id"));
   return workflow;
 }
Example n. 11
 @Override
 public int hashCode() {
   final int prime = 31;
   int result = 1;
   result = prime * result + (getUntilFinished() == null ? 0 : getUntilFinished().hashCode());
   result = prime * result + (parent == null ? 0 : parent.hashCode());
   result = prime * result + (getBlock() == null ? 0 : getBlock().hashCode());
   return result;
 }
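A hashCode override like this one normally travels with an equals over the same fields. A minimal matching sketch, under the assumption that the class is named ControlLink (hypothetical; the snippet does not show the class declaration):

  // Hypothetical matching equals; compares the same fields as the hashCode above.
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (!(obj instanceof ControlLink)) {
      return false;
    }
    ControlLink other = (ControlLink) obj;
    return java.util.Objects.equals(getUntilFinished(), other.getUntilFinished())
        && java.util.Objects.equals(parent, other.parent)
        && java.util.Objects.equals(getBlock(), other.getBlock());
  }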
Example n. 12
 @Override
 public void setParent(Workflow parent) {
   if (this.parent != null && this.parent != parent) {
     this.parent.getControlLinks().remove(this);
   }
   this.parent = parent;
   if (parent != null) {
     parent.getControlLinks().add(this);
   }
 }
Example n. 13
  public static void main(String[] args) {
    Power power = new Power();
    power.on();

    Workflow p1 = new Workflow("p1", power);
    Workflow p2 = new Workflow("p2", power);
    Workflow p3 = new Workflow("p3", power);
    Workflow p4 = new Workflow("p4", power);

    p2.waitFor(p1);

    p1.start();
    p2.start();
    p3.start();
    p4.start();

    try {
      Thread.sleep(100);
    } catch (InterruptedException e) {
      e.printStackTrace();
    }

    power.off();

    try {
      Thread.sleep(100);
    } catch (InterruptedException e) {
      e.printStackTrace();
    }

    power.on();

    try {
      Thread.sleep(500);
    } catch (InterruptedException e) {
      e.printStackTrace();
    }

    p3.pause(1000);
  }
  /** Creates some attachments and assigns them to the test workflow. */
  @Test
  public void testAttachments() {
    final PK workflowPk = testWorkflow.getPK();
    // create product attachment
    final Product product = jaloSession.getProductManager().createProduct("sabbers");
    assertNotNull("Product not null", product);
    Map<String, Object> map = new HashMap<String, Object>();
    map.put(WorkflowItemAttachment.CODE, "productTest");
    map.put(WorkflowItemAttachment.ITEM, product);
    map.put(WorkflowItemAttachment.WORKFLOW, testWorkflow);
    final WorkflowItemAttachment attachProduct =
        WorkflowManager.getInstance().createWorkflowItemAttachment(map);
    assertNotNull("Attachment not null", attachProduct);

    // create category attachment
    final Category category =
        CategoryManager.getInstance().createCategory(PK.createUUIDPK(0).getHex());
    assertNotNull("Category not null", category);
    map = new HashMap<String, Object>();
    map.put(WorkflowItemAttachment.CODE, "categoryTest");
    map.put(WorkflowItemAttachment.ITEM, category);
    map.put(WorkflowItemAttachment.WORKFLOW, testWorkflow);
    final WorkflowItemAttachment attachCategory =
        WorkflowManager.getInstance().createWorkflowItemAttachment(map);
    assertNotNull("Attachment not null", attachCategory);

    final WorkflowAction action1 = getAction(ACTIONCODES.ACTION1.name());
    action1.setAttachments(
        Arrays.asList(new WorkflowItemAttachment[] {attachProduct, attachCategory}));

    // "restart": touch the tenant cache before re-reading the items below
    Registry.getCurrentTenant().getCache();

    // check attachments
    final Workflow found = JaloSession.getCurrentSession().getItem(workflowPk);
    assertEquals("Excpected number of attachments", 2, found.getAttachments().size());
    final WorkflowAction foundAction = getAction(ACTIONCODES.ACTION1.name());
    assertEquals(
        "Expected number of attachments of action 1", 2, foundAction.getAttachments().size());
  }
 public Workflow getIndex(
     Session mysession, Request myrequest, Response myresponse, Configuration myconfig, DB db) {
   boolean accesspermission =
       RequireUser.SuperAdministrator(
           text,
           mysession.get("username"),
           myconfig.get(db, "superadmin"),
           myrequest,
           myresponse,
           db.getDatabase(),
           mysession.get("database"));
   if (!accesspermission) return new Workflow(text);
   String SQL = "select distinct title,action,fromstate,tostate,id from workflow";
   String SQLwhere = "";
   if (!myrequest.getParameter("workflow").equals("")) {
     SQLwhere = " where title='" + Common.SQL_clean(myrequest.getParameter("workflow")) + "'";
   }
   String SQLorder = " order by title,action,fromstate,tostate,id";
   SQL += SQLwhere + SQLorder;
   Workflow workflow = new Workflow(text);
   workflow.records(db, SQL);
   return workflow;
 }
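getIndex builds its WHERE clause by string concatenation, relying on Common.SQL_clean to escape the workflow parameter. For comparison, here is the same filtered query written with a bound parameter; this is a sketch under the assumption of direct java.sql access, not this project's DB wrapper:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

// Parameterized variant of the query above; the JDBC driver handles escaping.
static void listWorkflows(Connection connection, String title) throws SQLException {
  String sql =
      "select distinct title,action,fromstate,tostate,id from workflow"
          + " where title=? order by title,action,fromstate,tostate,id";
  try (PreparedStatement ps = connection.prepareStatement(sql)) {
    ps.setString(1, title);
    try (ResultSet rs = ps.executeQuery()) {
      while (rs.next()) {
        // map columns to a Workflow record, as workflow.records(db, SQL) does above
      }
    }
  }
}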
Example n. 16
 @Bean
 protected ReadEmailTaskJobFactory readEmailTaskJobFactory() {
   return new ReadEmailTaskJobFactory( //
       email.emailAccountFacade(), //
       email.emailServiceFactory(), //
       email.subjectHandler(), //
       email.emailStore(), //
       workflow
           .systemWorkflowLogicBuilder() //
           .build(), //
       dms.defaultDmsLogic(), //
       data.systemDataView(), //
       email.emailTemplateLogic(), //
       template.databaseTemplateEngine(), //
       emailTemplateSenderFactory() //
       );
 }
Example n. 17
  @Bean
  protected ObserverFactory observerFactory() {
    return new DefaultObserverFactory( //
        userStore, //
        api.systemFluentApi(), //
        workflow.systemWorkflowLogicBuilder().build(), //
        email.emailAccountFacade(), //
        email.emailTemplateLogic(), //
        data.systemDataView(), //
        new Supplier<CMDataView>() {

          @Override
          public CMDataView get() {
            return user.userDataView();
          }
        }, //
        emailTemplateSenderFactory() //
        );
  }
Example n. 18
 public void activate(IRequestCycle cycle) {
   workflow.setCurrentEntry(this);
   DataContext.bindThreadDataContext(dataContext);
   cycle.getPage(pageName).validate(cycle);
   cycle.activate(pageName);
 }
  /**
   * Builds a lasso regression model.
   *
   * @param sparkContext JavaSparkContext initialized with the application
   * @param modelID Model ID
   * @param trainingData Training data as a JavaRDD of LabeledPoints
   * @param testingData Testing data as a JavaRDD of LabeledPoints
   * @param workflow Machine learning workflow
   * @param mlModel Deployable machine learning model
   * @param includedFeatures Indices and names of the features included in the model
   * @return summary of the built lasso regression model
   * @throws MLModelBuilderException if an error occurs while building the model
   */
  private ModelSummary buildLassoRegressionModel(
      JavaSparkContext sparkContext,
      long modelID,
      JavaRDD<LabeledPoint> trainingData,
      JavaRDD<LabeledPoint> testingData,
      Workflow workflow,
      MLModel mlModel,
      SortedMap<Integer, String> includedFeatures)
      throws MLModelBuilderException {
    try {
      LassoRegression lassoRegression = new LassoRegression();
      Map<String, String> hyperParameters = workflow.getHyperParameters();
      LassoModel lassoModel =
          lassoRegression.train(
              trainingData,
              Integer.parseInt(hyperParameters.get(MLConstants.ITERATIONS)),
              Double.parseDouble(hyperParameters.get(MLConstants.LEARNING_RATE)),
              Double.parseDouble(hyperParameters.get(MLConstants.REGULARIZATION_PARAMETER)),
              Double.parseDouble(hyperParameters.get(MLConstants.SGD_DATA_FRACTION)));

      // remove from cache
      trainingData.unpersist();
      // add test data to cache
      testingData.cache();

      Vector weights = lassoModel.weights();
      if (!isValidWeights(weights)) {
        throw new MLModelBuilderException(
            "Weights of the model generated are null or infinity. [Weights] "
                + vectorToString(weights));
      }
      JavaRDD<Tuple2<Double, Double>> predictionsAndLabels =
          lassoRegression.test(lassoModel, testingData).cache();
      ClassClassificationAndRegressionModelSummary regressionModelSummary =
          SparkModelUtils.generateRegressionModelSummary(
              sparkContext, testingData, predictionsAndLabels);

      // remove from cache
      testingData.unpersist();

      mlModel.setModel(new MLGeneralizedLinearModel(lassoModel));

      List<FeatureImportance> featureWeights =
          getFeatureWeights(includedFeatures, lassoModel.weights().toArray());
      regressionModelSummary.setFeatures(includedFeatures.values().toArray(new String[0]));
      regressionModelSummary.setAlgorithm(SUPERVISED_ALGORITHM.LASSO_REGRESSION.toString());
      regressionModelSummary.setFeatureImportance(featureWeights);

      RegressionMetrics regressionMetrics =
          getRegressionMetrics(sparkContext, predictionsAndLabels);

      predictionsAndLabels.unpersist();

      Double meanSquaredError = regressionMetrics.meanSquaredError();
      regressionModelSummary.setMeanSquaredError(meanSquaredError);
      regressionModelSummary.setDatasetVersion(workflow.getDatasetVersion());

      return regressionModelSummary;
    } catch (Exception e) {
      throw new MLModelBuilderException(
          "An error occurred while building lasso regression model: " + e.getMessage(), e);
    }
  }
  /**
   * Builds a support vector machine (SVM) model.
   *
   * @param sparkContext JavaSparkContext initialized with the application
   * @param modelID Model ID
   * @param trainingData Training data as a JavaRDD of LabeledPoints
   * @param testingData Testing data as a JavaRDD of LabeledPoints
   * @param workflow Machine learning workflow
   * @param mlModel Deployable machine learning model
   * @param includedFeatures Indices and names of the features included in the model
   * @return summary of the built SVM model
   * @throws MLModelBuilderException if an error occurs while building the model
   */
  private ModelSummary buildSVMModel(
      JavaSparkContext sparkContext,
      long modelID,
      JavaRDD<LabeledPoint> trainingData,
      JavaRDD<LabeledPoint> testingData,
      Workflow workflow,
      MLModel mlModel,
      SortedMap<Integer, String> includedFeatures)
      throws MLModelBuilderException {

    if (getNoOfClasses(mlModel) > 2) {
      throw new MLModelBuilderException(
          "A binary classification algorithm cannot have more than "
              + "two distinct values in response variable.");
    }

    try {
      SVM svm = new SVM();
      Map<String, String> hyperParameters = workflow.getHyperParameters();
      SVMModel svmModel =
          svm.train(
              trainingData,
              Integer.parseInt(hyperParameters.get(MLConstants.ITERATIONS)),
              hyperParameters.get(MLConstants.REGULARIZATION_TYPE),
              Double.parseDouble(hyperParameters.get(MLConstants.REGULARIZATION_PARAMETER)),
              Double.parseDouble(hyperParameters.get(MLConstants.LEARNING_RATE)),
              Double.parseDouble(hyperParameters.get(MLConstants.SGD_DATA_FRACTION)));

      // remove from cache
      trainingData.unpersist();
      // add test data to cache
      testingData.cache();

      Vector weights = svmModel.weights();
      if (!isValidWeights(weights)) {
        throw new MLModelBuilderException(
            "Weights of the model generated are null or infinity. [Weights] "
                + vectorToString(weights));
      }

      // get predicted labels while the threshold is still set, to build the confusion matrix
      JavaRDD<Tuple2<Object, Object>> scoresAndLabelsThresholded = svm.test(svmModel, testingData);
      MulticlassMetrics multiclassMetrics =
          new MulticlassMetrics(JavaRDD.toRDD(scoresAndLabelsThresholded));
      MulticlassConfusionMatrix multiclassConfusionMatrix =
          getMulticlassConfusionMatrix(multiclassMetrics, mlModel);

      svmModel.clearThreshold();
      JavaRDD<Tuple2<Object, Object>> scoresAndLabels = svm.test(svmModel, testingData);
      ProbabilisticClassificationModelSummary probabilisticClassificationModelSummary =
          SparkModelUtils.generateProbabilisticClassificationModelSummary(
              sparkContext, testingData, scoresAndLabels);

      // remove from cache
      testingData.unpersist();

      mlModel.setModel(new MLClassificationModel(svmModel));

      List<FeatureImportance> featureWeights =
          getFeatureWeights(includedFeatures, svmModel.weights().toArray());
      probabilisticClassificationModelSummary.setFeatures(
          includedFeatures.values().toArray(new String[0]));
      probabilisticClassificationModelSummary.setFeatureImportance(featureWeights);
      probabilisticClassificationModelSummary.setAlgorithm(SUPERVISED_ALGORITHM.SVM.toString());

      probabilisticClassificationModelSummary.setMulticlassConfusionMatrix(
          multiclassConfusionMatrix);
      Double modelAccuracy = getModelAccuracy(multiclassMetrics);
      probabilisticClassificationModelSummary.setModelAccuracy(modelAccuracy);
      probabilisticClassificationModelSummary.setDatasetVersion(workflow.getDatasetVersion());

      return probabilisticClassificationModelSummary;
    } catch (Exception e) {
      throw new MLModelBuilderException(
          "An error occurred while building SVM model: " + e.getMessage(), e);
    }
  }
 public void linkNodesToWorkflow() {
   workflow.addNode(_ProposeRoute);
   workflow.addNode(_EnterNumFireTruck);
   workflow.addNode(_FireRouteParameters);
   workflow.addNode(_AndJoin115936);
   workflow.addNode(_Develop);
   workflow.addNode(_EnterNumPoliceVehicle);
   workflow.addNode(_PoliceRouteParameters);
   workflow.addNode(_RoutesCoordinated);
   workflow.addNode(_AndFork115896);
   workflow.addNode(_RoutesNotCoordinated);
   workflow.addNode(_AspectMarker117632);
   workflow.addNode(_AspectMarker117636);
   workflow.addNode(_ProcessFireRouteParameters);
   workflow.addNode(_ProcessPoliceRouteParameters);
 }
  /** Build a supervised model. */
  public MLModel build() throws MLModelBuilderException {
    MLModelConfigurationContext context = getContext();
    JavaSparkContext sparkContext = null;
    DatabaseService databaseService = MLCoreServiceValueHolder.getInstance().getDatabaseService();
    MLModel mlModel = new MLModel();
    try {
      sparkContext = context.getSparkContext();
      Workflow workflow = context.getFacts();
      long modelId = context.getModelId();

      // Verify validity of response variable
      String typeOfResponseVariable =
          getTypeOfResponseVariable(workflow.getResponseVariable(), workflow.getFeatures());

      if (typeOfResponseVariable == null) {
        throw new MLModelBuilderException(
            "Type of response variable cannot be null for supervised learning " + "algorithms.");
      }

      // Stops model building if a categorical attribute is used with numerical prediction
      if (workflow.getAlgorithmClass().equals(AlgorithmType.NUMERICAL_PREDICTION.getValue())
          && typeOfResponseVariable.equals(FeatureType.CATEGORICAL)) {
        throw new MLModelBuilderException(
            "Categorical attribute "
                + workflow.getResponseVariable()
                + " cannot be used as the response variable of the Numerical Prediction algorithm: "
                + workflow.getAlgorithmName());
      }

      // generate train and test datasets by converting tokens to labeled points
      int responseIndex = context.getResponseIndex();
      SortedMap<Integer, String> includedFeatures =
          MLUtils.getIncludedFeaturesAfterReordering(
              workflow, context.getNewToOldIndicesList(), responseIndex);

      // gets the pre-processed dataset
      JavaRDD<LabeledPoint> labeledPoints = preProcess().cache();

      JavaRDD<LabeledPoint>[] dataSplit =
          labeledPoints.randomSplit(
              new double[] {workflow.getTrainDataFraction(), 1 - workflow.getTrainDataFraction()},
              MLConstants.RANDOM_SEED);

      // remove from cache
      labeledPoints.unpersist();

      JavaRDD<LabeledPoint> trainingData = dataSplit[0].cache();
      JavaRDD<LabeledPoint> testingData = dataSplit[1];
      // create a deployable MLModel object
      mlModel.setAlgorithmName(workflow.getAlgorithmName());
      mlModel.setAlgorithmClass(workflow.getAlgorithmClass());
      mlModel.setFeatures(workflow.getIncludedFeatures());
      mlModel.setResponseVariable(workflow.getResponseVariable());
      mlModel.setEncodings(context.getEncodings());
      mlModel.setNewToOldIndicesList(context.getNewToOldIndicesList());
      mlModel.setResponseIndex(responseIndex);

      ModelSummary summaryModel = null;
      Map<Integer, Integer> categoricalFeatureInfo;

      // build a machine learning model according to user selected algorithm
      SUPERVISED_ALGORITHM supervisedAlgorithm =
          SUPERVISED_ALGORITHM.valueOf(workflow.getAlgorithmName());
      switch (supervisedAlgorithm) {
        case LOGISTIC_REGRESSION:
          summaryModel =
              buildLogisticRegressionModel(
                  sparkContext,
                  modelId,
                  trainingData,
                  testingData,
                  workflow,
                  mlModel,
                  includedFeatures,
                  true);
          break;
        case LOGISTIC_REGRESSION_LBFGS:
          summaryModel =
              buildLogisticRegressionModel(
                  sparkContext,
                  modelId,
                  trainingData,
                  testingData,
                  workflow,
                  mlModel,
                  includedFeatures,
                  false);
          break;
        case DECISION_TREE:
          categoricalFeatureInfo = getCategoricalFeatureInfo(context.getEncodings());
          summaryModel =
              buildDecisionTreeModel(
                  sparkContext,
                  modelId,
                  trainingData,
                  testingData,
                  workflow,
                  mlModel,
                  includedFeatures,
                  categoricalFeatureInfo);
          break;
        case RANDOM_FOREST:
          categoricalFeatureInfo = getCategoricalFeatureInfo(context.getEncodings());
          summaryModel =
              buildRandomForestTreeModel(
                  sparkContext,
                  modelId,
                  trainingData,
                  testingData,
                  workflow,
                  mlModel,
                  includedFeatures,
                  categoricalFeatureInfo);
          break;
        case SVM:
          summaryModel =
              buildSVMModel(
                  sparkContext,
                  modelId,
                  trainingData,
                  testingData,
                  workflow,
                  mlModel,
                  includedFeatures);
          break;
        case NAIVE_BAYES:
          summaryModel =
              buildNaiveBayesModel(
                  sparkContext,
                  modelId,
                  trainingData,
                  testingData,
                  workflow,
                  mlModel,
                  includedFeatures);
          break;
        case LINEAR_REGRESSION:
          summaryModel =
              buildLinearRegressionModel(
                  sparkContext,
                  modelId,
                  trainingData,
                  testingData,
                  workflow,
                  mlModel,
                  includedFeatures);
          break;
        case RIDGE_REGRESSION:
          summaryModel =
              buildRidgeRegressionModel(
                  sparkContext,
                  modelId,
                  trainingData,
                  testingData,
                  workflow,
                  mlModel,
                  includedFeatures);
          break;
        case LASSO_REGRESSION:
          summaryModel =
              buildLassoRegressionModel(
                  sparkContext,
                  modelId,
                  trainingData,
                  testingData,
                  workflow,
                  mlModel,
                  includedFeatures);
          break;
        default:
          throw new AlgorithmNameException("Incorrect algorithm name");
      }

      // persist model summary
      databaseService.updateModelSummary(modelId, summaryModel);
      return mlModel;
    } catch (Exception e) {
      throw new MLModelBuilderException(
          "An error occurred while building supervised machine learning model: " + e.getMessage(),
          e);
    }
  }
Example n. 23
  /**
   * Removes a workflow from the cache.
   *
   * @param workflow the workflow to remove
   */
  public static void removeWorkflow(Workflow workflow) {
    String id = workflow.getThing().getMetadata().getPath() + ":" + workflow.getWorkflowId();

    workflows.remove(id);
  }
 @Override
 public String apply(Workflow input) {
   return input.getName();
 }
Example n. 25
 @Bean
 protected StartWorkflowTaskJobFactory startWorkflowTaskJobFactory() {
   return new StartWorkflowTaskJobFactory(workflow.systemWorkflowLogicBuilder().build());
 }
Example n. 26
 public void redirect(IRequestCycle cycle, String anchor) {
   workflow.setCurrentEntry(this);
   ILink link = workflowService(cycle).getLink(false, this);
   throw new RedirectException(link.getURL(anchor, true));
 }
 public void linkStartNodesToWorkflow() {
   workflow.addStartupNode(_Develop, false);
 }
Example n. 28
  /**
   * Registers a workflow, chiefly deciding whether to place it into the cache.
   *
   * @param workflow the workflow to cache
   */
  public static void putWorkflow(Workflow workflow) {
    String id = workflow.getThing().getMetadata().getPath() + ":" + workflow.getWorkflowId();

    workflows.put(id, workflow);
  }
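putWorkflow and removeWorkflow build the same composite key by hand. A small refactoring sketch that extracts the shared construction, assuming only the Thing and metadata accessors already used above (the helper name is hypothetical):

  // Shared cache-key construction, extracted from putWorkflow/removeWorkflow.
  private static String cacheKeyOf(Workflow workflow) {
    return workflow.getThing().getMetadata().getPath() + ":" + workflow.getWorkflowId();
  }

Both methods could then call workflows.put(cacheKeyOf(workflow), workflow) and workflows.remove(cacheKeyOf(workflow)).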