Code example #1
  /**
   * Gets the base options from the underlying job and the map-reduce
   * configuration and returns them in a single list. User-supplied
   * properties from the property panel are first copied into the
   * map-reduce configuration.
   *
   * @param job the job to extract base options from
   * @return a list combining the job's base options with the map-reduce
   *         configuration options
   */
  protected List<String> getBaseConfig(HadoopJob job) {
    Map<String, String> userProps = m_propPanel.getProperties();
    for (Map.Entry<String, String> e : userProps.entrySet()) {

      // skip the additional-packages property, as it comes in via the
      // base job options below
      if (e.getKey() != null && !e.getKey().equals(DistributedJob.WEKA_ADDITIONAL_PACKAGES_KEY)) {
        m_mrConfig.setUserSuppliedProperty(e.getKey(), e.getValue());
      }
    }

    String[] baseJobOpts = job.getBaseOptionsOnly();
    String[] mrConfigOpts = m_mrConfig.getOptions();

    List<String> opts = new ArrayList<String>();
    for (String s : baseJobOpts) {
      opts.add(s);
    }

    for (String s : mrConfigOpts) {
      opts.add(s);
    }

    return opts;
  }
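
A minimal usage sketch (hypothetical caller, not from the source): the combined list is typically flattened back into a single command-line string with weka.core.Utils.joinOptions, the counterpart of the Utils.splitOptions call seen in code example #2. The `job` variable here is an assumed, already-configured HadoopJob instance.

  // hypothetical caller fragment - assumes a configured HadoopJob `job`
  List<String> opts = getBaseConfig(job);
  String optionString = Utils.joinOptions(opts.toArray(new String[opts.size()]));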
Code example #2
  /** Set up the customizer with the appropriate tabs for the underlying job type */
  protected void setup() {
    removeAll();

    // holds the map-reduce config editor plus the user-defined property panel
    JPanel configHolder = new JPanel();
    configHolder.setLayout(new BorderLayout());
    configHolder.add(m_mrConfigEditor, BorderLayout.NORTH);
    m_propPanel = new HadoopPropertyPanel(m_mrConfig.getUserSuppliedProperties());
    configHolder.add(m_propPanel, BorderLayout.SOUTH);

    JPanel outerP = new JPanel();
    outerP.setLayout(new BorderLayout());
    outerP.add(configHolder, BorderLayout.NORTH);

    m_configTabs.addTab("Hadoop configuration", outerP);

    // use the job's simple class name (minus the package prefix) as the tab title
    String jobTitle = m_bean.getClass().getName();
    jobTitle = jobTitle.substring(jobTitle.lastIndexOf('.') + 1);

    // add job-specific configuration tabs according to the concrete job type
    if (m_job instanceof ArffHeaderHadoopJob) {
      addTabForArffHeaderJob(jobTitle, m_job);
    } else if (m_job instanceof weka.distributed.hadoop.WekaClassifierHadoopJob) {
      // the classifier job needs an ARFF header creation phase first, so
      // configure a temporary ARFF job from the original option settings
      m_tempArffJob = new weka.distributed.hadoop.ArffHeaderHadoopJob();
      try {
        m_tempArffJob.setOptions(Utils.splitOptions(m_optionsOrig));
      } catch (Exception ex) {
        ex.printStackTrace();
      }
      addTabForArffHeaderJob("ARFF header creation", m_tempArffJob);
      addTabForClassifierJob(jobTitle, m_job);
    } else if (m_job instanceof weka.distributed.hadoop.WekaClassifierEvaluationHadoopJob) {
      m_tempArffJob = new weka.distributed.hadoop.ArffHeaderHadoopJob();
      try {
        m_tempArffJob.setOptions(Utils.splitOptions(m_optionsOrig));
      } catch (Exception ex) {
        ex.printStackTrace();
      }
      addTabForArffHeaderJob("ARFF header creation", m_tempArffJob);
      addTabForEvaluationJob(jobTitle, m_job);
    } else if (m_job instanceof weka.distributed.hadoop.CorrelationMatrixHadoopJob) {
      m_tempArffJob = new weka.distributed.hadoop.ArffHeaderHadoopJob();
      try {
        m_tempArffJob.setOptions(Utils.splitOptions(m_optionsOrig));
      } catch (Exception ex) {
        ex.printStackTrace();
      }
      addTabForArffHeaderJob("ARFF header creation", m_tempArffJob);
      addTabForCorrelationMatrixJob(jobTitle, m_job);
    } else if (m_job instanceof weka.distributed.hadoop.WekaScoringHadoopJob) {
      m_tempArffJob = new weka.distributed.hadoop.ArffHeaderHadoopJob();
      try {
        m_tempArffJob.setOptions(Utils.splitOptions(m_optionsOrig));
      } catch (Exception ex) {
        ex.printStackTrace();
      }
      addTabForArffHeaderJob("ARFF header creation", m_tempArffJob);
      addTabForScoringJob(jobTitle, m_job);
    }

    add(m_configTabs, BorderLayout.CENTER);

    addButtons();
  }
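
The four instanceof branches above repeat the same temporary ARFF job construction. A possible refactoring sketch (hypothetical helper, not part of the source) that would centralize that duplication, using only the m_optionsOrig field and Utils.splitOptions call already present in the snippet:

  // hypothetical helper - not in the source; each branch could call this
  // instead of repeating the construction/option-parsing boilerplate
  private weka.distributed.hadoop.ArffHeaderHadoopJob makeTempArffJob() {
    weka.distributed.hadoop.ArffHeaderHadoopJob tempJob =
      new weka.distributed.hadoop.ArffHeaderHadoopJob();
    try {
      // configure the temp job from the original option string
      tempJob.setOptions(Utils.splitOptions(m_optionsOrig));
    } catch (Exception ex) {
      ex.printStackTrace();
    }
    return tempJob;
  }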