Example no. 1
1
 /**
  * JobTracker.submitJob() kicks off a new job.
  *
  * <p>Creates a 'JobInProgress' object, which contains both JobProfile and JobStatus. Those two
  * sub-objects are sometimes shipped outside of the JobTracker, but JobInProgress also carries
  * information that is useful only to the JobTracker itself.
  *
  * <p>The JIP is added to the jobInitQueue, which is processed asynchronously to handle split
  * computation and build up the right TaskTracker/Block mapping.
  */
 public synchronized JobStatus submitJob(String jobFile) throws IOException {
   totalSubmissions++;
   JobInProgress job = new JobInProgress(jobFile, this, this.conf);
   synchronized (jobs) {
     synchronized (jobsByArrival) {
       synchronized (jobInitQueue) {
         jobs.put(job.getProfile().getJobId(), job);
         jobsByArrival.add(job);
         jobInitQueue.add(job);
         jobInitQueue.notifyAll();
       }
     }
   }
   return job.getStatus();
 }
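The Javadoc above says the jobInitQueue is drained asynchronously. Below is a minimal sketch of such a consumer thread, assuming jobInitQueue is a List<JobInProgress>; the class name, the List type, and the initTasks() call are assumptions for illustration, not the actual JobTracker code.

 // Hypothetical background initializer that drains jobInitQueue.
 // Requires: import java.util.List;
 class JobInitThread implements Runnable {
   private final List<JobInProgress> jobInitQueue;

   JobInitThread(List<JobInProgress> jobInitQueue) {
     this.jobInitQueue = jobInitQueue;
   }

   public void run() {
     while (true) {
       JobInProgress job;
       synchronized (jobInitQueue) {
         while (jobInitQueue.isEmpty()) {
           try {
             jobInitQueue.wait(); // woken by jobInitQueue.notifyAll() in submitJob()
           } catch (InterruptedException ie) {
             return;
           }
         }
         job = jobInitQueue.remove(0); // take the oldest submitted job
       }
       try {
         job.initTasks(); // hypothetical per-job init: compute splits, build the TaskTracker/Block mapping
       } catch (Exception e) {
         e.printStackTrace(); // keep the initializer alive for the next job
       }
     }
   }
 }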
  /* Check: getLoggerNames() must return the correct names
   *        for registered loggers and their parents.
   * Returns a boolean: PASSED or FAILED.
   */
  public static boolean checkLoggers() {
    String failMsg = "# checkLoggers: getLoggerNames() returned unexpected loggers";
    Vector<String> expectedLoggerNames = new Vector<String>(getDefaultLoggerNames());

    // Create the logger LOGGER_NAME_1
    Logger.getLogger(LOGGER_NAME_1);
    expectedLoggerNames.addElement(PARENT_NAME_1);
    expectedLoggerNames.addElement(LOGGER_NAME_1);

    // Create the logger LOGGER_NAME_2
    Logger.getLogger(LOGGER_NAME_2);
    expectedLoggerNames.addElement(PARENT_NAME_2);
    expectedLoggerNames.addElement(LOGGER_NAME_2);

    Enumeration<String> returnedLoggersEnum = logMgr.getLoggerNames();
    Vector<String> returnedLoggerNames = new Vector<String>(0);
    while (returnedLoggersEnum.hasMoreElements()) {
      String logger = returnedLoggersEnum.nextElement();
      if (!initialLoggerNames.contains(logger)) {
        // filter out the loggers that have been added before this test runs
        returnedLoggerNames.addElement(logger);
      }
    }

    return checkNames(expectedLoggerNames, returnedLoggerNames, failMsg);
  }
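A minimal driver for the check above; PASSED/FAILED are assumed to be the boolean constants used by checkLoggers(), and out the PrintStream used elsewhere in this test (both from surrounding code not shown here).

  // Hypothetical driver for checkLoggers(); PASSED/FAILED and out are assumed test-class members.
  public static void main(String[] args) {
    boolean result = checkLoggers();
    out.println("checkLoggers: " + (result == PASSED ? "PASSED" : "FAILED"));
    if (result == FAILED) {
      throw new RuntimeException("getLoggerNames() returned unexpected loggers");
    }
  }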
Example no. 3
0
  public void addString(String str) {
    char[] array = str.toCharArray();
    strings.add(new TreeString(strings.size(), array));

    logger.log(Level.INFO, String.format("Adding string \"%s\"", str));

    ukkonenExtendSuffixTree(strings.size() - 1);
    // naiveExtendSuffixTree(strings.size()-1);
  }
Example no. 4
0
 public Vector completedJobs() {
   Vector v = new Vector();
   for (Iterator it = jobs.values().iterator(); it.hasNext(); ) {
     JobInProgress jip = (JobInProgress) it.next();
     JobStatus status = jip.getStatus();
     if (status.getRunState() == JobStatus.SUCCEEDED) {
       v.add(jip);
     }
   }
   return v;
 }
Example no. 5
0
 public Vector runningJobs() {
   Vector v = new Vector();
   for (Iterator it = jobs.values().iterator(); it.hasNext(); ) {
     JobInProgress jip = (JobInProgress) it.next();
     JobStatus status = jip.getStatus();
     if (status.getRunState() == JobStatus.RUNNING) {
       v.add(jip);
     }
   }
   return v;
 }
  // Returns a boolean: PASSED or FAILED.
  private static boolean checkNames(
      Vector<String> expNames, Vector<String> retNames, String failMsg) {
    boolean status = PASSED;

    if (expNames.size() != retNames.size()) {
      status = FAILED;
    } else {
      boolean[] checked = new boolean[retNames.size()];
      for (int i = 0; i < expNames.size(); i++) {
        int j = 0;
        for (; j < retNames.size(); j++) {
          if (!checked[j] && expNames.elementAt(i).equals(retNames.elementAt(j))) {
            checked[j] = true;
            break;
          }
        }
        if (j >= retNames.size()) {
          status = FAILED;
          break;
        }
      }
    }
    if (!status) {
      printFailMsg(expNames, retNames, failMsg);
    }
    return status;
  }
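checkNames() above compares the two vectors as unordered multisets: the sizes must match and every expected name must pair off with a distinct returned name. A small illustration with hypothetical values (not part of the test; requires java.util.Arrays):

  // Hypothetical illustration of the order-insensitive, duplicate-aware comparison in checkNames().
  private static void demoCheckNames() {
    Vector<String> expected = new Vector<String>(Arrays.asList("a", "b", "b"));
    Vector<String> shuffled = new Vector<String>(Arrays.asList("b", "a", "b"));
    Vector<String> missing = new Vector<String>(Arrays.asList("a", "b"));

    checkNames(expected, shuffled, "# demo"); // PASSED: same names, order ignored
    checkNames(expected, missing, "# demo");  // FAILED: sizes differ, so printFailMsg() runs
  }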
Example no. 7
0
  private void ukkonenExtendSuffixTree(int arrayIdx) {
    logger.entering("UkkonenSuffixTree", "ukkonenExtendSuffixTree");
    logger.log(Level.FINEST, String.format("Ukkonen Algorithm String #%d", arrayIdx));

    TreeString string = strings.get(arrayIdx);
    extState = new UkkonenState(string);

    logger.log(
        Level.FINEST,
        String.format("Ukkonen: (%d,%d)", extState.nextPhaseStart, extState.string.length()));

    for (int phase = extState.nextPhaseStart; phase < extState.string.length(); phase++) {
      ukkonenSPA(phase);

      System.err.println(String.format("Phase %d results: ", phase));
      print(System.err);
      System.err.println();
      System.err.flush();
    }

    logger.log(Level.FINEST, String.format("Finishing edges: %d", extState.lastE));
    extState.finishFinalEdges();

    System.err.println(String.format("Finished results: "));
    print(System.err);
    System.err.println();
    System.err.flush();

    logger.exiting("UkkonenSuffixTree", "ukkonenExtendSuffixTree");
  }
  public void doLayout() {
    selections.clear();
    if (points == null) {
      points = new Vector<ExprPoint>();
    }
    points.clear();

    for (Pair<Integer, Double> p : watsonProbes) {
      ExprPoint ep = new ExprPoint(p.getFirst(), p.getLast(), '+');
      points.add(ep);
    }

    for (Pair<Integer, Double> p : crickProbes) {
      ExprPoint ep = new ExprPoint(p.getFirst(), p.getLast(), '-');
      points.add(ep);
    }
  }
Example no. 9
0
 public synchronized TaskReport[] getReduceTaskReports(String jobid) {
   JobInProgress job = (JobInProgress) jobs.get(jobid);
   if (job == null) {
     return new TaskReport[0];
   } else {
     Vector reports = new Vector();
     Vector completeReduceTasks = job.reportTasksInProgress(false, true);
     for (Iterator it = completeReduceTasks.iterator(); it.hasNext(); ) {
       TaskInProgress tip = (TaskInProgress) it.next();
       reports.add(tip.generateSingleReport());
     }
     Vector incompleteReduceTasks = job.reportTasksInProgress(false, false);
     for (Iterator it = incompleteReduceTasks.iterator(); it.hasNext(); ) {
       TaskInProgress tip = (TaskInProgress) it.next();
       reports.add(tip.generateSingleReport());
     }
     return (TaskReport[]) reports.toArray(new TaskReport[reports.size()]);
   }
 }
Example no. 10
0
  public void loadNetwork() throws IOException {
    logger.log(Level.INFO, "Loading metabolism network...");

    File file = props.getNetworkFile();
    Mapper<String, MetabolicEntry> entryMapper = new MetabolicEntry.MetabolicMapper();
    Parser<MetabolicEntry> parser = new Parser<MetabolicEntry>(file, entryMapper);

    while (parser.hasNext()) {
      MetabolicEntry entry = parser.next();
      entries.add(entry);

      Rxn rxn =
          new Rxn(
              props,
              entry.getReaction(),
              entry.getAbbreviation(),
              entry.getReactionName(),
              entry.getORF());
      reactions.put(entry.getAbbreviation(), rxn);

      locations.add(rxn.getLocation());

      LogicalORFTree lot = new LogicalORFTree(entry.getORF());
      ORFSet os = new ORFSet(entry.getORF());

      totalORFs.addAll(os.getORFs());
      orfSets.put(entry.getAbbreviation(), lot);
    }

    logger.log(Level.FINE, String.format("Loaded %d entries.", entries.size()));

    abbrevs = new MetabolismAbbreviations(props);
    abbrevs.loadAbbreviations();

    logger.log(Level.FINEST, "Loaded abbrevations.");
  }
Example no. 11
0
  private void naiveExtendSuffixTree(int arrayIdx) {
    TreeString string = strings.get(arrayIdx);

    // the array.length-1 constraint, instead of array.length, is because
    // we assume that the terminal character has already been added to the
    // string, and we don't want to *just* add the suffix that is that
    // character.
    for (int i = 0; i <= string.length(); i++) {
      logger.log(
          Level.FINEST,
          String.format("Naive Extension: \"%s\"", string.substring(i, string.length() + 1)));

      naiveExtendSuffix(string, i);
    }
  }
 private static void printFailMsg(
     Vector<String> expNames, Vector<String> retNames, String failMsg) {
   out.println();
   out.println(failMsg);
   if (expNames.size() == 0) {
     out.println("# there are NO expected logger names");
   } else {
     out.println("# expected logger names (" + expNames.size() + "):");
     for (int i = 0; i < expNames.size(); i++) {
       out.println(" expNames[" + i + "] = " + expNames.elementAt(i));
     }
   }
   if (retNames.size() == 0) {
     out.println("# there are NO returned logger names");
   } else {
     out.println("# returned logger names (" + retNames.size() + "):");
     for (int i = 0; i < retNames.size(); i++) {
       out.println("  retNames[" + i + "] = " + retNames.elementAt(i));
     }
   }
 }
Example no. 13
0
 public int size() {
   return strings.size();
 }
Example no. 14
0
 public TreeString getString(int i) {
   return strings.get(i);
 }
Example no. 15
0
  public void testStatelessSearch()
      throws org.jzkit.configuration.api.ConfigurationException, org.jzkit.search.SearchException,
          org.jzkit.search.util.ResultSet.IRResultSetException,
          org.jzkit.search.util.QueryModel.InvalidQueryException {

    Logger log = Logger.getLogger(TestService.class.getName());

    log.info("Starting jzkit2 server...");

    RecordFormatSpecification request_spec = new ArchetypeRecordFormatSpecification("F");
    ExplicitRecordFormatSpecification display_spec =
        new ExplicitRecordFormatSpecification("text:html:F");

    ApplicationContext app_context =
        new ClassPathXmlApplicationContext("TestApplicationContext.xml");
    log.info("JZKit server startup completed");

    Vector collection_ids = new Vector();
    collection_ids.add("LC/BOOKS");

    QueryModel qm = new PrefixString("@attrset bib-1 @attr 1=4 Science");

    System.err.println("Processing search......");

    try {
      Map additional_properties = new HashMap();
      additional_properties.put("base_dir", "/a/b/c/d");

      StatelessQueryService stateless_query_service =
          (StatelessQueryService) app_context.getBean("StatelessQueryService");

      org.jzkit.search.landscape.SimpleLandscapeSpecification landscape =
          new org.jzkit.search.landscape.SimpleLandscapeSpecification(collection_ids);
      // Test 1 - Kick off a search
      StatelessSearchResultsPageDTO rp =
          stateless_query_service.getResultsPageFor(
              null, qm, landscape, 1, 5, request_spec, display_spec, additional_properties);

      if (rp != null) {
        System.err.println(
            "Result Set Size....."
                + rp.total_hit_count
                + " records - result contains "
                + rp.number_of_records
                + " records");
        System.err.println("Result Set ID : " + rp.result_set_id);
      } else {
        System.err.println("Results page was null");
        return; // avoid NullPointerException on rp.records and rp.result_set_id below
      }

      if (rp.records != null) {
        for (int i = 0; ((i < rp.records.length) && (i < 25)); i++) {
          System.err.println(
              "Getting next record (" + i + " out of " + rp.number_of_records + ").....");
          InformationFragment frag = rp.records[i];
          System.err.println(frag);
        }
      }

      // Test 2 - use the result set ID to get a page of requests
      rp =
          stateless_query_service.getResultsPageFor(
              rp.result_set_id,
              qm,
              landscape,
              6,
              5,
              request_spec,
              display_spec,
              additional_properties);

      if (rp.records != null) {
        for (int i = 0; ((i < rp.records.length) && (i < 25)); i++) {
          System.err.println(
              "Getting next record (" + i + " out of " + rp.number_of_records + ").....");
          InformationFragment frag = rp.records[i];
          System.err.println(frag);
        }
      }

      // Test 3 - Use the query to get a cache hit
      rp =
          stateless_query_service.getResultsPageFor(
              null, qm, landscape, 6, 5, request_spec, display_spec, additional_properties);

      if (rp.records != null) {
        for (int i = 0; ((i < rp.records.length) && (i < 25)); i++) {
          System.err.println(
              "Getting next record (" + i + " out of " + rp.number_of_records + ").....");
          InformationFragment frag = rp.records[i];
          System.err.println(frag);
        }
      }
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
Example no. 16
0
  /**
   * A tracker wants to know if there's a Task to run. Returns a task we'd like the TaskTracker to
   * execute right now.
   *
   * <p>Eventually this function should compute load on the various TaskTrackers, and incorporate
   * knowledge of DFS file placement. But for right now, it just grabs a single item out of the
   * pending task list and hands it back.
   */
  public synchronized Task pollForNewTask(String taskTracker) {
    //
    // Compute average map and reduce task numbers across pool
    //
    int avgMaps = 0;
    int avgReduces = 0;
    int numTaskTrackers;
    TaskTrackerStatus tts;
    synchronized (taskTrackers) {
      numTaskTrackers = taskTrackers.size();
      tts = (TaskTrackerStatus) taskTrackers.get(taskTracker);
    }
    if (numTaskTrackers > 0) {
      avgMaps = totalMaps / numTaskTrackers;
      avgReduces = totalReduces / numTaskTrackers;
    }
    int totalCapacity = numTaskTrackers * maxCurrentTasks;
    //
    // Get map + reduce counts for the current tracker.
    //
    if (tts == null) {
      LOG.warning("Unknown task tracker polling; ignoring: " + taskTracker);
      return null;
    }

    int numMaps = tts.countMapTasks();
    int numReduces = tts.countReduceTasks();

    //
    // In the below steps, we allocate first a map task (if appropriate),
    // and then a reduce task if appropriate.  We go through all jobs
    // in order of job arrival; jobs only get serviced if their
    // predecessors are serviced, too.
    //

    //
    // We hand a task to the current taskTracker if the given machine
    // has a workload that's equal to or less than the averageMaps
    // +/- TASK_ALLOC_EPSILON.  (That epsilon is in place in case
    // there is an odd machine that is failing for some reason but
    // has not yet been removed from the pool, making capacity seem
    // larger than it really is.)
    //
    synchronized (jobsByArrival) {
      if ((numMaps < maxCurrentTasks) && (numMaps <= (avgMaps + TASK_ALLOC_EPSILON))) {

        int totalNeededMaps = 0;
        for (Iterator it = jobsByArrival.iterator(); it.hasNext(); ) {
          JobInProgress job = (JobInProgress) it.next();
          if (job.getStatus().getRunState() != JobStatus.RUNNING) {
            continue;
          }

          Task t = job.obtainNewMapTask(taskTracker, tts);
          if (t != null) {
            return t;
          }

          //
          // Beyond the highest-priority task, reserve a little
          // room for failures and speculative executions; don't
          // schedule tasks to the hilt.
          //
          totalNeededMaps += job.desiredMaps();
          double padding = 0;
          if (totalCapacity > MIN_SLOTS_FOR_PADDING) {
            padding = Math.min(maxCurrentTasks, totalNeededMaps * PAD_FRACTION);
          }
          if (totalNeededMaps + padding >= totalCapacity) {
            break;
          }
        }
      }

      //
      // Same thing, but for reduce tasks
      //
      if ((numReduces < maxCurrentTasks) && (numReduces <= (avgReduces + TASK_ALLOC_EPSILON))) {

        int totalNeededReduces = 0;
        for (Iterator it = jobsByArrival.iterator(); it.hasNext(); ) {
          JobInProgress job = (JobInProgress) it.next();
          if (job.getStatus().getRunState() != JobStatus.RUNNING) {
            continue;
          }

          Task t = job.obtainNewReduceTask(taskTracker, tts);
          if (t != null) {
            return t;
          }

          //
          // Beyond the highest-priority task, reserve a little
          // room for failures and speculative executions; don't
          // schedule tasks to the hilt.
          //
          totalNeededReduces += job.desiredReduces();
          double padding = 0;
          if (totalCapacity > MIN_SLOTS_FOR_PADDING) {
            padding = Math.min(maxCurrentTasks, totalNeededReduces * PAD_FRACTION);
          }
          if (totalNeededReduces + padding >= totalCapacity) {
            break;
          }
        }
      }
    }
    return null;
  }
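The comments above describe the admission test applied to both map and reduce slots: the tracker must be below its hard cap (maxCurrentTasks) and at most TASK_ALLOC_EPSILON above the pool average. A standalone sketch of that predicate (a hypothetical helper, not part of the JobTracker API):

  // Hypothetical helper restating the per-tracker eligibility check used in pollForNewTask().
  private static boolean underTrackerLoadLimit(
      int tasksOnTracker, int poolAverage, int maxCurrentTasks, double taskAllocEpsilon) {
    return (tasksOnTracker < maxCurrentTasks)
        && (tasksOnTracker <= poolAverage + taskAllocEpsilon);
  }

pollForNewTask() applies this test twice, once with the map counts and once with the reduce counts, and only then walks jobsByArrival in submission order so that earlier jobs are serviced first.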