Example #1
    public MockInstallProcess(MockHttpSession mockHttpSession) {
      ProgressTracker progressTracker =
          new ProgressTracker(mockHttpSession, ProgressTrackerTest.class.getName());

      progressTracker.addProgress(ProgressStatusConstants.DOWNLOADING, 25, "downloading");
      progressTracker.addProgress(ProgressStatusConstants.COPYING, 50, "copying");

      _progressTracker = progressTracker;
    }
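
The tests in the later examples drive this mock through initialize(), download(), and copy(); those methods are not part of the listing. A minimal sketch of what they presumably do, assuming ProgressTracker exposes initialize() and setStatus(int) (an inference from how the tests read the status back, not confirmed here):

    // Hedged sketch: the mock methods the tests below call. That
    // ProgressTracker exposes initialize() and setStatus(int) is an
    // assumption based on the assertions in the following examples.
    public void initialize() {
      _progressTracker.initialize();
    }

    public void download() {
      _progressTracker.setStatus(ProgressStatusConstants.DOWNLOADING);
    }

    public void copy() {
      _progressTracker.setStatus(ProgressStatusConstants.COPYING);
    }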
Example #2
  @Test
  public void testInitialStatus() throws Exception {
    _mockInstallProcess.initialize();

    ProgressTracker progressTracker = getAttribute(ProgressTracker.PERCENT);

    Assert.assertEquals(ProgressStatusConstants.PREPARED, progressTracker.getStatus());
    Assert.assertEquals(StringPool.BLANK, progressTracker.getMessage());
    Assert.assertEquals(0, progressTracker.getPercent());
  }
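
The getAttribute helper used here is not shown in the listing. A plausible sketch, assuming a _mockHttpSession field is available and that the tracker stores itself in the session under the ProgressTracker.PERCENT key (both assumptions):

  // Hypothetical helper: fetch a session attribute and cast it for the caller.
  @SuppressWarnings("unchecked")
  private <T> T getAttribute(String name) {
    return (T) _mockHttpSession.getAttribute(name);
  }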
Example #3
  @Test
  public void testGetStatus() throws Exception {
    _mockInstallProcess.initialize();

    ProgressTracker progressTracker = getAttribute(ProgressTracker.PERCENT);

    _mockInstallProcess.download();
    _mockInstallProcess.copy();

    Assert.assertEquals(ProgressStatusConstants.COPYING, progressTracker.getStatus());
  }
Example #4
  @Test
  public void testGetMessage() throws Exception {
    _mockInstallProcess.initialize();

    ProgressTracker progressTracker = getAttribute(ProgressTracker.PERCENT);

    Assert.assertEquals(StringPool.BLANK, progressTracker.getMessage());

    _mockInstallProcess.download();

    progressTracker = getAttribute(ProgressTracker.PERCENT);

    Assert.assertEquals("downloading", progressTracker.getMessage());
  }
Example #5
  @Test
  public void testGetPercent() throws Exception {
    _mockInstallProcess.initialize();

    ProgressTracker progressTracker = getAttribute(ProgressTracker.PERCENT);

    Assert.assertEquals(0, progressTracker.getPercent());

    _mockInstallProcess.download();
    _mockInstallProcess.copy();

    progressTracker = getAttribute(ProgressTracker.PERCENT);

    Assert.assertEquals(50, progressTracker.getPercent());
  }
Example #6
  private void markFileAsBad(Path file) {
    String fileName = file.toString();
    String fileNameMinusSuffix = fileName.substring(0, fileName.indexOf(inprogress_suffix));
    String originalName = new Path(fileNameMinusSuffix).getName();
    Path newFile = new Path(badFilesDirPath + Path.SEPARATOR + originalName);

    LOG.info(
        "Moving bad file {} to {}. Processed it till offset {}. SpoutID= {}",
        originalName,
        newFile,
        tracker.getCommitPosition(),
        spoutId);
    try {
      // rename() can fail either by returning false or by throwing an exception,
      // so convert the false return value into an exception and handle both below.
      if (!hdfs.rename(file, newFile)) {
        throw new IOException("Move failed for bad file: " + file);
      }
    } catch (IOException e) {
      LOG.warn(
          "Error moving bad file: " + file + " to destination " + newFile + " SpoutId =" + spoutId,
          e);
    }
    closeReaderAndResetTrackers();
  }
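
closeReaderAndResetTrackers() is not included in the listing; presumably it closes the current file reader and clears per-file state so the spout can move on to the next file. A sketch under that assumption (the fields reset are guesses based on the other snippets here, and that the reader exposes close() is likewise assumed):

  private void closeReaderAndResetTrackers() {
    inflight.clear(); // drop outstanding tuples for the abandoned file (assumed)
    try {
      reader.close();
    } catch (IOException e) {
      LOG.warn("Error closing reader. SpoutId = " + spoutId, e);
    }
    reader = null;
    fileReadCompletely = false;
  }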
Example #7
 @Override
 public void ack(Object msgId) {
   LOG.trace("Ack received for msg {} on spout {}", msgId, spoutId);
   if (!ackEnabled) {
     return;
   }
   MessageId id = (MessageId) msgId;
   inflight.remove(id);
   ++acksSinceLastCommit;
   tracker.recordAckedOffset(id.offset);
   commitProgress(tracker.getCommitPosition());
   if (fileReadCompletely && inflight.isEmpty()) {
     markFileAsDone(reader.getFilePath());
     reader = null;
   }
   super.ack(msgId);
 }
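
The ack() method above records each acked offset and then commits tracker.getCommitPosition(). The ProgressTracker class itself is not shown; since tuples can be acked out of order, the commit position can only advance through a contiguous run of acked offsets. An illustrative model of that bookkeeping, using plain long offsets (the real class presumably works on richer per-file offset objects):

 import java.util.TreeSet;

 // Illustrative model, not the actual class: the commit position is the
 // highest offset up to which every tuple has been acked.
 class OffsetTracker {
   private final TreeSet<Long> acked = new TreeSet<>();
   private long committed = -1;

   void recordAckedOffset(long offset) {
     acked.add(offset);
     // Advance the commit position through any now-contiguous run of acks.
     while (acked.remove(committed + 1)) {
       committed++;
     }
   }

   long getCommitPosition() {
     return committed;
   }
 }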
Example #8
 protected static void callHandler(@Nullable ProgressTracker handler, @NotNull ProgressEvent event)
     throws VcsException {
   if (handler != null) {
     try {
       handler.consume(event);
     } catch (SVNException e) {
       throw new SvnBindException(e);
     }
   }
 }
Example #9
  public static void main(String[] args) throws IOException {
    Parser p = new Parser();
    p.parseAll();

    PrintWriter pw = new PrintWriter(new File(fname));
    AnswerPrinter ap = new AnswerPrinter(5, pw);

    int k = 0;
    ProgressTracker pt = new ProgressTracker(p.questions.size());
    Thread t = new Thread(pt);
    t.start();
    MLQuestionClassifier classify = new MLQuestionClassifier("train_5500.label.txt");
    WikiFilter wikiFilter = new WikiFilter();
    BaselineFilter baselineFilter = new BaselineFilter();
    for (int i : p.questions.keySet()) {
      Question q = p.questions.get(i);

      System.out.println("******CLASSIFYING QUESTION********");
      classify.classifyQuestion(q); // writes to q
      System.out.println("******RETRIEVING DOCUMENTS********");
      DocumentSet d =
          (new BaselineDocumentRetriever(p.raw_documents, p.raw_word_counts, p.raw_stem_counts))
              .getDocuments(q);
      System.out.println("******WIKI FILTER********");
      ArrayList<Answer> as = wikiFilter.filter(q, d);
      System.out.println("******NER FILTER********");
      ArrayList<Answer> nerFilter = baselineFilter.filter(as, q);
      System.out.println("******EXTRACTING ANSWERS********");
      ArrayList<Answer> finals = (new BaselineAnswerExtractor()).extractAnswers(q, nerFilter);

      if (finals.size() < 1) continue;
      System.out.println("Question: " + q.getQuestion() + "\nAnswer: " + finals.get(0).answer);

      k++;
      pt.updateCompletion(k);
      ap.printAnswers(q, finals);
      if (k > 2) break; // stop after the first three answered questions
    }

    pw.flush();
    pw.close();
  }
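
The ProgressTracker here is a different class from the earlier examples: it is built with a total question count, run on its own thread, and fed via updateCompletion(k). A minimal sketch consistent with that usage (the polling interval and output format are assumptions):

  // Hedged sketch of a Runnable progress reporter matching the usage above.
  class ProgressTracker implements Runnable {
    private final int total;
    private volatile int completed;

    ProgressTracker(int total) {
      this.total = total;
    }

    void updateCompletion(int completed) {
      this.completed = completed;
    }

    @Override
    public void run() {
      while (completed < total) {
        System.out.printf("Answered %d of %d questions (%.0f%%)%n",
            completed, total, total == 0 ? 100.0 : 100.0 * completed / total);
        try {
          Thread.sleep(1000); // assumed reporting interval
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt(); // restore the flag and stop reporting
          return;
        }
      }
    }
  }

Since the main method above breaks out of its loop after three questions, the real class is presumably interruptible or run as a daemon thread; the sketch stops on interrupt for that reason.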
Example #10
  public void nextTuple() {
    LOG.trace("Next Tuple {}", spoutId);
    // 1) First re-emit any previously failed tuples (from retryList)
    if (!retryList.isEmpty()) {
      LOG.debug("Sending tuple from retry list");
      HdfsUtils.Pair<MessageId, List<Object>> pair = retryList.remove();
      emitData(pair.getValue(), pair.getKey());
      return;
    }

    if (ackEnabled && tracker.size() >= maxOutstanding) {
      LOG.warn(
          "Waiting for more ACKs before generating new tuples. "
              + "Progress tracker size has reached limit {}, SpoutID {}",
          maxOutstanding,
          spoutId);
      // Don't emit anything .. allow configured spout wait strategy to kick in
      return;
    }

    // 2) If no failed tuples to be retried, then send tuples from hdfs
    while (true) {
      try {
        // 3) Select a new file if one is not open already
        if (reader == null) {
          reader = pickNextFile();
          if (reader == null) {
            LOG.debug("Currently no new files to process under : " + sourceDirPath);
            return;
          } else {
            fileReadCompletely = false;
          }
        }
        if (fileReadCompletely) { // wait for more ACKs before proceeding
          return;
        }
        // 4) Read record from file, emit to collector and record progress
        List<Object> tuple = reader.next();
        if (tuple != null) {
          fileReadCompletely = false;
          ++tupleCounter;
          MessageId msgId =
              new MessageId(tupleCounter, reader.getFilePath(), reader.getFileOffset());
          emitData(tuple, msgId);

          if (!ackEnabled) {
            ++acksSinceLastCommit; // assume message is immediately ACKed in non-ack mode
            commitProgress(reader.getFileOffset());
          } else {
            commitProgress(tracker.getCommitPosition());
          }
          return;
        } else {
          fileReadCompletely = true;
          if (!ackEnabled) {
            markFileAsDone(reader.getFilePath());
          }
        }
      } catch (IOException e) {
        LOG.error("I/O Error processing at file location " + getFileProgress(reader), e);
        // don't emit anything .. allow configured spout wait strategy to kick in
        return;
      } catch (ParseException e) {
        LOG.error(
            "Parsing error when processing at file location "
                + getFileProgress(reader)
                + ". Skipping remainder of file.",
            e);
        markFileAsBad(reader.getFilePath());
        // Note: We don't return from this method on ParseException to avoid triggering the
        // spout wait strategy (due to no emits). Instead we go back into the loop and
        // generate a tuple from next file
      }
    } // while
  }