  @Override
  public void stage(MultiStagedJob mjob) {
    // Delete intermediate stage output once the whole chain has run.
    mjob.removeIntermediate(true);

    // Stage 1: collate word counts. WordIndex.Map and WordIndex.Reduce turn the
    // per-word counts into (count, word) pairs, applying the configured count
    // and time-period thresholds; a single reducer writes one output named
    // "words-collated".
    SequenceFileStage<Text, BytesWritable, Text, LongWritable, LongWritable, Text> collateWords =
        new SequenceFileStage<Text, BytesWritable, Text, LongWritable, LongWritable, Text>() {
          @Override
          public void setup(Job job) {
            job.getConfiguration().setInt(WORDCOUNT_THRESH, wordCountThreshold);
            job.getConfiguration().setInt(WORDCOUNT_TIMETHRESH, wordTimeThreshold);
            job.setNumReduceTasks(1);
          }

          @Override
          public Class<? extends Mapper<Text, BytesWritable, Text, LongWritable>> mapper() {
            return WordIndex.Map.class;
          }

          @Override
          public Class<? extends Reducer<Text, LongWritable, LongWritable, Text>> reducer() {
            return WordIndex.Reduce.class;
          }

          @Override
          public String outname() {
            return "words-collated";
          }
        };

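    // Stage 2: sort the (count, word) pairs by count in descending order
    // (LongWritable.DecreasingComparator); WordIndexSort.Reduce keeps the
    // top WORDCOUNT_TOPN words in a single text output named "words".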
    SequenceFileTextStage<LongWritable, Text, LongWritable, Text, NullWritable, Text> sortedWords =
        new SequenceFileTextStage<LongWritable, Text, LongWritable, Text, NullWritable, Text>() {
          @Override
          public void setup(Job job) {
            job.getConfiguration().setInt(WORDCOUNT_TOPN, topNWords);
            job.setSortComparatorClass(LongWritable.DecreasingComparator.class);
            job.setNumReduceTasks(1);
          }

          @Override
          public Class<? extends Reducer<LongWritable, Text, NullWritable, Text>> reducer() {
            return WordIndexSort.Reduce.class;
          }

          @Override
          public String outname() {
            return "words";
          }
        };

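    // Queue the stages in order: collate first, then sort.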
    mjob.queueStage(collateWords);
    mjob.queueStage(sortedWords);
  }

  /**
   * Builds a MultiStagedJob over the word-count output of the completed mode
   * and runs a single TimeWordJacardIndex stage, writing its result to the
   * configured output path.
   */
  @Override
  public void write(HadoopTwitterTokenToolOptions opts, TwitterTokenMode completedMode)
      throws Exception {
    MultiStagedJob stages =
        new MultiStagedJob(
            HadoopToolsUtil.getInputPaths(
                completedMode.finalOutput(opts), CountWordsAcrossTimeperiod.WORDCOUNT_DIR),
            HadoopToolsUtil.getOutputPath(outputPath),
            opts.getArgs());
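    // Queue the single TimeWordJacardIndex stage and run the whole chain.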
    stages.queueStage(new TimeWordJacardIndex());
    stages.runAll();
  }