/**
   * Initializes this task from the job configuration: instantiates the document
   * {@code Analyzer}, the {@code ILocalAnalysis} plug-in, the index shards and the
   * {@code IDistributionPolicy}, then logs the effective choices.
   *
   * @param job the map/reduce job configuration
   */
  public void configure(JobConf job) {
    iconf = new IndexUpdateConfiguration(job);

    // The analyzer and analysis classes are user-pluggable; create them reflectively.
    Class analyzerClass = iconf.getDocumentAnalyzerClass();
    analyzer = (Analyzer) ReflectionUtils.newInstance(analyzerClass, job);

    Class analysisClass = iconf.getLocalAnalysisClass();
    localAnalysis = (ILocalAnalysis) ReflectionUtils.newInstance(analysisClass, job);
    localAnalysis.configure(job);

    shards = Shard.getIndexShards(iconf);

    // The distribution policy decides which shard each document/delete goes to.
    Class policyClass = iconf.getDistributionPolicyClass();
    distributionPolicy = (IDistributionPolicy) ReflectionUtils.newInstance(policyClass, job);
    distributionPolicy.init(shards);

    LOG.info("sea.document.analyzer = " + analyzer.getClass().getName());
    LOG.info("sea.local.analysis = " + localAnalysis.getClass().getName());
    LOG.info(shards.length + " shards = " + iconf.getIndexShards());
    LOG.info("sea.distribution.policy = " + distributionPolicy.getClass().getName());
  }
// (removed scraped-page residue: "예제 #2" / vote count — not part of the source)
  /**
   * Opens a Lucene {@code IndexWriter} on {@code dir} with compound files disabled
   * and the keep-only-last-commit deletion policy. The analyzer argument is
   * {@code null} because documents are analyzed before they reach this writer.
   *
   * @return a configured writer for this task's index directory
   * @throws IOException if the index directory cannot be opened for writing
   */
  private IndexWriter createWriter() throws IOException {
    IndexWriter w =
        new IndexWriter(dir, false, null, new KeepOnlyLastCommitDeletionPolicy());
    w.setUseCompoundFile(false);

    // iconf may not be set in all code paths; only then apply the field-length cap.
    if (iconf != null) {
      final int maxLen = iconf.getIndexMaxFieldLength();
      if (maxLen > 0) {
        w.setMaxFieldLength(maxLen);
      }
    }

    return w;
  }