/**
 * A base class for read operations that need to be performed on the master node. Can also be
 * executed on the local node if needed.
 */
public abstract class TransportMasterNodeReadAction<
        Request extends MasterNodeReadRequest<Request>, Response extends ActionResponse>
    extends TransportMasterNodeAction<Request, Response> {

  public static final Setting<Boolean> FORCE_LOCAL_SETTING =
      Setting.boolSetting("action.master.force_local", false, Property.NodeScope);

  private final boolean forceLocal;

  protected TransportMasterNodeReadAction(
      Settings settings,
      String actionName,
      TransportService transportService,
      ClusterService clusterService,
      ThreadPool threadPool,
      ActionFilters actionFilters,
      IndexNameExpressionResolver indexNameExpressionResolver,
      Supplier<Request> request) {
    super(
        settings,
        actionName,
        transportService,
        clusterService,
        threadPool,
        actionFilters,
        indexNameExpressionResolver,
        request);
    this.forceLocal = FORCE_LOCAL_SETTING.get(settings);
  }

  @Override
  protected final boolean localExecute(Request request) {
    return forceLocal || request.local();
  }
}
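// A minimal sketch (ExampleRequest/ExampleResponse and the action name are hypothetical, not
// from the original source) of how a concrete read action plugs into the base class above. The
// base class contributes localExecute(): the action runs on the local node when the request
// asks for it, or when "action.master.force_local" is set on the node.
public class TransportExampleReadAction
    extends TransportMasterNodeReadAction<ExampleRequest, ExampleResponse> {

  public TransportExampleReadAction(
      Settings settings,
      TransportService transportService,
      ClusterService clusterService,
      ThreadPool threadPool,
      ActionFilters actionFilters,
      IndexNameExpressionResolver indexNameExpressionResolver) {
    super(
        settings,
        "cluster:monitor/example/read", // hypothetical action name
        transportService,
        clusterService,
        threadPool,
        actionFilters,
        indexNameExpressionResolver,
        ExampleRequest::new);
  }

  @Override
  protected String executor() {
    // Reading from the cluster state is cheap, so no fork to another thread pool is needed.
    return ThreadPool.Names.SAME;
  }

  @Override
  protected ExampleResponse newResponse() {
    return new ExampleResponse();
  }

  @Override
  protected void masterOperation(
      ExampleRequest request, ClusterState state, ActionListener<ExampleResponse> listener) {
    // Read whatever is needed from the cluster state and answer.
    listener.onResponse(new ExampleResponse());
  }

  @Override
  protected ClusterBlockException checkBlock(ExampleRequest request, ClusterState state) {
    return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
  }
}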
public static final class TcpSettings {
  public static final Setting<Boolean> TCP_NO_DELAY =
      Setting.boolSetting("network.tcp.no_delay", true, false, Setting.Scope.CLUSTER);
  public static final Setting<Boolean> TCP_KEEP_ALIVE =
      Setting.boolSetting("network.tcp.keep_alive", true, false, Setting.Scope.CLUSTER);
  public static final Setting<Boolean> TCP_REUSE_ADDRESS =
      Setting.boolSetting(
          "network.tcp.reuse_address",
          NetworkUtils.defaultReuseAddress(),
          false,
          Setting.Scope.CLUSTER);
  public static final Setting<ByteSizeValue> TCP_SEND_BUFFER_SIZE =
      Setting.byteSizeSetting(
          "network.tcp.send_buffer_size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);
  public static final Setting<ByteSizeValue> TCP_RECEIVE_BUFFER_SIZE =
      Setting.byteSizeSetting(
          "network.tcp.receive_buffer_size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);
  public static final Setting<Boolean> TCP_BLOCKING =
      Setting.boolSetting("network.tcp.blocking", false, false, Setting.Scope.CLUSTER);
  public static final Setting<Boolean> TCP_BLOCKING_SERVER =
      Setting.boolSetting(
          "network.tcp.blocking_server", TCP_BLOCKING, false, Setting.Scope.CLUSTER);
  public static final Setting<Boolean> TCP_BLOCKING_CLIENT =
      Setting.boolSetting(
          "network.tcp.blocking_client", TCP_BLOCKING, false, Setting.Scope.CLUSTER);
  public static final Setting<TimeValue> TCP_CONNECT_TIMEOUT =
      Setting.timeSetting(
          "network.tcp.connect_timeout",
          new TimeValue(30, TimeUnit.SECONDS),
          false,
          Setting.Scope.CLUSTER);
}
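// A small sketch (hypothetical values, wrapper class added for illustration) of how these
// settings are read: each Setting carries its key, default, and scope, so callers pull typed
// values straight out of a Settings instance, with fallbacks resolved automatically.
class TcpSettingsExample {
  static void example() {
    Settings settings =
        Settings.settingsBuilder()
            .put("network.tcp.no_delay", false)
            .put("network.tcp.blocking", true)
            .build();
    boolean noDelay = TcpSettings.TCP_NO_DELAY.get(settings); // false
    // blocking_server was not set explicitly, so it falls back to network.tcp.blocking:
    boolean blockingServer = TcpSettings.TCP_BLOCKING_SERVER.get(settings); // true
  }
}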
public class PercolatorFieldMapper extends FieldMapper {

  public static final XContentType QUERY_BUILDER_CONTENT_TYPE = XContentType.SMILE;
  public static final Setting<Boolean> INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING =
      Setting.boolSetting(
          "index.percolator.map_unmapped_fields_as_string", false, Setting.Property.IndexScope);
  public static final String CONTENT_TYPE = "percolator";
  private static final FieldType FIELD_TYPE = new FieldType();

  static final byte FIELD_VALUE_SEPARATOR = 0; // nul code point
  static final String EXTRACTION_COMPLETE = "complete";
  static final String EXTRACTION_PARTIAL = "partial";
  static final String EXTRACTION_FAILED = "failed";

  public static final String EXTRACTED_TERMS_FIELD_NAME = "extracted_terms";
  public static final String EXTRACTION_RESULT_FIELD_NAME = "extraction_result";
  public static final String QUERY_BUILDER_FIELD_NAME = "query_builder_field";

  public static class Builder extends FieldMapper.Builder<Builder, PercolatorFieldMapper> {

    private final QueryShardContext queryShardContext;

    public Builder(String fieldName, QueryShardContext queryShardContext) {
      super(fieldName, FIELD_TYPE, FIELD_TYPE);
      this.queryShardContext = queryShardContext;
    }

    @Override
    public PercolatorFieldMapper build(BuilderContext context) {
      context.path().add(name());
      FieldType fieldType = (FieldType) this.fieldType;
      KeywordFieldMapper extractedTermsField =
          createExtractQueryFieldBuilder(EXTRACTED_TERMS_FIELD_NAME, context);
      fieldType.queryTermsField = extractedTermsField.fieldType();
      KeywordFieldMapper extractionResultField =
          createExtractQueryFieldBuilder(EXTRACTION_RESULT_FIELD_NAME, context);
      fieldType.extractionResultField = extractionResultField.fieldType();
      BinaryFieldMapper queryBuilderField = createQueryBuilderFieldBuilder(context);
      fieldType.queryBuilderField = queryBuilderField.fieldType();
      context.path().remove();
      setupFieldType(context);
      return new PercolatorFieldMapper(
          name(),
          fieldType,
          defaultFieldType,
          context.indexSettings(),
          multiFieldsBuilder.build(this, context),
          copyTo,
          queryShardContext,
          extractedTermsField,
          extractionResultField,
          queryBuilderField);
    }

    static KeywordFieldMapper createExtractQueryFieldBuilder(String name, BuilderContext context) {
      KeywordFieldMapper.Builder queryMetaDataFieldBuilder = new KeywordFieldMapper.Builder(name);
      queryMetaDataFieldBuilder.docValues(false);
      queryMetaDataFieldBuilder.store(false);
      queryMetaDataFieldBuilder.indexOptions(IndexOptions.DOCS);
      return queryMetaDataFieldBuilder.build(context);
    }

    static BinaryFieldMapper createQueryBuilderFieldBuilder(BuilderContext context) {
      BinaryFieldMapper.Builder builder = new BinaryFieldMapper.Builder(QUERY_BUILDER_FIELD_NAME);
      builder.docValues(true);
      builder.indexOptions(IndexOptions.NONE);
      builder.store(false);
      builder.fieldType().setDocValuesType(DocValuesType.BINARY);
      return builder.build(context);
    }
  }

  public static class TypeParser implements FieldMapper.TypeParser {

    @Override
    public Builder parse(String name, Map<String, Object> node, ParserContext parserContext)
        throws MapperParsingException {
      return new Builder(name, parserContext.queryShardContext());
    }
  }

  public static class FieldType extends MappedFieldType {

    MappedFieldType queryTermsField;
    MappedFieldType extractionResultField;
    MappedFieldType queryBuilderField;

    public FieldType() {
      setIndexOptions(IndexOptions.NONE);
      setDocValuesType(DocValuesType.NONE);
      setStored(false);
    }

    public FieldType(FieldType ref) {
      super(ref);
      queryTermsField = ref.queryTermsField;
      extractionResultField = ref.extractionResultField;
      queryBuilderField = ref.queryBuilderField;
    }

    @Override
    public MappedFieldType clone() {
      return new FieldType(this);
    }

    @Override
    public String typeName() {
      return CONTENT_TYPE;
    }

    @Override
    public Query termQuery(Object value, QueryShardContext context) {
      throw new QueryShardException(
          context, "Percolator fields are not searchable directly, use a percolate query instead");
    }

    public Query percolateQuery(
        String documentType,
        PercolateQuery.QueryStore queryStore,
        BytesReference documentSource,
        IndexSearcher searcher)
        throws IOException {
      IndexReader indexReader = searcher.getIndexReader();
      Query candidateMatchesQuery = createCandidateQuery(indexReader);
      Query verifiedMatchesQuery;
      // We can only skip the MemoryIndex verification when percolating a single document.
      // When the document being percolated contains a nested object field then the MemoryIndex
      // contains multiple documents. In this case the term query that indicates whether memory
      // index verification can be skipped can incorrectly indicate that non nested queries would
      // match, while their nested variants would not.
      if (indexReader.maxDoc() == 1) {
        verifiedMatchesQuery =
            new TermQuery(new Term(extractionResultField.name(), EXTRACTION_COMPLETE));
      } else {
        verifiedMatchesQuery = new MatchNoDocsQuery("nested docs, so no verified matches");
      }
      return new PercolateQuery(
          documentType,
          queryStore,
          documentSource,
          candidateMatchesQuery,
          searcher,
          verifiedMatchesQuery);
    }

    Query createCandidateQuery(IndexReader indexReader) throws IOException {
      List<Term> extractedTerms = new ArrayList<>();
      // Include extractionResultField:failed, because docs with this term have no
      // extractedTermsField and otherwise we would fail to return these docs. Docs that failed
      // query term extraction always need to be verified by MemoryIndex:
      extractedTerms.add(new Term(extractionResultField.name(), EXTRACTION_FAILED));

      LeafReader reader = indexReader.leaves().get(0).reader();
      Fields fields = reader.fields();
      for (String field : fields) {
        Terms terms = fields.terms(field);
        if (terms == null) {
          continue;
        }

        BytesRef fieldBr = new BytesRef(field);
        TermsEnum tenum = terms.iterator();
        for (BytesRef term = tenum.next(); term != null; term = tenum.next()) {
          BytesRefBuilder builder = new BytesRefBuilder();
          builder.append(fieldBr);
          builder.append(FIELD_VALUE_SEPARATOR);
          builder.append(term);
          extractedTerms.add(new Term(queryTermsField.name(), builder.toBytesRef()));
        }
      }
      return new TermsQuery(extractedTerms);
    }
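
    // Worked example (hypothetical values): a term "brown" under the field "body" of the
    // percolated document is looked up in the extracted_terms field as the single BytesRef
    // "body" + 0x00 + "brown"; this is the same field + FIELD_VALUE_SEPARATOR + term encoding
    // that processQuery() writes at index time.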
  }

  private final boolean mapUnmappedFieldAsString;
  private final QueryShardContext queryShardContext;
  private KeywordFieldMapper queryTermsField;
  private KeywordFieldMapper extractionResultField;
  private BinaryFieldMapper queryBuilderField;

  public PercolatorFieldMapper(
      String simpleName,
      MappedFieldType fieldType,
      MappedFieldType defaultFieldType,
      Settings indexSettings,
      MultiFields multiFields,
      CopyTo copyTo,
      QueryShardContext queryShardContext,
      KeywordFieldMapper queryTermsField,
      KeywordFieldMapper extractionResultField,
      BinaryFieldMapper queryBuilderField) {
    super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
    this.queryShardContext = queryShardContext;
    this.queryTermsField = queryTermsField;
    this.extractionResultField = extractionResultField;
    this.queryBuilderField = queryBuilderField;
    this.mapUnmappedFieldAsString = INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING.get(indexSettings);
  }

  @Override
  public FieldMapper updateFieldType(Map<String, MappedFieldType> fullNameToFieldType) {
    PercolatorFieldMapper updated =
        (PercolatorFieldMapper) super.updateFieldType(fullNameToFieldType);
    KeywordFieldMapper queryTermsUpdated =
        (KeywordFieldMapper) queryTermsField.updateFieldType(fullNameToFieldType);
    KeywordFieldMapper extractionResultUpdated =
        (KeywordFieldMapper) extractionResultField.updateFieldType(fullNameToFieldType);
    BinaryFieldMapper queryBuilderUpdated =
        (BinaryFieldMapper) queryBuilderField.updateFieldType(fullNameToFieldType);

    if (updated == this
        && queryTermsUpdated == queryTermsField
        && extractionResultUpdated == extractionResultField
        && queryBuilderUpdated == queryBuilderField) {
      return this;
    }
    if (updated == this) {
      updated = (PercolatorFieldMapper) updated.clone();
    }
    updated.queryTermsField = queryTermsUpdated;
    updated.extractionResultField = extractionResultUpdated;
    updated.queryBuilderField = queryBuilderUpdated;
    return updated;
  }

  @Override
  public Mapper parse(ParseContext context) throws IOException {
    QueryShardContext queryShardContext = new QueryShardContext(this.queryShardContext);
    if (context.doc().getField(queryBuilderField.name()) != null) {
      // If a percolator query has been defined in an array object then multiple percolator queries
      // could be provided. In order to prevent this we fail if we try to parse more than one query
      // for the current document.
      throw new IllegalArgumentException("a document can only contain one percolator query");
    }

    XContentParser parser = context.parser();
    QueryBuilder queryBuilder =
        parseQueryBuilder(queryShardContext.newParseContext(parser), parser.getTokenLocation());
    verifyQuery(queryBuilder);
    // Fetching of terms, shapes and indexed scripts happens during this rewrite:
    queryBuilder = queryBuilder.rewrite(queryShardContext);

    try (XContentBuilder builder = XContentFactory.contentBuilder(QUERY_BUILDER_CONTENT_TYPE)) {
      queryBuilder.toXContent(builder, new MapParams(Collections.emptyMap()));
      builder.flush();
      byte[] queryBuilderAsBytes = BytesReference.toBytes(builder.bytes());
      context
          .doc()
          .add(
              new Field(
                  queryBuilderField.name(), queryBuilderAsBytes, queryBuilderField.fieldType()));
    }

    Query query = toQuery(queryShardContext, mapUnmappedFieldAsString, queryBuilder);
    processQuery(query, context);
    return null;
  }

  void processQuery(Query query, ParseContext context) {
    ParseContext.Document doc = context.doc();
    FieldType pft = (FieldType) this.fieldType();
    QueryAnalyzer.Result result;
    try {
      result = QueryAnalyzer.analyze(query);
    } catch (QueryAnalyzer.UnsupportedQueryException e) {
      doc.add(
          new Field(
              pft.extractionResultField.name(),
              EXTRACTION_FAILED,
              extractionResultField.fieldType()));
      return;
    }
    for (Term term : result.terms) {
      BytesRefBuilder builder = new BytesRefBuilder();
      builder.append(new BytesRef(term.field()));
      builder.append(FIELD_VALUE_SEPARATOR);
      builder.append(term.bytes());
      doc.add(new Field(queryTermsField.name(), builder.toBytesRef(), queryTermsField.fieldType()));
    }
    if (result.verified) {
      doc.add(
          new Field(
              extractionResultField.name(),
              EXTRACTION_COMPLETE,
              extractionResultField.fieldType()));
    } else {
      doc.add(
          new Field(
              extractionResultField.name(), EXTRACTION_PARTIAL, extractionResultField.fieldType()));
    }
  }

  public static Query parseQuery(
      QueryShardContext context, boolean mapUnmappedFieldsAsString, XContentParser parser)
      throws IOException {
    return parseQuery(context, mapUnmappedFieldsAsString, context.newParseContext(parser), parser);
  }

  public static Query parseQuery(
      QueryShardContext context,
      boolean mapUnmappedFieldsAsString,
      QueryParseContext queryParseContext,
      XContentParser parser)
      throws IOException {
    return toQuery(
        context,
        mapUnmappedFieldsAsString,
        parseQueryBuilder(queryParseContext, parser.getTokenLocation()));
  }

  static Query toQuery(
      QueryShardContext context, boolean mapUnmappedFieldsAsString, QueryBuilder queryBuilder)
      throws IOException {
    // This means that fields in the query need to exist in the mapping prior to registering this
    // query. The reason this is required is that if a field doesn't exist then the query assumes
    // defaults, which may be undesired.
    //
    // Even worse, if fields mentioned in percolator queries are added to the mapping after the
    // queries have been registered, then the percolator queries no longer work as expected.
    //
    // Query parsing can't introduce new fields in mappings (which happens when registering a
    // percolator query), because field types can't be inferred from queries (as they can from
    // documents), so the best option here is to disallow the usage of unmapped fields in
    // percolator queries to avoid unexpected behaviour.
    //
    // If index.percolator.map_unmapped_fields_as_string is set to true, the query can contain
    // unmapped fields, which will be mapped as an analyzed string.
    context.setAllowUnmappedFields(false);
    context.setMapUnmappedFieldAsString(mapUnmappedFieldsAsString);
    return queryBuilder.toQuery(context);
  }

  private static QueryBuilder parseQueryBuilder(
      QueryParseContext context, XContentLocation location) {
    try {
      return context
          .parseInnerQueryBuilder()
          .orElseThrow(
              () -> new ParsingException(location, "Failed to parse inner query, was empty"));
    } catch (IOException e) {
      throw new ParsingException(location, "Failed to parse", e);
    }
  }

  @Override
  public Iterator<Mapper> iterator() {
    return Arrays.<Mapper>asList(queryTermsField, extractionResultField, queryBuilderField)
        .iterator();
  }

  @Override
  protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
    throw new UnsupportedOperationException("should not be invoked");
  }

  @Override
  protected String contentType() {
    return CONTENT_TYPE;
  }

  /**
   * Fails if a percolator query contains an unsupported query. The following queries are not
   * supported:
   *
   * <ol>
   *   <li>a range query with a date range based on the current time
   *   <li>a has_child query
   *   <li>a has_parent query
   * </ol>
   */
  static void verifyQuery(QueryBuilder queryBuilder) {
    if (queryBuilder instanceof RangeQueryBuilder) {
      RangeQueryBuilder rangeQueryBuilder = (RangeQueryBuilder) queryBuilder;
      if (rangeQueryBuilder.from() instanceof String) {
        String from = (String) rangeQueryBuilder.from();
        String to = (String) rangeQueryBuilder.to();
        if (from.contains("now") || to.contains("now")) {
          throw new IllegalArgumentException(
              "percolator queries containing time range queries based on the "
                  + "current time is unsupported");
        }
      }
    } else if (queryBuilder instanceof HasChildQueryBuilder) {
      throw new IllegalArgumentException(
          "the [has_child] query is unsupported inside a percolator query");
    } else if (queryBuilder instanceof HasParentQueryBuilder) {
      throw new IllegalArgumentException(
          "the [has_parent] query is unsupported inside a percolator query");
    } else if (queryBuilder instanceof BoolQueryBuilder) {
      BoolQueryBuilder boolQueryBuilder = (BoolQueryBuilder) queryBuilder;
      List<QueryBuilder> clauses = new ArrayList<>();
      clauses.addAll(boolQueryBuilder.filter());
      clauses.addAll(boolQueryBuilder.must());
      clauses.addAll(boolQueryBuilder.mustNot());
      clauses.addAll(boolQueryBuilder.should());
      for (QueryBuilder clause : clauses) {
        verifyQuery(clause);
      }
    } else if (queryBuilder instanceof ConstantScoreQueryBuilder) {
      verifyQuery(((ConstantScoreQueryBuilder) queryBuilder).innerQuery());
    } else if (queryBuilder instanceof FunctionScoreQueryBuilder) {
      verifyQuery(((FunctionScoreQueryBuilder) queryBuilder).query());
    } else if (queryBuilder instanceof BoostingQueryBuilder) {
      verifyQuery(((BoostingQueryBuilder) queryBuilder).negativeQuery());
      verifyQuery(((BoostingQueryBuilder) queryBuilder).positiveQuery());
    }
  }
}
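// A brief sketch (hypothetical field name and wrapper class, added for illustration) of what
// verifyQuery() above accepts and rejects: absolute date ranges pass, while ranges anchored to
// "now" throw, since a percolator query is parsed once at index time and a current-time range
// would resolve differently on every percolation.
class VerifyQueryExample {
  static void example() {
    // Accepted: an absolute range.
    PercolatorFieldMapper.verifyQuery(
        new RangeQueryBuilder("timestamp").from("2016-01-01").to("2016-02-01"));
    // Throws IllegalArgumentException: a range based on the current time.
    PercolatorFieldMapper.verifyQuery(
        new RangeQueryBuilder("timestamp").from("now-1h").to("now"));
  }
}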
/**
 * The merge scheduler (<code>ConcurrentMergeScheduler</code>) controls the execution of merge
 * operations once they are needed (according to the merge policy). Merges run in separate threads,
 * and when the maximum number of threads is reached, further merges will wait until a merge thread
 * becomes available.
 *
 * <p>The merge scheduler supports the following <b>dynamic</b> settings:
 *
 * <ul>
 *   <li><code>index.merge.scheduler.max_thread_count</code>:
 *       <p>The maximum number of threads that may be merging at once. Defaults to <code>
 *       Math.max(1, Math.min(4, Runtime.getRuntime().availableProcessors() / 2))</code> which works
 *       well for a good solid-state-disk (SSD). If your index is on spinning platter drives
 *       instead, decrease this to 1.
 *   <li><code>index.merge.scheduler.auto_throttle</code>:
 *       <p>If this is true (the default), then the merge scheduler will rate-limit IO (writes) for
 *       merges to an adaptive value depending on how many merges are requested over time. An
 *       application with a low indexing rate that unluckily suddenly requires a large merge will
 *       see that merge aggressively throttled, while an application doing heavy indexing will see
 *       the throttle move higher to allow merges to keep up with ongoing indexing.
 * </ul>
 */
public final class MergeSchedulerConfig {

  public static final Setting<Integer> MAX_THREAD_COUNT_SETTING =
      new Setting<>(
          "index.merge.scheduler.max_thread_count",
          (s) ->
              Integer.toString(
                  Math.max(1, Math.min(4, EsExecutors.boundedNumberOfProcessors(s) / 2))),
          (s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_thread_count"),
          Property.Dynamic,
          Property.IndexScope);
  public static final Setting<Integer> MAX_MERGE_COUNT_SETTING =
      new Setting<>(
          "index.merge.scheduler.max_merge_count",
          (s) -> Integer.toString(MAX_THREAD_COUNT_SETTING.get(s) + 5),
          (s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_merge_count"),
          Property.Dynamic,
          Property.IndexScope);
  public static final Setting<Boolean> AUTO_THROTTLE_SETTING =
      Setting.boolSetting(
          "index.merge.scheduler.auto_throttle", true, Property.Dynamic, Property.IndexScope);

  private volatile boolean autoThrottle;
  private volatile int maxThreadCount;
  private volatile int maxMergeCount;

  MergeSchedulerConfig(IndexSettings indexSettings) {
    maxThreadCount = indexSettings.getValue(MAX_THREAD_COUNT_SETTING);
    maxMergeCount = indexSettings.getValue(MAX_MERGE_COUNT_SETTING);
    this.autoThrottle = indexSettings.getValue(AUTO_THROTTLE_SETTING);
  }

  /**
   * Returns <code>true</code> iff auto throttle is enabled.
   *
   * @see ConcurrentMergeScheduler#enableAutoIOThrottle()
   */
  public boolean isAutoThrottle() {
    return autoThrottle;
  }

  /** Enables / disables auto throttling on the {@link ConcurrentMergeScheduler} */
  void setAutoThrottle(boolean autoThrottle) {
    this.autoThrottle = autoThrottle;
  }

  /** Returns {@code maxThreadCount}. */
  public int getMaxThreadCount() {
    return maxThreadCount;
  }

  /** Expert: directly set the maximum number of merge threads and simultaneous merges allowed. */
  void setMaxThreadCount(int maxThreadCount) {
    this.maxThreadCount = maxThreadCount;
  }

  /** Returns {@code maxMergeCount}. */
  public int getMaxMergeCount() {
    return maxMergeCount;
  }

  /** Expert: set the maximum number of simultaneous merges allowed. */
  void setMaxMergeCount(int maxMergeCount) {
    this.maxMergeCount = maxMergeCount;
  }
}
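// A quick sketch (hypothetical values, wrapper class added for illustration) of the derived
// default visible above: max_merge_count defaults to max_thread_count + 5, so overriding only
// the thread count moves the merge count with it.
class MergeSchedulerConfigExample {
  static void example() {
    Settings settings =
        Settings.builder().put("index.merge.scheduler.max_thread_count", 2).build();
    int maxThreadCount = MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.get(settings); // 2
    int maxMergeCount = MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.get(settings); // 2 + 5 = 7
  }
}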
public class MapperService extends AbstractIndexComponent {

  /** The reason why a mapping is being merged. */
  public enum MergeReason {
    /** Create or update a mapping. */
    MAPPING_UPDATE,
    /**
     * Recovery of an existing mapping, for instance because of a restart, if a shard was moved to a
     * different node or for administrative purposes.
     */
    MAPPING_RECOVERY;
  }

  public static final String DEFAULT_MAPPING = "_default_";
  public static final Setting<Long> INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING =
      Setting.longSetting(
          "index.mapping.nested_fields.limit", 50L, 0, Property.Dynamic, Property.IndexScope);
  public static final Setting<Long> INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING =
      Setting.longSetting(
          "index.mapping.total_fields.limit", 1000L, 0, Property.Dynamic, Property.IndexScope);
  public static final Setting<Long> INDEX_MAPPING_DEPTH_LIMIT_SETTING =
      Setting.longSetting(
          "index.mapping.depth.limit", 20L, 1, Property.Dynamic, Property.IndexScope);
  public static final boolean INDEX_MAPPER_DYNAMIC_DEFAULT = true;
  public static final Setting<Boolean> INDEX_MAPPER_DYNAMIC_SETTING =
      Setting.boolSetting(
          "index.mapper.dynamic", INDEX_MAPPER_DYNAMIC_DEFAULT, Property.IndexScope);
  private static ObjectHashSet<String> META_FIELDS =
      ObjectHashSet.from(
          "_uid",
          "_id",
          "_type",
          "_all",
          "_parent",
          "_routing",
          "_index",
          "_size",
          "_timestamp",
          "_ttl");

  private final AnalysisService analysisService;

  /** Will create types automatically if they do not exist in the mapping definition yet */
  private final boolean dynamic;

  private volatile String defaultMappingSource;

  private volatile Map<String, DocumentMapper> mappers = emptyMap();

  private volatile FieldTypeLookup fieldTypes;
  private volatile Map<String, ObjectMapper> fullPathObjectMappers = new HashMap<>();
  private boolean hasNested = false; // updated dynamically to true when a nested object is added

  private final DocumentMapperParser documentParser;

  private final MapperAnalyzerWrapper indexAnalyzer;
  private final MapperAnalyzerWrapper searchAnalyzer;
  private final MapperAnalyzerWrapper searchQuoteAnalyzer;

  private volatile Map<String, MappedFieldType> unmappedFieldTypes = emptyMap();

  private volatile Set<String> parentTypes = emptySet();

  final MapperRegistry mapperRegistry;

  public MapperService(
      IndexSettings indexSettings,
      AnalysisService analysisService,
      SimilarityService similarityService,
      MapperRegistry mapperRegistry,
      Supplier<QueryShardContext> queryShardContextSupplier) {
    super(indexSettings);
    this.analysisService = analysisService;
    this.fieldTypes = new FieldTypeLookup();
    this.documentParser =
        new DocumentMapperParser(
            indexSettings,
            this,
            analysisService,
            similarityService,
            mapperRegistry,
            queryShardContextSupplier);
    this.indexAnalyzer =
        new MapperAnalyzerWrapper(analysisService.defaultIndexAnalyzer(), p -> p.indexAnalyzer());
    this.searchAnalyzer =
        new MapperAnalyzerWrapper(analysisService.defaultSearchAnalyzer(), p -> p.searchAnalyzer());
    this.searchQuoteAnalyzer =
        new MapperAnalyzerWrapper(
            analysisService.defaultSearchQuoteAnalyzer(), p -> p.searchQuoteAnalyzer());
    this.mapperRegistry = mapperRegistry;

    this.dynamic = this.indexSettings.getValue(INDEX_MAPPER_DYNAMIC_SETTING);
    if (index().getName().equals(ScriptService.SCRIPT_INDEX)) {
      defaultMappingSource =
          "{"
              + "\"_default_\": {"
              + "\"properties\": {"
              + "\"script\": { \"enabled\": false },"
              + "\"template\": { \"enabled\": false }"
              + "}"
              + "}"
              + "}";
    } else {
      defaultMappingSource = "{\"_default_\":{}}";
    }

    if (logger.isTraceEnabled()) {
      logger.trace("using dynamic[{}], default mapping source[{}]", dynamic, defaultMappingSource);
    } else if (logger.isDebugEnabled()) {
      logger.debug("using dynamic[{}]", dynamic);
    }
  }

  public boolean hasNested() {
    return this.hasNested;
  }

  /**
   * Returns an immutable iterator over the current document mappers.
   *
   * @param includingDefaultMapping indicates whether the iterator should contain the {@link
   *     #DEFAULT_MAPPING} document mapper. As this is not really an active type, you would
   *     typically set this to false.
   */
  public Iterable<DocumentMapper> docMappers(final boolean includingDefaultMapping) {
    return () -> {
      final Collection<DocumentMapper> documentMappers;
      if (includingDefaultMapping) {
        documentMappers = mappers.values();
      } else {
        documentMappers =
            mappers
                .values()
                .stream()
                .filter(mapper -> !DEFAULT_MAPPING.equals(mapper.type()))
                .collect(Collectors.toList());
      }
      return Collections.unmodifiableCollection(documentMappers).iterator();
    };
  }
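
  // Typical use: for (DocumentMapper mapper : docMappers(false)) { ... } visits every concrete
  // type while skipping the synthetic _default_ mapping; assertMappersShareSameFieldType()
  // below iterates exactly this way.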

  public AnalysisService analysisService() {
    return this.analysisService;
  }

  public DocumentMapperParser documentMapperParser() {
    return this.documentParser;
  }

  public DocumentMapper merge(
      String type, CompressedXContent mappingSource, MergeReason reason, boolean updateAllTypes) {
    if (DEFAULT_MAPPING.equals(type)) {
      // verify we can parse it
      // NOTE: never apply the default here
      DocumentMapper mapper = documentParser.parse(type, mappingSource);
      // still add it as a document mapper so we have it registered and, for example, persisted
      // back into the cluster meta data if needed, or checked for existence
      synchronized (this) {
        mappers = newMapBuilder(mappers).put(type, mapper).map();
      }
      try {
        defaultMappingSource = mappingSource.string();
      } catch (IOException e) {
        throw new ElasticsearchGenerationException("failed to un-compress", e);
      }
      return mapper;
    } else {
      synchronized (this) {
        final boolean applyDefault =
            // the default was already applied if we are recovering
            reason != MergeReason.MAPPING_RECOVERY
                // only apply the default mapping if we don't have the type yet
                && mappers.containsKey(type) == false;
        DocumentMapper mergeWith = parse(type, mappingSource, applyDefault);
        return merge(mergeWith, reason, updateAllTypes);
      }
    }
  }

  private synchronized DocumentMapper merge(
      DocumentMapper mapper, MergeReason reason, boolean updateAllTypes) {
    if (mapper.type().length() == 0) {
      throw new InvalidTypeNameException("mapping type name is empty");
    }
    if (mapper.type().length() > 255) {
      throw new InvalidTypeNameException(
          "mapping type name ["
              + mapper.type()
              + "] is too long; limit is length 255 but was ["
              + mapper.type().length()
              + "]");
    }
    if (mapper.type().charAt(0) == '_') {
      throw new InvalidTypeNameException(
          "mapping type name [" + mapper.type() + "] can't start with '_'");
    }
    if (mapper.type().contains("#")) {
      throw new InvalidTypeNameException(
          "mapping type name [" + mapper.type() + "] should not include '#' in it");
    }
    if (mapper.type().contains(",")) {
      throw new InvalidTypeNameException(
          "mapping type name [" + mapper.type() + "] should not include ',' in it");
    }
    if (mapper.type().equals(mapper.parentFieldMapper().type())) {
      throw new IllegalArgumentException("The [_parent.type] option can't point to the same type");
    }
    if (typeNameStartsWithIllegalDot(mapper)) {
      throw new IllegalArgumentException(
          "mapping type name [" + mapper.type() + "] must not start with a '.'");
    }

    // 1. compute the merged DocumentMapper
    DocumentMapper oldMapper = mappers.get(mapper.type());
    DocumentMapper newMapper;
    if (oldMapper != null) {
      newMapper = oldMapper.merge(mapper.mapping(), updateAllTypes);
    } else {
      newMapper = mapper;
    }

    // 2. check basic sanity of the new mapping
    List<ObjectMapper> objectMappers = new ArrayList<>();
    List<FieldMapper> fieldMappers = new ArrayList<>();
    Collections.addAll(fieldMappers, newMapper.mapping().metadataMappers);
    MapperUtils.collect(newMapper.mapping().root(), objectMappers, fieldMappers);
    checkFieldUniqueness(newMapper.type(), objectMappers, fieldMappers);
    checkObjectsCompatibility(newMapper.type(), objectMappers, fieldMappers, updateAllTypes);

    // 3. update lookup data-structures
    // this will in particular make sure that the merged fields are compatible with other types
    FieldTypeLookup fieldTypes =
        this.fieldTypes.copyAndAddAll(newMapper.type(), fieldMappers, updateAllTypes);

    boolean hasNested = this.hasNested;
    Map<String, ObjectMapper> fullPathObjectMappers = new HashMap<>(this.fullPathObjectMappers);
    for (ObjectMapper objectMapper : objectMappers) {
      fullPathObjectMappers.put(objectMapper.fullPath(), objectMapper);
      if (objectMapper.nested().isNested()) {
        hasNested = true;
      }
    }
    fullPathObjectMappers = Collections.unmodifiableMap(fullPathObjectMappers);

    if (reason == MergeReason.MAPPING_UPDATE) {
      // this check will only be performed on the master node when there is
      // a call to the update mapping API. For all other cases like
      // the master node restoring mappings from disk or data nodes
      // deserializing cluster state that was sent by the master node,
      // this check will be skipped.
      checkNestedFieldsLimit(fullPathObjectMappers);
      checkTotalFieldsLimit(objectMappers.size() + fieldMappers.size());
      checkDepthLimit(fullPathObjectMappers.keySet());
      checkPercolatorFieldLimit(fieldTypes);
    }

    Set<String> parentTypes = this.parentTypes;
    if (oldMapper == null && newMapper.parentFieldMapper().active()) {
      parentTypes = new HashSet<>(parentTypes.size() + 1);
      parentTypes.addAll(this.parentTypes);
      parentTypes.add(mapper.parentFieldMapper().type());
      parentTypes = Collections.unmodifiableSet(parentTypes);
    }

    Map<String, DocumentMapper> mappers = new HashMap<>(this.mappers);
    mappers.put(newMapper.type(), newMapper);
    for (Map.Entry<String, DocumentMapper> entry : mappers.entrySet()) {
      if (entry.getKey().equals(DEFAULT_MAPPING)) {
        continue;
      }
      DocumentMapper m = entry.getValue();
      // apply changes to the field types back
      m = m.updateFieldType(fieldTypes.fullNameToFieldType);
      entry.setValue(m);
    }
    mappers = Collections.unmodifiableMap(mappers);

    // 4. commit the change
    this.mappers = mappers;
    this.fieldTypes = fieldTypes;
    this.hasNested = hasNested;
    this.fullPathObjectMappers = fullPathObjectMappers;
    this.parentTypes = parentTypes;

    assert assertSerialization(newMapper);
    assert assertMappersShareSameFieldType();

    return newMapper;
  }

  private boolean assertMappersShareSameFieldType() {
    for (DocumentMapper mapper : docMappers(false)) {
      List<FieldMapper> fieldMappers = new ArrayList<>();
      Collections.addAll(fieldMappers, mapper.mapping().metadataMappers);
      MapperUtils.collect(mapper.root(), new ArrayList<ObjectMapper>(), fieldMappers);
      for (FieldMapper fieldMapper : fieldMappers) {
        assert fieldMapper.fieldType() == fieldTypes.get(fieldMapper.name()) : fieldMapper.name();
      }
    }
    return true;
  }

  private boolean typeNameStartsWithIllegalDot(DocumentMapper mapper) {
    boolean legacyIndex =
        getIndexSettings().getIndexVersionCreated().before(Version.V_5_0_0_alpha1);
    if (legacyIndex) {
      return mapper.type().startsWith(".")
          && !PercolatorFieldMapper.LEGACY_TYPE_NAME.equals(mapper.type());
    } else {
      return mapper.type().startsWith(".");
    }
  }

  private boolean assertSerialization(DocumentMapper mapper) {
    // capture the source now, it may change due to concurrent parsing
    final CompressedXContent mappingSource = mapper.mappingSource();
    DocumentMapper newMapper = parse(mapper.type(), mappingSource, false);

    if (newMapper.mappingSource().equals(mappingSource) == false) {
      throw new IllegalStateException(
          "DocumentMapper serialization result is different from source. \n--> Source ["
              + mappingSource
              + "]\n--> Result ["
              + newMapper.mappingSource()
              + "]");
    }
    return true;
  }

  private void checkFieldUniqueness(
      String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) {
    assert Thread.holdsLock(this);

    // first check within mapping
    final Set<String> objectFullNames = new HashSet<>();
    for (ObjectMapper objectMapper : objectMappers) {
      final String fullPath = objectMapper.fullPath();
      if (objectFullNames.add(fullPath) == false) {
        throw new IllegalArgumentException(
            "Object mapper [" + fullPath + "] is defined twice in mapping for type [" + type + "]");
      }
    }

    final Set<String> fieldNames = new HashSet<>();
    for (FieldMapper fieldMapper : fieldMappers) {
      final String name = fieldMapper.name();
      if (objectFullNames.contains(name)) {
        throw new IllegalArgumentException(
            "Field [" + name + "] is defined both as an object and a field in [" + type + "]");
      } else if (fieldNames.add(name) == false) {
        throw new IllegalArgumentException(
            "Field [" + name + "] is defined twice in [" + type + "]");
      }
    }

    // then check other types
    for (String fieldName : fieldNames) {
      if (fullPathObjectMappers.containsKey(fieldName)) {
        throw new IllegalArgumentException(
            "["
                + fieldName
                + "] is defined as a field in mapping ["
                + type
                + "] but this name is already used for an object in other types");
      }
    }

    for (String objectPath : objectFullNames) {
      if (fieldTypes.get(objectPath) != null) {
        throw new IllegalArgumentException(
            "["
                + objectPath
                + "] is defined as an object in mapping ["
                + type
                + "] but this name is already used for a field in other types");
      }
    }
  }

  private void checkObjectsCompatibility(
      String type,
      Collection<ObjectMapper> objectMappers,
      Collection<FieldMapper> fieldMappers,
      boolean updateAllTypes) {
    assert Thread.holdsLock(this);

    for (ObjectMapper newObjectMapper : objectMappers) {
      ObjectMapper existingObjectMapper = fullPathObjectMappers.get(newObjectMapper.fullPath());
      if (existingObjectMapper != null) {
        // simulate a merge and ignore the result, we are just interested
        // in exceptions here
        existingObjectMapper.merge(newObjectMapper, updateAllTypes);
      }
    }
  }

  private void checkNestedFieldsLimit(Map<String, ObjectMapper> fullPathObjectMappers) {
    long allowedNestedFields = indexSettings.getValue(INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING);
    long actualNestedFields = 0;
    for (ObjectMapper objectMapper : fullPathObjectMappers.values()) {
      if (objectMapper.nested().isNested()) {
        actualNestedFields++;
      }
    }
    if (actualNestedFields > allowedNestedFields) {
      throw new IllegalArgumentException(
          "Limit of nested fields ["
              + allowedNestedFields
              + "] in index ["
              + index().getName()
              + "] has been exceeded");
    }
  }

  private void checkTotalFieldsLimit(long totalMappers) {
    long allowedTotalFields = indexSettings.getValue(INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING);
    if (allowedTotalFields < totalMappers) {
      throw new IllegalArgumentException(
          "Limit of total fields ["
              + allowedTotalFields
              + "] in index ["
              + index().getName()
              + "] has been exceeded");
    }
  }

  private void checkDepthLimit(Collection<String> objectPaths) {
    final long maxDepth = indexSettings.getValue(INDEX_MAPPING_DEPTH_LIMIT_SETTING);
    for (String objectPath : objectPaths) {
      checkDepthLimit(objectPath, maxDepth);
    }
  }

  private void checkDepthLimit(String objectPath, long maxDepth) {
    int numDots = 0;
    for (int i = 0; i < objectPath.length(); ++i) {
      if (objectPath.charAt(i) == '.') {
        numDots += 1;
      }
    }
    final int depth = numDots + 2;
    if (depth > maxDepth) {
      throw new IllegalArgumentException(
          "Limit of mapping depth ["
              + maxDepth
              + "] in index ["
              + index().getName()
              + "] has been exceeded due to object field ["
              + objectPath
              + "]");
    }
  }
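
  // Worked example (hypothetical path): "user.address.street" contains two dots, so its computed
  // depth is 2 + 2 = 4 (one level per path segment plus one for the root object), well under the
  // default index.mapping.depth.limit of 20.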

  /**
   * We only allow up to one percolator field per index.
   *
   * <p>The reasoning here is that the PercolatorQueryCache only supports a single document having
   * a percolator query. Also, specifying multiple queries per document feels like an anti-pattern.
   */
  private void checkPercolatorFieldLimit(Iterable<MappedFieldType> fieldTypes) {
    List<String> percolatorFieldTypes = new ArrayList<>();
    for (MappedFieldType fieldType : fieldTypes) {
      if (fieldType instanceof PercolatorFieldMapper.PercolatorFieldType) {
        percolatorFieldTypes.add(fieldType.name());
      }
    }
    if (percolatorFieldTypes.size() > 1) {
      throw new IllegalArgumentException(
          "Up to one percolator field type is allowed per index, "
              + "found the following percolator fields ["
              + percolatorFieldTypes
              + "]");
    }
  }

  public DocumentMapper parse(
      String mappingType, CompressedXContent mappingSource, boolean applyDefault)
      throws MapperParsingException {
    return documentParser.parse(
        mappingType, mappingSource, applyDefault ? defaultMappingSource : null);
  }

  public boolean hasMapping(String mappingType) {
    return mappers.containsKey(mappingType);
  }

  /**
   * Return the set of concrete types that have a mapping. NOTE: this does not return the default
   * mapping.
   */
  public Collection<String> types() {
    final Set<String> types = new HashSet<>(mappers.keySet());
    types.remove(DEFAULT_MAPPING);
    return Collections.unmodifiableSet(types);
  }

  /**
   * Return the {@link DocumentMapper} for the given type. By using the special {@value
   * #DEFAULT_MAPPING} type, you can get a {@link DocumentMapper} for the default mapping.
   */
  public DocumentMapper documentMapper(String type) {
    return mappers.get(type);
  }

  /**
   * Returns the document mapper created, including a mapping update if the type has been
   * dynamically created.
   */
  public DocumentMapperForType documentMapperWithAutoCreate(String type) {
    DocumentMapper mapper = mappers.get(type);
    if (mapper != null) {
      return new DocumentMapperForType(mapper, null);
    }
    if (!dynamic) {
      throw new TypeMissingException(
          index(), type, "trying to auto create mapping, but dynamic mapping is disabled");
    }
    mapper = parse(type, null, true);
    return new DocumentMapperForType(mapper, mapper.mapping());
  }

  /**
   * Returns the {@link MappedFieldType} for the given fullName.
   *
   * <p>If multiple types have fields with the same full name, the first is returned.
   */
   */
  public MappedFieldType fullName(String fullName) {
    return fieldTypes.get(fullName);
  }

  /**
   * Returns all the fields that match the given pattern. If the pattern is prefixed with a type
   * then the fields will be returned with a type prefix.
   */
  public Collection<String> simpleMatchToIndexNames(String pattern) {
    if (Regex.isSimpleMatchPattern(pattern) == false) {
      // no wildcards
      return Collections.singletonList(pattern);
    }
    return fieldTypes.simpleMatchToFullName(pattern);
  }

  public ObjectMapper getObjectMapper(String name) {
    return fullPathObjectMappers.get(name);
  }

  /**
   * Given a type (eg. long, string, ...), return an anonymous field mapper that can be used for
   * search operations.
   */
  public MappedFieldType unmappedFieldType(String type) {
    if (type.equals("string")) {
      deprecationLogger.deprecated(
          "[unmapped_type:string] should be replaced with [unmapped_type:keyword]");
      type = "keyword";
    }
    MappedFieldType fieldType = unmappedFieldTypes.get(type);
    if (fieldType == null) {
      final Mapper.TypeParser.ParserContext parserContext =
          documentMapperParser().parserContext(type);
      Mapper.TypeParser typeParser = parserContext.typeParser(type);
      if (typeParser == null) {
        throw new IllegalArgumentException("No mapper found for type [" + type + "]");
      }
      final Mapper.Builder<?, ?> builder =
          typeParser.parse("__anonymous_" + type, emptyMap(), parserContext);
      final BuilderContext builderContext =
          new BuilderContext(indexSettings.getSettings(), new ContentPath(1));
      fieldType = ((FieldMapper) builder.build(builderContext)).fieldType();

      // There is no need to synchronize writes here. In the case of concurrent access, we could
      // just compute some mappers several times, which is not a big deal.
      Map<String, MappedFieldType> newUnmappedFieldTypes = new HashMap<>();
      newUnmappedFieldTypes.putAll(unmappedFieldTypes);
      newUnmappedFieldTypes.put(type, fieldType);
      unmappedFieldTypes = unmodifiableMap(newUnmappedFieldTypes);
    }
    return fieldType;
  }
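
  // For example (hypothetical call): unmappedFieldType("long") builds an anonymous
  // "__anonymous_long" mapper once, caches the resulting MappedFieldType in unmappedFieldTypes,
  // and serves later lookups from that cache.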

  public Analyzer indexAnalyzer() {
    return this.indexAnalyzer;
  }

  public Analyzer searchAnalyzer() {
    return this.searchAnalyzer;
  }

  public Analyzer searchQuoteAnalyzer() {
    return this.searchQuoteAnalyzer;
  }

  public Set<String> getParentTypes() {
    return parentTypes;
  }

  /** @return Whether a field is a metadata field. */
  public static boolean isMetadataField(String fieldName) {
    return META_FIELDS.contains(fieldName);
  }

  public static String[] getAllMetaFields() {
    return META_FIELDS.toArray(String.class);
  }

  /** An analyzer wrapper that can lookup fields within the index mappings */
  final class MapperAnalyzerWrapper extends DelegatingAnalyzerWrapper {

    private final Analyzer defaultAnalyzer;
    private final Function<MappedFieldType, Analyzer> extractAnalyzer;

    MapperAnalyzerWrapper(
        Analyzer defaultAnalyzer, Function<MappedFieldType, Analyzer> extractAnalyzer) {
      super(Analyzer.PER_FIELD_REUSE_STRATEGY);
      this.defaultAnalyzer = defaultAnalyzer;
      this.extractAnalyzer = extractAnalyzer;
    }

    @Override
    protected Analyzer getWrappedAnalyzer(String fieldName) {
      MappedFieldType fieldType = fullName(fieldName);
      if (fieldType != null) {
        Analyzer analyzer = extractAnalyzer.apply(fieldType);
        if (analyzer != null) {
          return analyzer;
        }
      }
      return defaultAnalyzer;
    }
  }
}
public final class HttpTransportSettings {

  public static final Setting<Boolean> SETTING_CORS_ENABLED =
      Setting.boolSetting("http.cors.enabled", false, false, Scope.CLUSTER);
  public static final Setting<String> SETTING_CORS_ALLOW_ORIGIN =
      new Setting<String>("http.cors.allow-origin", "", (value) -> value, false, Scope.CLUSTER);
  public static final Setting<Integer> SETTING_CORS_MAX_AGE =
      Setting.intSetting("http.cors.max-age", 1728000, false, Scope.CLUSTER);
  public static final Setting<String> SETTING_CORS_ALLOW_METHODS =
      new Setting<String>(
          "http.cors.allow-methods",
          "OPTIONS, HEAD, GET, POST, PUT, DELETE",
          (value) -> value,
          false,
          Scope.CLUSTER);
  public static final Setting<String> SETTING_CORS_ALLOW_HEADERS =
      new Setting<String>(
          "http.cors.allow-headers",
          "X-Requested-With, Content-Type, Content-Length",
          (value) -> value,
          false,
          Scope.CLUSTER);
  public static final Setting<Boolean> SETTING_CORS_ALLOW_CREDENTIALS =
      Setting.boolSetting("http.cors.allow-credentials", false, false, Scope.CLUSTER);
  public static final Setting<Boolean> SETTING_PIPELINING =
      Setting.boolSetting("http.pipelining", true, false, Scope.CLUSTER);
  public static final Setting<Integer> SETTING_PIPELINING_MAX_EVENTS =
      Setting.intSetting("http.pipelining.max_events", 10000, false, Scope.CLUSTER);
  public static final Setting<Boolean> SETTING_HTTP_COMPRESSION =
      Setting.boolSetting("http.compression", false, false, Scope.CLUSTER);
  public static final Setting<Integer> SETTING_HTTP_COMPRESSION_LEVEL =
      Setting.intSetting("http.compression_level", 6, false, Scope.CLUSTER);
  public static final Setting<List<String>> SETTING_HTTP_HOST =
      listSetting("http.host", emptyList(), s -> s, false, Scope.CLUSTER);
  public static final Setting<List<String>> SETTING_HTTP_PUBLISH_HOST =
      listSetting("http.publish_host", SETTING_HTTP_HOST, s -> s, false, Scope.CLUSTER);
  public static final Setting<List<String>> SETTING_HTTP_BIND_HOST =
      listSetting("http.bind_host", SETTING_HTTP_HOST, s -> s, false, Scope.CLUSTER);

  public static final Setting<PortsRange> SETTING_HTTP_PORT =
      new Setting<PortsRange>("http.port", "9200-9300", PortsRange::new, false, Scope.CLUSTER);
  public static final Setting<Integer> SETTING_HTTP_PUBLISH_PORT =
      Setting.intSetting("http.publish_port", 0, 0, false, Scope.CLUSTER);
  public static final Setting<Boolean> SETTING_HTTP_DETAILED_ERRORS_ENABLED =
      Setting.boolSetting("http.detailed_errors.enabled", true, false, Scope.CLUSTER);
  public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CONTENT_LENGTH =
      Setting.byteSizeSetting(
          "http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB), false, Scope.CLUSTER);
  public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CHUNK_SIZE =
      Setting.byteSizeSetting(
          "http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB), false, Scope.CLUSTER);
  public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_HEADER_SIZE =
      Setting.byteSizeSetting(
          "http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB), false, Scope.CLUSTER);
  public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_INITIAL_LINE_LENGTH =
      Setting.byteSizeSetting(
          "http.max_initial_line_length",
          new ByteSizeValue(4, ByteSizeUnit.KB),
          false,
          Scope.CLUSTER);
  // don't reset cookies by default, since I don't think we really need to
  // note, parsing cookies was fixed in netty 3.5.1 regarding stack allocation, but still,
  // currently, we don't need cookies
  public static final Setting<Boolean> SETTING_HTTP_RESET_COOKIES =
      Setting.boolSetting("http.reset_cookies", false, false, Scope.CLUSTER);

  private HttpTransportSettings() {}
}
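// A short sketch (hypothetical values, wrapper class added for illustration) of the fallback
// wiring above: http.publish_host and http.bind_host both default to whatever http.host is set
// to, so setting that one key configures all three.
class HttpTransportSettingsExample {
  static void example() {
    Settings settings = Settings.settingsBuilder().putArray("http.host", "10.0.0.1").build();
    List<String> bindHosts = HttpTransportSettings.SETTING_HTTP_BIND_HOST.get(settings);
    List<String> publishHosts = HttpTransportSettings.SETTING_HTTP_PUBLISH_HOST.get(settings);
    // Both resolve to ["10.0.0.1"] because neither key was set explicitly.
  }
}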
/**
 * A base class for {@link org.elasticsearch.discovery.zen.fd.MasterFaultDetection} &amp; {@link
 * org.elasticsearch.discovery.zen.fd.NodesFaultDetection}, making sure both use the same setting.
 */
public abstract class FaultDetection extends AbstractComponent {

  public static final Setting<Boolean> CONNECT_ON_NETWORK_DISCONNECT_SETTING =
      Setting.boolSetting(
          "discovery.zen.fd.connect_on_network_disconnect", false, Property.NodeScope);
  public static final Setting<TimeValue> PING_INTERVAL_SETTING =
      Setting.positiveTimeSetting(
          "discovery.zen.fd.ping_interval", timeValueSeconds(1), Property.NodeScope);
  public static final Setting<TimeValue> PING_TIMEOUT_SETTING =
      Setting.timeSetting(
          "discovery.zen.fd.ping_timeout", timeValueSeconds(30), Property.NodeScope);
  public static final Setting<Integer> PING_RETRIES_SETTING =
      Setting.intSetting("discovery.zen.fd.ping_retries", 3, Property.NodeScope);
  public static final Setting<Boolean> REGISTER_CONNECTION_LISTENER_SETTING =
      Setting.boolSetting(
          "discovery.zen.fd.register_connection_listener", true, Property.NodeScope);

  protected final ThreadPool threadPool;
  protected final ClusterName clusterName;
  protected final TransportService transportService;

  // used mainly for testing, should always be true
  protected final boolean registerConnectionListener;
  protected final FDConnectionListener connectionListener;
  protected final boolean connectOnNetworkDisconnect;

  protected final TimeValue pingInterval;
  protected final TimeValue pingRetryTimeout;
  protected final int pingRetryCount;

  public FaultDetection(
      Settings settings,
      ThreadPool threadPool,
      TransportService transportService,
      ClusterName clusterName) {
    super(settings);
    this.threadPool = threadPool;
    this.transportService = transportService;
    this.clusterName = clusterName;

    this.connectOnNetworkDisconnect = CONNECT_ON_NETWORK_DISCONNECT_SETTING.get(settings);
    this.pingInterval = PING_INTERVAL_SETTING.get(settings);
    this.pingRetryTimeout = PING_TIMEOUT_SETTING.get(settings);
    this.pingRetryCount = PING_RETRIES_SETTING.get(settings);
    this.registerConnectionListener = REGISTER_CONNECTION_LISTENER_SETTING.get(settings);

    this.connectionListener = new FDConnectionListener();
    if (registerConnectionListener) {
      transportService.addConnectionListener(connectionListener);
    }
  }

  public void close() {
    transportService.removeConnectionListener(connectionListener);
  }

  /**
   * This method will be called when the {@link org.elasticsearch.transport.TransportService}
   * raises a node disconnected event.
   */
  abstract void handleTransportDisconnect(DiscoveryNode node);

  private class FDConnectionListener implements TransportConnectionListener {
    @Override
    public void onNodeConnected(DiscoveryNode node) {}

    @Override
    public void onNodeDisconnected(DiscoveryNode node) {
      handleTransportDisconnect(node);
    }
  }
}
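// A minimal sketch (hypothetical values, wrapper class added for illustration): the ping
// settings above control how often a node is pinged, how long each ping may take, and how many
// failed pings are tolerated before fault detection gives up on the node.
class FaultDetectionSettingsExample {
  static void example() {
    Settings settings =
        Settings.builder()
            .put("discovery.zen.fd.ping_interval", "5s")
            .put("discovery.zen.fd.ping_retries", 5)
            .build();
    TimeValue pingInterval = FaultDetection.PING_INTERVAL_SETTING.get(settings); // 5 seconds
    TimeValue pingTimeout = FaultDetection.PING_TIMEOUT_SETTING.get(settings); // default: 30s
    int pingRetries = FaultDetection.PING_RETRIES_SETTING.get(settings); // 5
  }
}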
public class MetaData
    implements Iterable<IndexMetaData>,
        Diffable<MetaData>,
        FromXContentBuilder<MetaData>,
        ToXContent {

  public static final MetaData PROTO = builder().build();

  public static final String ALL = "_all";

  public enum XContentContext {
    /* Custom metadata should be returned as part of an API call */
    API,

    /* Custom metadata should be stored as part of the persistent cluster state */
    GATEWAY,

    /* Custom metadata should be stored as part of a snapshot */
    SNAPSHOT
  }

  public static EnumSet<XContentContext> API_ONLY = EnumSet.of(XContentContext.API);
  public static EnumSet<XContentContext> API_AND_GATEWAY =
      EnumSet.of(XContentContext.API, XContentContext.GATEWAY);
  public static EnumSet<XContentContext> API_AND_SNAPSHOT =
      EnumSet.of(XContentContext.API, XContentContext.SNAPSHOT);

  public interface Custom extends Diffable<Custom>, ToXContent {

    String type();

    Custom fromXContent(XContentParser parser) throws IOException;

    EnumSet<XContentContext> context();
  }

  public static Map<String, Custom> customPrototypes = new HashMap<>();

  static {
    // register non plugin custom metadata
    registerPrototype(RepositoriesMetaData.TYPE, RepositoriesMetaData.PROTO);
    registerPrototype(IngestMetadata.TYPE, IngestMetadata.PROTO);
  }

  /** Register a custom index meta data factory. Make sure to call it from a static block. */
  public static void registerPrototype(String type, Custom proto) {
    customPrototypes.put(type, proto);
  }

  @Nullable
  public static <T extends Custom> T lookupPrototype(String type) {
    //noinspection unchecked
    return (T) customPrototypes.get(type);
  }

  public static <T extends Custom> T lookupPrototypeSafe(String type) {
    //noinspection unchecked
    T proto = (T) customPrototypes.get(type);
    if (proto == null) {
      throw new IllegalArgumentException(
          "No custom metadata prototype registered for type ["
              + type
              + "], node likely missing plugins");
    }
    return proto;
  }

  public static final Setting<Boolean> SETTING_READ_ONLY_SETTING =
      Setting.boolSetting("cluster.blocks.read_only", false, true, Setting.Scope.CLUSTER);

  public static final ClusterBlock CLUSTER_READ_ONLY_BLOCK =
      new ClusterBlock(
          6,
          "cluster read-only (api)",
          false,
          false,
          RestStatus.FORBIDDEN,
          EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));

  public static final MetaData EMPTY_META_DATA = builder().build();

  public static final String CONTEXT_MODE_PARAM = "context_mode";

  public static final String CONTEXT_MODE_SNAPSHOT = XContentContext.SNAPSHOT.toString();

  public static final String CONTEXT_MODE_GATEWAY = XContentContext.GATEWAY.toString();

  private final String clusterUUID;
  private final long version;

  private final Settings transientSettings;
  private final Settings persistentSettings;
  private final Settings settings;
  private final ImmutableOpenMap<String, IndexMetaData> indices;
  private final ImmutableOpenMap<String, IndexTemplateMetaData> templates;
  private final ImmutableOpenMap<String, Custom> customs;

  private final transient int totalNumberOfShards; // transient is questionable here; the class is not Serializable anyway
  private final int numberOfShards;

  private final String[] allIndices;
  private final String[] allOpenIndices;
  private final String[] allClosedIndices;

  private final SortedMap<String, AliasOrIndex> aliasAndIndexLookup;

  @SuppressWarnings("unchecked")
  MetaData(
      String clusterUUID,
      long version,
      Settings transientSettings,
      Settings persistentSettings,
      ImmutableOpenMap<String, IndexMetaData> indices,
      ImmutableOpenMap<String, IndexTemplateMetaData> templates,
      ImmutableOpenMap<String, Custom> customs,
      String[] allIndices,
      String[] allOpenIndices,
      String[] allClosedIndices,
      SortedMap<String, AliasOrIndex> aliasAndIndexLookup) {
    this.clusterUUID = clusterUUID;
    this.version = version;
    this.transientSettings = transientSettings;
    this.persistentSettings = persistentSettings;
    this.settings =
        Settings.settingsBuilder().put(persistentSettings).put(transientSettings).build();
    this.indices = indices;
    this.customs = customs;
    this.templates = templates;
    int totalNumberOfShards = 0;
    int numberOfShards = 0;
    for (ObjectCursor<IndexMetaData> cursor : indices.values()) {
      totalNumberOfShards += cursor.value.getTotalNumberOfShards();
      numberOfShards += cursor.value.getNumberOfShards();
    }
    this.totalNumberOfShards = totalNumberOfShards;
    this.numberOfShards = numberOfShards;

    this.allIndices = allIndices;
    this.allOpenIndices = allOpenIndices;
    this.allClosedIndices = allClosedIndices;
    this.aliasAndIndexLookup = aliasAndIndexLookup;
  }

  public long version() {
    return this.version;
  }

  public String clusterUUID() {
    return this.clusterUUID;
  }

  /** Returns the merged transient and persistent settings. */
  public Settings settings() {
    return this.settings;
  }

  public Settings transientSettings() {
    return this.transientSettings;
  }

  public Settings persistentSettings() {
    return this.persistentSettings;
  }

  public boolean hasAlias(String alias) {
    AliasOrIndex aliasOrIndex = getAliasAndIndexLookup().get(alias);
    if (aliasOrIndex != null) {
      return aliasOrIndex.isAlias();
    } else {
      return false;
    }
  }

  public boolean equalsAliases(MetaData other) {
    for (ObjectCursor<IndexMetaData> cursor : other.indices().values()) {
      IndexMetaData otherIndex = cursor.value;
      IndexMetaData thisIndex = index(otherIndex.getIndex());
      if (thisIndex == null) {
        return false;
      }
      if (otherIndex.getAliases().equals(thisIndex.getAliases()) == false) {
        return false;
      }
    }

    return true;
  }

  public SortedMap<String, AliasOrIndex> getAliasAndIndexLookup() {
    return aliasAndIndexLookup;
  }

  /**
   * Finds the index aliases that match the specified alias names, either exactly or via
   * wildcards, and that point to the specified concrete indices.
   *
   * @param aliases The names of the index aliases to find
   * @param concreteIndices The concrete indices the index aliases must point to in order to be
   *     returned.
   * @return the found index aliases grouped by index
   */
  public ImmutableOpenMap<String, List<AliasMetaData>> findAliases(
      final String[] aliases, String[] concreteIndices) {
    assert aliases != null;
    assert concreteIndices != null;
    if (concreteIndices.length == 0) {
      return ImmutableOpenMap.of();
    }

    boolean matchAllAliases = matchAllAliases(aliases);
    ImmutableOpenMap.Builder<String, List<AliasMetaData>> mapBuilder = ImmutableOpenMap.builder();
    Iterable<String> intersection =
        HppcMaps.intersection(ObjectHashSet.from(concreteIndices), indices.keys());
    for (String index : intersection) {
      IndexMetaData indexMetaData = indices.get(index);
      List<AliasMetaData> filteredValues = new ArrayList<>();
      for (ObjectCursor<AliasMetaData> cursor : indexMetaData.getAliases().values()) {
        AliasMetaData value = cursor.value;
        if (matchAllAliases || Regex.simpleMatch(aliases, value.alias())) {
          filteredValues.add(value);
        }
      }

      if (!filteredValues.isEmpty()) {
        // Make the list order deterministic
        CollectionUtil.timSort(
            filteredValues,
            new Comparator<AliasMetaData>() {
              @Override
              public int compare(AliasMetaData o1, AliasMetaData o2) {
                return o1.alias().compareTo(o2.alias());
              }
            });
        mapBuilder.put(index, Collections.unmodifiableList(filteredValues));
      }
    }
    return mapBuilder.build();
  }
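
  // Example with illustrative names: if index "logs-2016" carries aliases "logs" and
  // "archive", then findAliases(new String[] {"log*"}, new String[] {"logs-2016"})
  // returns {"logs-2016" -> [logs]}. Passing "_all" or an empty aliases array
  // matches every alias on the requested indices.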

  private static boolean matchAllAliases(final String[] aliases) {
    for (String alias : aliases) {
      if (alias.equals(ALL)) {
        return true;
      }
    }
    return aliases.length == 0;
  }

  /**
   * Checks if at least one of the specified aliases exists in the specified concrete indices.
   * Wildcards are supported in the alias names for partial matches.
   *
   * @param aliases The names of the index aliases to find
   * @param concreteIndices The concrete indices the index aliases must point to in order to be
   *     returned.
   * @return whether at least one of the specified aliases exists in one of the specified concrete
   *     indices.
   */
  public boolean hasAliases(final String[] aliases, String[] concreteIndices) {
    assert aliases != null;
    assert concreteIndices != null;
    if (concreteIndices.length == 0) {
      return false;
    }

    Iterable<String> intersection =
        HppcMaps.intersection(ObjectHashSet.from(concreteIndices), indices.keys());
    for (String index : intersection) {
      IndexMetaData indexMetaData = indices.get(index);
      List<AliasMetaData> filteredValues = new ArrayList<>();
      for (ObjectCursor<AliasMetaData> cursor : indexMetaData.getAliases().values()) {
        AliasMetaData value = cursor.value;
        if (Regex.simpleMatch(aliases, value.alias())) {
          filteredValues.add(value);
        }
      }
      if (!filteredValues.isEmpty()) {
        return true;
      }
    }
    return false;
  }
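
  // Example (same illustrative names): hasAliases(new String[] {"log*"},
  // new String[] {"logs-2016"}) returns true as soon as any alias on
  // "logs-2016" matches the "log*" pattern.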

  /*
   * Finds all mappings for the given types and concrete indices. Types are expanded to
   * include all types that match the glob patterns in the types array. An empty or null
   * types array, or {"_all"}, is expanded to all types available for the given indices.
   */
  public ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> findMappings(
      String[] concreteIndices, final String[] types) {
    assert types != null;
    assert concreteIndices != null;
    if (concreteIndices.length == 0) {
      return ImmutableOpenMap.of();
    }

    ImmutableOpenMap.Builder<String, ImmutableOpenMap<String, MappingMetaData>> indexMapBuilder =
        ImmutableOpenMap.builder();
    Iterable<String> intersection =
        HppcMaps.intersection(ObjectHashSet.from(concreteIndices), indices.keys());
    for (String index : intersection) {
      IndexMetaData indexMetaData = indices.get(index);
      ImmutableOpenMap.Builder<String, MappingMetaData> filteredMappings;
      if (isAllTypes(types)) {
        indexMapBuilder.put(
            index, indexMetaData.getMappings()); // No types specified means get it all

      } else {
        filteredMappings = ImmutableOpenMap.builder();
        for (ObjectObjectCursor<String, MappingMetaData> cursor : indexMetaData.getMappings()) {
          if (Regex.simpleMatch(types, cursor.key)) {
            filteredMappings.put(cursor.key, cursor.value);
          }
        }
        if (!filteredMappings.isEmpty()) {
          indexMapBuilder.put(index, filteredMappings.build());
        }
      }
    }
    return indexMapBuilder.build();
  }
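
  // Example: findMappings(new String[] {"logs-2016"}, new String[] {"_all"})
  // returns every mapping of that index, while a glob such as {"doc*"} keeps
  // only the types whose names match it.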

  /** Returns all the concrete indices. */
  public String[] concreteAllIndices() {
    return allIndices;
  }

  public String[] getConcreteAllIndices() {
    return concreteAllIndices();
  }

  public String[] concreteAllOpenIndices() {
    return allOpenIndices;
  }

  public String[] getConcreteAllOpenIndices() {
    return allOpenIndices;
  }

  public String[] concreteAllClosedIndices() {
    return allClosedIndices;
  }

  public String[] getConcreteAllClosedIndices() {
    return allClosedIndices;
  }

  /** Returns indexing routing for the given index. */
  // TODO: This can be moved to IndexNameExpressionResolver too, but this means that we will
  // support wildcards and other expressions in the index, bulk, update and delete apis.
  public String resolveIndexRouting(
      @Nullable String parent, @Nullable String routing, String aliasOrIndex) {
    if (aliasOrIndex == null) {
      if (routing == null) {
        return parent;
      }
      return routing;
    }

    AliasOrIndex result = getAliasAndIndexLookup().get(aliasOrIndex);
    if (result == null || result.isAlias() == false) {
      if (routing == null) {
        return parent;
      }
      return routing;
    }
    AliasOrIndex.Alias alias = (AliasOrIndex.Alias) result;
    if (result.getIndices().size() > 1) {
      String[] indexNames = new String[result.getIndices().size()];
      int i = 0;
      for (IndexMetaData indexMetaData : result.getIndices()) {
        indexNames[i++] = indexMetaData.getIndex().getName();
      }
      throw new IllegalArgumentException(
          "Alias ["
              + aliasOrIndex
              + "] has more than one index associated with it ["
              + Arrays.toString(indexNames)
              + "], can't execute a single index op");
    }
    AliasMetaData aliasMd = alias.getFirstAliasMetaData();
    if (aliasMd.indexRouting() != null) {
      if (aliasMd.indexRouting().indexOf(',') != -1) {
        throw new IllegalArgumentException(
            "index/alias ["
                + aliasOrIndex
                + "] provided with routing value ["
                + aliasMd.getIndexRouting()
                + "] that resolved to several routing values, rejecting operation");
      }
      if (routing != null) {
        if (!routing.equals(aliasMd.indexRouting())) {
          throw new IllegalArgumentException(
              "Alias ["
                  + aliasOrIndex
                  + "] has index routing associated with it ["
                  + aliasMd.indexRouting()
                  + "], and was provided with routing value ["
                  + routing
                  + "], rejecting operation");
        }
      }
      // Alias routing overrides the parent routing (if any).
      return aliasMd.indexRouting();
    }
    if (routing == null) {
      return parent;
    }
    return routing;
  }
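
  // Behavior sketch with illustrative names: for an alias "a1" that points to a single
  // index and carries index routing "1", resolveIndexRouting(null, null, "a1") returns "1";
  // resolveIndexRouting(null, "2", "a1") throws because the explicit routing conflicts with
  // the alias routing. For a plain index name, the provided routing (or the parent routing
  // as a fallback) is returned unchanged.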

  public boolean hasIndex(String index) {
    return indices.containsKey(index);
  }

  public boolean hasConcreteIndex(String index) {
    return getAliasAndIndexLookup().containsKey(index);
  }

  public IndexMetaData index(String index) {
    return indices.get(index);
  }

  public IndexMetaData index(Index index) {
    return index(index.getName());
  }

  public ImmutableOpenMap<String, IndexMetaData> indices() {
    return this.indices;
  }

  public ImmutableOpenMap<String, IndexMetaData> getIndices() {
    return indices();
  }

  public ImmutableOpenMap<String, IndexTemplateMetaData> templates() {
    return this.templates;
  }

  public ImmutableOpenMap<String, IndexTemplateMetaData> getTemplates() {
    return this.templates;
  }

  public ImmutableOpenMap<String, Custom> customs() {
    return this.customs;
  }

  public ImmutableOpenMap<String, Custom> getCustoms() {
    return this.customs;
  }

  public <T extends Custom> T custom(String type) {
    return (T) customs.get(type);
  }

  public int totalNumberOfShards() {
    return this.totalNumberOfShards;
  }

  public int getTotalNumberOfShards() {
    return totalNumberOfShards();
  }

  public int numberOfShards() {
    return this.numberOfShards;
  }

  public int getNumberOfShards() {
    return numberOfShards();
  }

  /**
   * Identifies whether the array containing type names given as argument refers to all types.
   * The empty or null array identifies all types.
   *
   * @param types the array containing types
   * @return true if the provided array maps to all types, false otherwise
   */
  public static boolean isAllTypes(String[] types) {
    return types == null || types.length == 0 || isExplicitAllType(types);
  }

  /**
   * Identifies whether the array containing type names given as argument explicitly refers to
   * all types. The empty or null array doesn't explicitly map to all types.
   *
   * @param types the array containing type names
   * @return true if the provided array explicitly maps to all types, false otherwise
   */
  public static boolean isExplicitAllType(String[] types) {
    return types != null && types.length == 1 && ALL.equals(types[0]);
  }
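
  // Examples: isAllTypes(null), isAllTypes(new String[0]) and
  // isAllTypes(new String[] {"_all"}) all return true, but only the last is an
  // explicit all-types match (isExplicitAllType returns true only for {"_all"}).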

  /**
   * @param concreteIndex The concrete index to check if routing is required
   * @param type The type to check if routing is required
   * @return Whether routing is required according to the mapping for the specified index and type
   */
  public boolean routingRequired(String concreteIndex, String type) {
    IndexMetaData indexMetaData = indices.get(concreteIndex);
    if (indexMetaData != null) {
      MappingMetaData mappingMetaData = indexMetaData.getMappings().get(type);
      if (mappingMetaData != null) {
        return mappingMetaData.routing().required();
      }
    }
    return false;
  }

  @Override
  public Iterator<IndexMetaData> iterator() {
    return indices.valuesIt();
  }

  public static boolean isGlobalStateEquals(MetaData metaData1, MetaData metaData2) {
    if (!metaData1.persistentSettings.equals(metaData2.persistentSettings)) {
      return false;
    }
    if (!metaData1.templates.equals(metaData2.templates())) {
      return false;
    }
    // Check if any persistent metadata needs to be saved
    int customCount1 = 0;
    for (ObjectObjectCursor<String, Custom> cursor : metaData1.customs) {
      if (customPrototypes.get(cursor.key).context().contains(XContentContext.GATEWAY)) {
        if (!cursor.value.equals(metaData2.custom(cursor.key))) return false;
        customCount1++;
      }
    }
    int customCount2 = 0;
    for (ObjectObjectCursor<String, Custom> cursor : metaData2.customs) {
      if (customPrototypes.get(cursor.key).context().contains(XContentContext.GATEWAY)) {
        customCount2++;
      }
    }
    if (customCount1 != customCount2) return false;
    return true;
  }

  @Override
  public Diff<MetaData> diff(MetaData previousState) {
    return new MetaDataDiff(previousState, this);
  }

  @Override
  public Diff<MetaData> readDiffFrom(StreamInput in) throws IOException {
    return new MetaDataDiff(in);
  }

  @Override
  public MetaData fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher)
      throws IOException {
    return Builder.fromXContent(parser);
  }

  @Override
  public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
    Builder.toXContent(this, builder, params);
    return builder;
  }

  private static class MetaDataDiff implements Diff<MetaData> {

    private long version;

    private String clusterUUID;

    private Settings transientSettings;
    private Settings persistentSettings;
    private Diff<ImmutableOpenMap<String, IndexMetaData>> indices;
    private Diff<ImmutableOpenMap<String, IndexTemplateMetaData>> templates;
    private Diff<ImmutableOpenMap<String, Custom>> customs;

    public MetaDataDiff(MetaData before, MetaData after) {
      clusterUUID = after.clusterUUID;
      version = after.version;
      transientSettings = after.transientSettings;
      persistentSettings = after.persistentSettings;
      indices =
          DiffableUtils.diff(before.indices, after.indices, DiffableUtils.getStringKeySerializer());
      templates =
          DiffableUtils.diff(
              before.templates, after.templates, DiffableUtils.getStringKeySerializer());
      customs =
          DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
    }

    public MetaDataDiff(StreamInput in) throws IOException {
      clusterUUID = in.readString();
      version = in.readLong();
      transientSettings = Settings.readSettingsFromStream(in);
      persistentSettings = Settings.readSettingsFromStream(in);
      indices =
          DiffableUtils.readImmutableOpenMapDiff(
              in, DiffableUtils.getStringKeySerializer(), IndexMetaData.PROTO);
      templates =
          DiffableUtils.readImmutableOpenMapDiff(
              in, DiffableUtils.getStringKeySerializer(), IndexTemplateMetaData.PROTO);
      customs =
          DiffableUtils.readImmutableOpenMapDiff(
              in,
              DiffableUtils.getStringKeySerializer(),
              new DiffableUtils.DiffableValueSerializer<String, Custom>() {
                @Override
                public Custom read(StreamInput in, String key) throws IOException {
                  return lookupPrototypeSafe(key).readFrom(in);
                }

                @Override
                public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
                  return lookupPrototypeSafe(key).readDiffFrom(in);
                }
              });
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
      out.writeString(clusterUUID);
      out.writeLong(version);
      Settings.writeSettingsToStream(transientSettings, out);
      Settings.writeSettingsToStream(persistentSettings, out);
      indices.writeTo(out);
      templates.writeTo(out);
      customs.writeTo(out);
    }

    @Override
    public MetaData apply(MetaData part) {
      Builder builder = builder();
      builder.clusterUUID(clusterUUID);
      builder.version(version);
      builder.transientSettings(transientSettings);
      builder.persistentSettings(persistentSettings);
      builder.indices(indices.apply(part.indices));
      builder.templates(templates.apply(part.templates));
      builder.customs(customs.apply(part.customs));
      return builder.build();
    }
  }

  @Override
  public MetaData readFrom(StreamInput in) throws IOException {
    Builder builder = new Builder();
    builder.version = in.readLong();
    builder.clusterUUID = in.readString();
    builder.transientSettings(readSettingsFromStream(in));
    builder.persistentSettings(readSettingsFromStream(in));
    int size = in.readVInt();
    for (int i = 0; i < size; i++) {
      builder.put(IndexMetaData.Builder.readFrom(in), false);
    }
    size = in.readVInt();
    for (int i = 0; i < size; i++) {
      builder.put(IndexTemplateMetaData.Builder.readFrom(in));
    }
    int customSize = in.readVInt();
    for (int i = 0; i < customSize; i++) {
      String type = in.readString();
      Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in);
      builder.putCustom(type, customIndexMetaData);
    }
    return builder.build();
  }

  @Override
  public void writeTo(StreamOutput out) throws IOException {
    out.writeLong(version);
    out.writeString(clusterUUID);
    writeSettingsToStream(transientSettings, out);
    writeSettingsToStream(persistentSettings, out);
    out.writeVInt(indices.size());
    for (IndexMetaData indexMetaData : this) {
      indexMetaData.writeTo(out);
    }
    out.writeVInt(templates.size());
    for (ObjectCursor<IndexTemplateMetaData> cursor : templates.values()) {
      cursor.value.writeTo(out);
    }
    out.writeVInt(customs.size());
    for (ObjectObjectCursor<String, Custom> cursor : customs) {
      out.writeString(cursor.key);
      cursor.value.writeTo(out);
    }
  }

  public static Builder builder() {
    return new Builder();
  }

  public static Builder builder(MetaData metaData) {
    return new Builder(metaData);
  }

  /** All known byte-sized cluster settings. */
  public static final Set<String> CLUSTER_BYTES_SIZE_SETTINGS =
      unmodifiableSet(
          newHashSet(
              IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.getKey(),
              RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()));

  /** All known time cluster settings. */
  public static final Set<String> CLUSTER_TIME_SETTINGS =
      unmodifiableSet(
          newHashSet(
              IndicesTTLService.INDICES_TTL_INTERVAL_SETTING.getKey(),
              RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.getKey(),
              RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(),
              RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.getKey(),
              RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.getKey(),
              RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getKey(),
              DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(),
              InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.getKey(),
              InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(),
              DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(),
              InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getKey()));

  /**
   * As of 2.0 we require units for time and byte-sized settings. This method adds default units
   * to any cluster settings that don't specify a unit.
   */
  public static MetaData addDefaultUnitsIfNeeded(ESLogger logger, MetaData metaData) {
    Settings.Builder newPersistentSettings = null;
    for (Map.Entry<String, String> ent : metaData.persistentSettings().getAsMap().entrySet()) {
      String settingName = ent.getKey();
      String settingValue = ent.getValue();
      if (CLUSTER_BYTES_SIZE_SETTINGS.contains(settingName)) {
        try {
          Long.parseLong(settingValue);
        } catch (NumberFormatException nfe) {
          continue;
        }
        // It's a naked number that previously would be interpreted as the default unit (bytes);
        // now we add the unit explicitly:
        logger.warn(
            "byte-sized cluster setting [{}] with value [{}] is missing units; assuming default units (b) but in future versions this will be a hard error",
            settingName,
            settingValue);
        if (newPersistentSettings == null) {
          newPersistentSettings = Settings.builder();
          newPersistentSettings.put(metaData.persistentSettings());
        }
        newPersistentSettings.put(settingName, settingValue + "b");
      }
      if (CLUSTER_TIME_SETTINGS.contains(settingName)) {
        try {
          Long.parseLong(settingValue);
        } catch (NumberFormatException nfe) {
          continue;
        }
        // It's a naked number that previously would be interpreted as the default unit (ms);
        // now we add the unit explicitly:
        logger.warn(
            "time cluster setting [{}] with value [{}] is missing units; assuming default units (ms) but in future versions this will be a hard error",
            settingName,
            settingValue);
        if (newPersistentSettings == null) {
          newPersistentSettings = Settings.builder();
          newPersistentSettings.put(metaData.persistentSettings());
        }
        newPersistentSettings.put(settingName, settingValue + "ms");
      }
    }

    if (newPersistentSettings != null) {
      return new MetaData(
          metaData.clusterUUID(),
          metaData.version(),
          metaData.transientSettings(),
          newPersistentSettings.build(),
          metaData.getIndices(),
          metaData.getTemplates(),
          metaData.getCustoms(),
          metaData.concreteAllIndices(),
          metaData.concreteAllOpenIndices(),
          metaData.concreteAllClosedIndices(),
          metaData.getAliasAndIndexLookup());
    } else {
      // No changes:
      return metaData;
    }
  }
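
  // Example: a persisted byte-size setting whose value is the naked number "10485760"
  // is rewritten to "10485760b", and a naked time setting value "30000" becomes
  // "30000ms"; values that already carry units fail Long.parseLong and are left as-is.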

  public static class Builder {

    private String clusterUUID;
    private long version;

    private Settings transientSettings = Settings.Builder.EMPTY_SETTINGS;
    private Settings persistentSettings = Settings.Builder.EMPTY_SETTINGS;

    private final ImmutableOpenMap.Builder<String, IndexMetaData> indices;
    private final ImmutableOpenMap.Builder<String, IndexTemplateMetaData> templates;
    private final ImmutableOpenMap.Builder<String, Custom> customs;

    public Builder() {
      clusterUUID = "_na_";
      indices = ImmutableOpenMap.builder();
      templates = ImmutableOpenMap.builder();
      customs = ImmutableOpenMap.builder();
    }

    public Builder(MetaData metaData) {
      this.clusterUUID = metaData.clusterUUID;
      this.transientSettings = metaData.transientSettings;
      this.persistentSettings = metaData.persistentSettings;
      this.version = metaData.version;
      this.indices = ImmutableOpenMap.builder(metaData.indices);
      this.templates = ImmutableOpenMap.builder(metaData.templates);
      this.customs = ImmutableOpenMap.builder(metaData.customs);
    }

    public Builder put(IndexMetaData.Builder indexMetaDataBuilder) {
      // we know it's a new one; increment the version and store it
      indexMetaDataBuilder.version(indexMetaDataBuilder.version() + 1);
      IndexMetaData indexMetaData = indexMetaDataBuilder.build();
      indices.put(indexMetaData.getIndex().getName(), indexMetaData);
      return this;
    }

    public Builder put(IndexMetaData indexMetaData, boolean incrementVersion) {
      if (indices.get(indexMetaData.getIndex().getName()) == indexMetaData) {
        return this;
      }
      // if we put a new index metadata, increment its version
      if (incrementVersion) {
        indexMetaData =
            IndexMetaData.builder(indexMetaData).version(indexMetaData.getVersion() + 1).build();
      }
      indices.put(indexMetaData.getIndex().getName(), indexMetaData);
      return this;
    }

    public IndexMetaData get(String index) {
      return indices.get(index);
    }

    public Builder remove(String index) {
      indices.remove(index);
      return this;
    }

    public Builder removeAllIndices() {
      indices.clear();
      return this;
    }

    public Builder indices(ImmutableOpenMap<String, IndexMetaData> indices) {
      this.indices.putAll(indices);
      return this;
    }

    public Builder put(IndexTemplateMetaData.Builder template) {
      return put(template.build());
    }

    public Builder put(IndexTemplateMetaData template) {
      templates.put(template.name(), template);
      return this;
    }

    public Builder removeTemplate(String templateName) {
      templates.remove(templateName);
      return this;
    }

    public Builder templates(ImmutableOpenMap<String, IndexTemplateMetaData> templates) {
      this.templates.putAll(templates);
      return this;
    }

    public Custom getCustom(String type) {
      return customs.get(type);
    }

    public Builder putCustom(String type, Custom custom) {
      customs.put(type, custom);
      return this;
    }

    public Builder removeCustom(String type) {
      customs.remove(type);
      return this;
    }

    public Builder customs(ImmutableOpenMap<String, Custom> customs) {
      this.customs.putAll(customs);
      return this;
    }

    public Builder updateSettings(Settings settings, String... indices) {
      if (indices == null || indices.length == 0) {
        indices = this.indices.keys().toArray(String.class);
      }
      for (String index : indices) {
        IndexMetaData indexMetaData = this.indices.get(index);
        if (indexMetaData == null) {
          throw new IndexNotFoundException(index);
        }
        put(
            IndexMetaData.builder(indexMetaData)
                .settings(settingsBuilder().put(indexMetaData.getSettings()).put(settings)));
      }
      return this;
    }

    public Builder updateNumberOfReplicas(int numberOfReplicas, String... indices) {
      if (indices == null || indices.length == 0) {
        indices = this.indices.keys().toArray(String.class);
      }
      for (String index : indices) {
        IndexMetaData indexMetaData = this.indices.get(index);
        if (indexMetaData == null) {
          throw new IndexNotFoundException(index);
        }
        put(IndexMetaData.builder(indexMetaData).numberOfReplicas(numberOfReplicas));
      }
      return this;
    }

    public Settings transientSettings() {
      return this.transientSettings;
    }

    public Builder transientSettings(Settings settings) {
      this.transientSettings = settings;
      return this;
    }

    public Settings persistentSettings() {
      return this.persistentSettings;
    }

    public Builder persistentSettings(Settings settings) {
      this.persistentSettings = settings;
      return this;
    }

    public Builder version(long version) {
      this.version = version;
      return this;
    }

    public Builder clusterUUID(String clusterUUID) {
      this.clusterUUID = clusterUUID;
      return this;
    }

    public Builder generateClusterUuidIfNeeded() {
      if (clusterUUID.equals("_na_")) {
        clusterUUID = Strings.randomBase64UUID();
      }
      return this;
    }

    public MetaData build() {
      // TODO: We should move these datastructures to IndexNameExpressionResolver, this will give
      // the following benefits:
      // 1) The datastructures will only be rebuilt when needed. Right now we rebuild them during
      //    serialization, even though they aren't used there.
      // 2) The aliasAndIndexLookup can be updated instead of being rebuilt all the time.

      // build all concrete indices arrays:
      // TODO: I think we can remove these arrays; it isn't worth the effort for operations on all
      // indices. When doing an operation across all indices, most of the time is spent actually
      // going to all shards and doing the required operations; the bottleneck isn't resolving
      // expressions into concrete indices.
      List<String> allIndicesLst = new ArrayList<>();
      for (ObjectCursor<IndexMetaData> cursor : indices.values()) {
        allIndicesLst.add(cursor.value.getIndex().getName());
      }
      String[] allIndices = allIndicesLst.toArray(new String[allIndicesLst.size()]);

      List<String> allOpenIndicesLst = new ArrayList<>();
      List<String> allClosedIndicesLst = new ArrayList<>();
      for (ObjectCursor<IndexMetaData> cursor : indices.values()) {
        IndexMetaData indexMetaData = cursor.value;
        if (indexMetaData.getState() == IndexMetaData.State.OPEN) {
          allOpenIndicesLst.add(indexMetaData.getIndex().getName());
        } else if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
          allClosedIndicesLst.add(indexMetaData.getIndex().getName());
        }
      }
      String[] allOpenIndices = allOpenIndicesLst.toArray(new String[allOpenIndicesLst.size()]);
      String[] allClosedIndices =
          allClosedIndicesLst.toArray(new String[allClosedIndicesLst.size()]);

      // build all indices map
      SortedMap<String, AliasOrIndex> aliasAndIndexLookup = new TreeMap<>();
      for (ObjectCursor<IndexMetaData> cursor : indices.values()) {
        IndexMetaData indexMetaData = cursor.value;
        aliasAndIndexLookup.put(
            indexMetaData.getIndex().getName(), new AliasOrIndex.Index(indexMetaData));

        for (ObjectObjectCursor<String, AliasMetaData> aliasCursor : indexMetaData.getAliases()) {
          AliasMetaData aliasMetaData = aliasCursor.value;
          AliasOrIndex aliasOrIndex = aliasAndIndexLookup.get(aliasMetaData.getAlias());
          if (aliasOrIndex == null) {
            aliasOrIndex = new AliasOrIndex.Alias(aliasMetaData, indexMetaData);
            aliasAndIndexLookup.put(aliasMetaData.getAlias(), aliasOrIndex);
          } else if (aliasOrIndex instanceof AliasOrIndex.Alias) {
            AliasOrIndex.Alias alias = (AliasOrIndex.Alias) aliasOrIndex;
            alias.addIndex(indexMetaData);
          } else if (aliasOrIndex instanceof AliasOrIndex.Index) {
            AliasOrIndex.Index index = (AliasOrIndex.Index) aliasOrIndex;
            throw new IllegalStateException(
                "index and alias names need to be unique, but alias ["
                    + aliasMetaData.getAlias()
                    + "] and index "
                    + index.getIndex().getIndex()
                    + " have the same name");
          } else {
            throw new IllegalStateException(
                "unexpected alias [" + aliasMetaData.getAlias() + "][" + aliasOrIndex + "]");
          }
        }
      }
      aliasAndIndexLookup = Collections.unmodifiableSortedMap(aliasAndIndexLookup);
      return new MetaData(
          clusterUUID,
          version,
          transientSettings,
          persistentSettings,
          indices.build(),
          templates.build(),
          customs.build(),
          allIndices,
          allOpenIndices,
          allClosedIndices,
          aliasAndIndexLookup);
    }

    public static String toXContent(MetaData metaData) throws IOException {
      XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
      builder.startObject();
      toXContent(metaData, builder, ToXContent.EMPTY_PARAMS);
      builder.endObject();
      return builder.string();
    }

    public static void toXContent(
        MetaData metaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
      XContentContext context = XContentContext.valueOf(params.param(CONTEXT_MODE_PARAM, "API"));

      builder.startObject("meta-data");

      builder.field("version", metaData.version());
      builder.field("cluster_uuid", metaData.clusterUUID);

      if (!metaData.persistentSettings().getAsMap().isEmpty()) {
        builder.startObject("settings");
        for (Map.Entry<String, String> entry :
            metaData.persistentSettings().getAsMap().entrySet()) {
          builder.field(entry.getKey(), entry.getValue());
        }
        builder.endObject();
      }

      if (context == XContentContext.API && !metaData.transientSettings().getAsMap().isEmpty()) {
        builder.startObject("transient_settings");
        for (Map.Entry<String, String> entry : metaData.transientSettings().getAsMap().entrySet()) {
          builder.field(entry.getKey(), entry.getValue());
        }
        builder.endObject();
      }

      builder.startObject("templates");
      for (ObjectCursor<IndexTemplateMetaData> cursor : metaData.templates().values()) {
        IndexTemplateMetaData.Builder.toXContent(cursor.value, builder, params);
      }
      builder.endObject();

      if (context == XContentContext.API && !metaData.indices().isEmpty()) {
        builder.startObject("indices");
        for (IndexMetaData indexMetaData : metaData) {
          IndexMetaData.Builder.toXContent(indexMetaData, builder, params);
        }
        builder.endObject();
      }

      for (ObjectObjectCursor<String, Custom> cursor : metaData.customs()) {
        Custom proto = lookupPrototypeSafe(cursor.key);
        if (proto.context().contains(context)) {
          builder.startObject(cursor.key);
          cursor.value.toXContent(builder, params);
          builder.endObject();
        }
      }
      builder.endObject();
    }

    public static MetaData fromXContent(XContentParser parser) throws IOException {
      Builder builder = new Builder();

      // we might get here after the meta-data element, or on a fresh parser
      XContentParser.Token token = parser.currentToken();
      String currentFieldName = parser.currentName();
      if (!"meta-data".equals(currentFieldName)) {
        token = parser.nextToken();
        if (token == XContentParser.Token.START_OBJECT) {
          // move to the field name (meta-data)
          token = parser.nextToken();
          if (token != XContentParser.Token.FIELD_NAME) {
            throw new IllegalArgumentException("Expected a field name but got " + token);
          }
          // move to the next object
          token = parser.nextToken();
        }
        currentFieldName = parser.currentName();
      }

      if (!"meta-data".equals(parser.currentName())) {
        throw new IllegalArgumentException(
            "Expected [meta-data] as a field name but got " + currentFieldName);
      }
      if (token != XContentParser.Token.START_OBJECT) {
        throw new IllegalArgumentException("Expected a START_OBJECT but got " + token);
      }

      while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
          currentFieldName = parser.currentName();
        } else if (token == XContentParser.Token.START_OBJECT) {
          if ("settings".equals(currentFieldName)) {
            builder.persistentSettings(
                Settings.settingsBuilder()
                    .put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered()))
                    .build());
          } else if ("indices".equals(currentFieldName)) {
            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
              builder.put(IndexMetaData.Builder.fromXContent(parser), false);
            }
          } else if ("templates".equals(currentFieldName)) {
            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
              builder.put(IndexTemplateMetaData.Builder.fromXContent(parser, parser.currentName()));
            }
          } else {
            // check if it's custom metadata
            Custom proto = lookupPrototype(currentFieldName);
            if (proto == null) {
              // TODO warn
              parser.skipChildren();
            } else {
              Custom custom = proto.fromXContent(parser);
              builder.putCustom(custom.type(), custom);
            }
          }
        } else if (token.isValue()) {
          if ("version".equals(currentFieldName)) {
            builder.version = parser.longValue();
          } else if ("cluster_uuid".equals(currentFieldName) || "uuid".equals(currentFieldName)) {
            builder.clusterUUID = parser.text();
          } else {
            throw new IllegalArgumentException("Unexpected field [" + currentFieldName + "]");
          }
        } else {
          throw new IllegalArgumentException("Unexpected token " + token);
        }
      }
      return builder.build();
    }

    public static MetaData readFrom(StreamInput in) throws IOException {
      return PROTO.readFrom(in);
    }
  }
}
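A minimal usage sketch, not part of the original listing: building a MetaData instance through the Builder shown above and querying it. The index name "my-index" is illustrative, and the required imports are assumed to be in place.

MetaData metaData =
    MetaData.builder()
        .put(
            IndexMetaData.builder("my-index")
                .settings(
                    Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
                .numberOfShards(1)
                .numberOfReplicas(0))
        .build();

assert metaData.hasIndex("my-index");          // concrete index lookup
assert metaData.getNumberOfShards() == 1;      // primaries only
assert metaData.getTotalNumberOfShards() == 1; // primaries + replicas (0 replicas here)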
Example #9
/**
 * Serves as a node level registry for hunspell dictionaries. This service expects all dictionaries
 * to be located under the {@code <path.conf>/hunspell} directory, where each locale has its
 * dedicated sub-directory which holds the dictionary files. For example, the dictionary files for
 * {@code en_US} locale must be placed under {@code <path.conf>/hunspell/en_US} directory.
 *
 * <p>The following settings can be set for each dictionary:
 *
 * <ul>
 *   <li>{@code ignore_case} - If true, dictionary matching will be case insensitive (defaults to
 *       {@code false})
 *   <li>{@code strict_affix_parsing} - Determines whether errors while reading an affix rules
 *       file will cause an exception or simply be ignored (defaults to {@code true})
 * </ul>
 *
 * <p>These settings can either be configured as node level configuration, such as: <br>
 * <br>
 *
 * <pre><code>
 *     indices.analysis.hunspell.dictionary.en_US.ignore_case: true
 *     indices.analysis.hunspell.dictionary.en_US.strict_affix_parsing: false
 * </code></pre>
 *
 * <p>or, as dedicated configuration per dictionary, placed in a {@code settings.yml} file under the
 * dictionary directory. For example, the following can be the content of the {@code
 * <path.config>/hunspell/en_US/settings.yml} file: <br>
 * <br>
 *
 * <pre><code>
 *     ignore_case: true
 *     strict_affix_parsing: false
 * </code></pre>
 *
 * @see org.elasticsearch.index.analysis.HunspellTokenFilterFactory
 */
public class HunspellService extends AbstractComponent {

  public static final Setting<Boolean> HUNSPELL_LAZY_LOAD =
      Setting.boolSetting(
          "indices.analysis.hunspell.dictionary.lazy", Boolean.FALSE, Property.NodeScope);
  public static final Setting<Boolean> HUNSPELL_IGNORE_CASE =
      Setting.boolSetting(
          "indices.analysis.hunspell.dictionary.ignore_case", Boolean.FALSE, Property.NodeScope);
  public static final Setting<Settings> HUNSPELL_DICTIONARY_OPTIONS =
      Setting.groupSetting("indices.analysis.hunspell.dictionary.", Property.NodeScope);
  private final ConcurrentHashMap<String, Dictionary> dictionaries = new ConcurrentHashMap<>();
  private final Map<String, Dictionary> knownDictionaries;
  private final boolean defaultIgnoreCase;
  private final Path hunspellDir;
  private final Function<String, Dictionary> loadingFunction;

  public HunspellService(
      final Settings settings,
      final Environment env,
      final Map<String, Dictionary> knownDictionaries)
      throws IOException {
    super(settings);
    this.knownDictionaries = Collections.unmodifiableMap(knownDictionaries);
    this.hunspellDir = resolveHunspellDirectory(env);
    this.defaultIgnoreCase = HUNSPELL_IGNORE_CASE.get(settings);
    this.loadingFunction =
        (locale) -> {
          try {
            return loadDictionary(locale, settings, env);
          } catch (Throwable e) {
            throw new IllegalStateException(
                "failed to load hunspell dictionary for locale: " + locale, e);
          }
        };
    if (!HUNSPELL_LAZY_LOAD.get(settings)) {
      scanAndLoadDictionaries();
    }
  }

  /**
   * Returns the hunspell dictionary for the given locale.
   *
   * @param locale The name of the locale
   */
  public Dictionary getDictionary(String locale) {
    Dictionary dictionary = knownDictionaries.get(locale);
    if (dictionary == null) {
      dictionary = dictionaries.computeIfAbsent(locale, loadingFunction);
    }
    return dictionary;
  }

  private Path resolveHunspellDirectory(Environment env) {
    return env.configFile().resolve("hunspell");
  }

  /** Scans the hunspell directory and loads all found dictionaries */
  private void scanAndLoadDictionaries() throws IOException {
    if (Files.isDirectory(hunspellDir)) {
      try (DirectoryStream<Path> stream = Files.newDirectoryStream(hunspellDir)) {
        for (Path file : stream) {
          if (Files.isDirectory(file)) {
            try (DirectoryStream<Path> inner =
                Files.newDirectoryStream(hunspellDir.resolve(file), "*.dic")) {
              if (inner.iterator().hasNext()) { // just making sure it's indeed a dictionary dir
                try {
                  getDictionary(file.getFileName().toString());
                } catch (Throwable e) {
                  // The cache loader throws an unchecked exception (see #loadDictionary());
                  // here we simply log the failure and continue loading the remaining dictionaries
                  logger.error("exception while loading dictionary {}", e, file.getFileName());
                }
              }
            }
          }
        }
      }
    }
  }

  /**
   * Loads the hunspell dictionary for the given locale.
   *
   * @param locale The locale of the hunspell dictionary to be loaded.
   * @param nodeSettings The node level settings
   * @param env The node environment (from which the conf path will be resolved)
   * @return The loaded Hunspell dictionary
   * @throws Exception when loading fails (due to IO errors or malformed dictionary files)
   */
  private Dictionary loadDictionary(String locale, Settings nodeSettings, Environment env)
      throws Exception {
    if (logger.isDebugEnabled()) {
      logger.debug("Loading hunspell dictionary [{}]...", locale);
    }
    Path dicDir = hunspellDir.resolve(locale);
    if (FileSystemUtils.isAccessibleDirectory(dicDir, logger) == false) {
      throw new ElasticsearchException(
          String.format(Locale.ROOT, "Could not find hunspell dictionary [%s]", locale));
    }

    // merging node settings with hunspell dictionary specific settings
    Settings dictSettings = HUNSPELL_DICTIONARY_OPTIONS.get(nodeSettings);
    nodeSettings = loadDictionarySettings(dicDir, dictSettings.getByPrefix(locale));

    boolean ignoreCase = nodeSettings.getAsBoolean("ignore_case", defaultIgnoreCase);

    Path[] affixFiles = FileSystemUtils.files(dicDir, "*.aff");
    if (affixFiles.length == 0) {
      throw new ElasticsearchException(
          String.format(Locale.ROOT, "Missing affix file for hunspell dictionary [%s]", locale));
    }
    if (affixFiles.length != 1) {
      throw new ElasticsearchException(
          String.format(
              Locale.ROOT, "Too many affix files exist for hunspell dictionary [%s]", locale));
    }
    InputStream affixStream = null;

    Path[] dicFiles = FileSystemUtils.files(dicDir, "*.dic");
    List<InputStream> dicStreams = new ArrayList<>(dicFiles.length);
    try {

      for (int i = 0; i < dicFiles.length; i++) {
        dicStreams.add(Files.newInputStream(dicFiles[i]));
      }

      affixStream = Files.newInputStream(affixFiles[0]);

      try (Directory tmp = new SimpleFSDirectory(env.tmpFile())) {
        return new Dictionary(tmp, "hunspell", affixStream, dicStreams, ignoreCase);
      }

    } catch (Exception e) {
      logger.error("Could not load hunspell dictionary [{}]", e, locale);
      throw e;
    } finally {
      IOUtils.close(affixStream);
      IOUtils.close(dicStreams);
    }
  }

  /**
   * Each hunspell dictionary directory may contain a {@code settings.yml} which holds dictionary
   * specific settings. Default values for these settings are defined in the given default settings.
   *
   * @param dir The directory of the dictionary
   * @param defaults The default settings for this dictionary
   * @return The resolved settings.
   */
  private static Settings loadDictionarySettings(Path dir, Settings defaults) {
    Path file = dir.resolve("settings.yml");
    if (Files.exists(file)) {
      return Settings.builder().loadFromPath(file).put(defaults).build();
    }

    file = dir.resolve("settings.json");
    if (Files.exists(file)) {
      return Settings.builder().loadFromPath(file).put(defaults).build();
    }

    return defaults;
  }
}
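A hedged usage sketch, not from the original listing: constructing the service with lazy loading enabled so dictionaries are only read on first access. The single-argument Environment constructor and the existence of a <path.conf>/hunspell/en_US directory are assumptions.

Settings settings =
    Settings.builder()
        .put(HunspellService.HUNSPELL_LAZY_LOAD.getKey(), true)
        .put(HunspellService.HUNSPELL_IGNORE_CASE.getKey(), true)
        .build();
Environment env = new Environment(settings); // resolves <path.conf> and related paths
HunspellService service = new HunspellService(settings, env, Collections.emptyMap());
Dictionary enUS = service.getDictionary("en_US"); // loaded on first access (lazy)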
Example #10
public class IndexMetaData
    implements Diffable<IndexMetaData>, FromXContentBuilder<IndexMetaData>, ToXContent {

  public interface Custom extends Diffable<Custom>, ToXContent {

    String type();

    Custom fromMap(Map<String, Object> map) throws IOException;

    Custom fromXContent(XContentParser parser) throws IOException;

    /**
     * Merges this instance with another, with this instance taking precedence: if a value exists
     * in both this and the other, the value from this instance prevails.
     */
    Custom mergeWith(Custom another);
  }

  public static Map<String, Custom> customPrototypes = new HashMap<>();

  /** Register a custom index meta data factory. Make sure to call it from a static block. */
  public static void registerPrototype(String type, Custom proto) {
    customPrototypes.put(type, proto);
  }

  @Nullable
  public static <T extends Custom> T lookupPrototype(String type) {
    //noinspection unchecked
    return (T) customPrototypes.get(type);
  }

  public static <T extends Custom> T lookupPrototypeSafe(String type) {
    //noinspection unchecked
    T proto = (T) customPrototypes.get(type);
    if (proto == null) {
      throw new IllegalArgumentException(
          "No custom metadata prototype registered for type [" + type + "]");
    }
    return proto;
  }

  public static final ClusterBlock INDEX_READ_ONLY_BLOCK =
      new ClusterBlock(
          5,
          "index read-only (api)",
          false,
          false,
          RestStatus.FORBIDDEN,
          EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));
  public static final ClusterBlock INDEX_READ_BLOCK =
      new ClusterBlock(
          7,
          "index read (api)",
          false,
          false,
          RestStatus.FORBIDDEN,
          EnumSet.of(ClusterBlockLevel.READ));
  public static final ClusterBlock INDEX_WRITE_BLOCK =
      new ClusterBlock(
          8,
          "index write (api)",
          false,
          false,
          RestStatus.FORBIDDEN,
          EnumSet.of(ClusterBlockLevel.WRITE));
  public static final ClusterBlock INDEX_METADATA_BLOCK =
      new ClusterBlock(
          9,
          "index metadata (api)",
          false,
          false,
          RestStatus.FORBIDDEN,
          EnumSet.of(ClusterBlockLevel.METADATA_WRITE, ClusterBlockLevel.METADATA_READ));

  public enum State {
    OPEN((byte) 0),
    CLOSE((byte) 1);

    private final byte id;

    State(byte id) {
      this.id = id;
    }

    public byte id() {
      return this.id;
    }

    public static State fromId(byte id) {
      if (id == 0) {
        return OPEN;
      } else if (id == 1) {
        return CLOSE;
      }
      throw new IllegalStateException("No state match for id [" + id + "]");
    }

    public static State fromString(String state) {
      if ("open".equals(state)) {
        return OPEN;
      } else if ("close".equals(state)) {
        return CLOSE;
      }
      throw new IllegalStateException("No state match for [" + state + "]");
    }
  }

  public static final String INDEX_SETTING_PREFIX = "index.";
  public static final String SETTING_NUMBER_OF_SHARDS = "index.number_of_shards";
  public static final Setting<Integer> INDEX_NUMBER_OF_SHARDS_SETTING =
      Setting.intSetting(SETTING_NUMBER_OF_SHARDS, 5, 1, false, Setting.Scope.INDEX);
  public static final String SETTING_NUMBER_OF_REPLICAS = "index.number_of_replicas";
  public static final Setting<Integer> INDEX_NUMBER_OF_REPLICAS_SETTING =
      Setting.intSetting(SETTING_NUMBER_OF_REPLICAS, 1, 0, true, Setting.Scope.INDEX);
  public static final String SETTING_SHADOW_REPLICAS = "index.shadow_replicas";
  public static final Setting<Boolean> INDEX_SHADOW_REPLICAS_SETTING =
      Setting.boolSetting(SETTING_SHADOW_REPLICAS, false, false, Setting.Scope.INDEX);

  public static final String SETTING_SHARED_FILESYSTEM = "index.shared_filesystem";
  public static final Setting<Boolean> INDEX_SHARED_FILESYSTEM_SETTING =
      Setting.boolSetting(SETTING_SHARED_FILESYSTEM, false, false, Setting.Scope.INDEX);

  public static final String SETTING_AUTO_EXPAND_REPLICAS = "index.auto_expand_replicas";
  public static final Setting<AutoExpandReplicas> INDEX_AUTO_EXPAND_REPLICAS_SETTING =
      AutoExpandReplicas.SETTING;
  public static final String SETTING_READ_ONLY = "index.blocks.read_only";
  public static final Setting<Boolean> INDEX_READ_ONLY_SETTING =
      Setting.boolSetting(SETTING_READ_ONLY, false, true, Setting.Scope.INDEX);

  public static final String SETTING_BLOCKS_READ = "index.blocks.read";
  public static final Setting<Boolean> INDEX_BLOCKS_READ_SETTING =
      Setting.boolSetting(SETTING_BLOCKS_READ, false, true, Setting.Scope.INDEX);

  public static final String SETTING_BLOCKS_WRITE = "index.blocks.write";
  public static final Setting<Boolean> INDEX_BLOCKS_WRITE_SETTING =
      Setting.boolSetting(SETTING_BLOCKS_WRITE, false, true, Setting.Scope.INDEX);

  public static final String SETTING_BLOCKS_METADATA = "index.blocks.metadata";
  public static final Setting<Boolean> INDEX_BLOCKS_METADATA_SETTING =
      Setting.boolSetting(SETTING_BLOCKS_METADATA, false, true, Setting.Scope.INDEX);

  public static final String SETTING_VERSION_CREATED = "index.version.created";
  public static final String SETTING_VERSION_CREATED_STRING = "index.version.created_string";
  public static final String SETTING_VERSION_UPGRADED = "index.version.upgraded";
  public static final String SETTING_VERSION_UPGRADED_STRING = "index.version.upgraded_string";
  public static final String SETTING_VERSION_MINIMUM_COMPATIBLE =
      "index.version.minimum_compatible";
  public static final String SETTING_CREATION_DATE = "index.creation_date";
  public static final String SETTING_PRIORITY = "index.priority";
  public static final Setting<Integer> INDEX_PRIORITY_SETTING =
      Setting.intSetting("index.priority", 1, 0, true, Setting.Scope.INDEX);
  public static final String SETTING_CREATION_DATE_STRING = "index.creation_date_string";
  public static final String SETTING_INDEX_UUID = "index.uuid";
  public static final String SETTING_DATA_PATH = "index.data_path";
  public static final Setting<String> INDEX_DATA_PATH_SETTING =
      new Setting<>(SETTING_DATA_PATH, "", Function.identity(), false, Setting.Scope.INDEX);
  public static final String SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE =
      "index.shared_filesystem.recover_on_any_node";
  public static final Setting<Boolean> INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING =
      Setting.boolSetting(
          SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, true, Setting.Scope.INDEX);
  public static final String INDEX_UUID_NA_VALUE = "_na_";

  public static final Setting<Settings> INDEX_ROUTING_REQUIRE_GROUP_SETTING =
      Setting.groupSetting("index.routing.allocation.require.", true, Setting.Scope.INDEX);
  public static final Setting<Settings> INDEX_ROUTING_INCLUDE_GROUP_SETTING =
      Setting.groupSetting("index.routing.allocation.include.", true, Setting.Scope.INDEX);
  public static final Setting<Settings> INDEX_ROUTING_EXCLUDE_GROUP_SETTING =
      Setting.groupSetting("index.routing.allocation.exclude.", true, Setting.Scope.INDEX);

  public static final IndexMetaData PROTO =
      IndexMetaData.builder("")
          .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
          .numberOfShards(1)
          .numberOfReplicas(0)
          .build();

  public static final String KEY_ACTIVE_ALLOCATIONS = "active_allocations";

  private final int numberOfShards;
  private final int numberOfReplicas;

  private final Index index;
  private final long version;

  private final State state;

  private final ImmutableOpenMap<String, AliasMetaData> aliases;

  private final Settings settings;

  private final ImmutableOpenMap<String, MappingMetaData> mappings;

  private final ImmutableOpenMap<String, Custom> customs;

  private final ImmutableOpenIntMap<Set<String>> activeAllocationIds;

  private final transient int totalNumberOfShards;

  private final DiscoveryNodeFilters requireFilters;
  private final DiscoveryNodeFilters includeFilters;
  private final DiscoveryNodeFilters excludeFilters;

  private final Version indexCreatedVersion;
  private final Version indexUpgradedVersion;
  private final org.apache.lucene.util.Version minimumCompatibleLuceneVersion;

  private IndexMetaData(
      Index index,
      long version,
      State state,
      int numberOfShards,
      int numberOfReplicas,
      Settings settings,
      ImmutableOpenMap<String, MappingMetaData> mappings,
      ImmutableOpenMap<String, AliasMetaData> aliases,
      ImmutableOpenMap<String, Custom> customs,
      ImmutableOpenIntMap<Set<String>> activeAllocationIds,
      DiscoveryNodeFilters requireFilters,
      DiscoveryNodeFilters includeFilters,
      DiscoveryNodeFilters excludeFilters,
      Version indexCreatedVersion,
      Version indexUpgradedVersion,
      org.apache.lucene.util.Version minimumCompatibleLuceneVersion) {

    this.index = index;
    this.version = version;
    this.state = state;
    this.numberOfShards = numberOfShards;
    this.numberOfReplicas = numberOfReplicas;
    this.totalNumberOfShards = numberOfShards * (numberOfReplicas + 1);
    this.settings = settings;
    this.mappings = mappings;
    this.customs = customs;
    this.aliases = aliases;
    this.activeAllocationIds = activeAllocationIds;
    this.requireFilters = requireFilters;
    this.includeFilters = includeFilters;
    this.excludeFilters = excludeFilters;
    this.indexCreatedVersion = indexCreatedVersion;
    this.indexUpgradedVersion = indexUpgradedVersion;
    this.minimumCompatibleLuceneVersion = minimumCompatibleLuceneVersion;
  }

  public Index getIndex() {
    return index;
  }

  public String getIndexUUID() {
    return index.getUUID();
  }

  /**
   * Tests whether the current index UUID is the same as the given one. Returns true if either is
   * {@code _na_}.
   */
  public boolean isSameUUID(String otherUUID) {
    assert otherUUID != null;
    assert getIndexUUID() != null;
    if (INDEX_UUID_NA_VALUE.equals(otherUUID) || INDEX_UUID_NA_VALUE.equals(getIndexUUID())) {
      return true;
    }
    return otherUUID.equals(getIndexUUID());
  }
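
  // Hypothetical illustration of the _na_ wildcard semantics above (values are
  // illustrative): a UUID of "_na_", e.g. from pre-UUID metadata, matches anything.
  //
  //   metaData.isSameUUID(INDEX_UUID_NA_VALUE); // always true
  //   metaData.isSameUUID("Xyz123");            // true iff equal, or our own UUID is _na_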

  public long getVersion() {
    return this.version;
  }

  /**
   * Return the {@link Version} on which this index has been created. This information is typically
   * useful for backward compatibility.
   */
  public Version getCreationVersion() {
    return indexCreatedVersion;
  }

  /**
   * Return the {@link Version} on which this index has been upgraded. This information is typically
   * useful for backward compatibility.
   */
  public Version getUpgradedVersion() {
    return indexUpgradedVersion;
  }

  /** Return the {@link org.apache.lucene.util.Version} of the oldest Lucene segment in the index. */
  public org.apache.lucene.util.Version getMinimumCompatibleVersion() {
    return minimumCompatibleLuceneVersion;
  }

  public long getCreationDate() {
    return settings.getAsLong(SETTING_CREATION_DATE, -1L);
  }

  public State getState() {
    return this.state;
  }

  public int getNumberOfShards() {
    return numberOfShards;
  }

  public int getNumberOfReplicas() {
    return numberOfReplicas;
  }

  public int getTotalNumberOfShards() {
    return totalNumberOfShards;
  }

  public Settings getSettings() {
    return settings;
  }

  public ImmutableOpenMap<String, AliasMetaData> getAliases() {
    return this.aliases;
  }

  public ImmutableOpenMap<String, MappingMetaData> getMappings() {
    return mappings;
  }

  @Nullable
  public MappingMetaData mapping(String mappingType) {
    return mappings.get(mappingType);
  }

  /**
   * Sometimes the default mapping exists while an actual mapping for the type has not been
   * introduced yet; in that case, we want to return the default mapping, since it may carry
   * default mapping definitions.
   *
   * <p>Note that once the mapping type is introduced, the default mapping is applied to the actual
   * typed MappingMetaData, setting its routing, timestamp, and so on as needed.
   */
  @Nullable
  public MappingMetaData mappingOrDefault(String mappingType) {
    MappingMetaData mapping = mappings.get(mappingType);
    if (mapping != null) {
      return mapping;
    }
    return mappings.get(MapperService.DEFAULT_MAPPING);
  }
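
  // Illustrative sketch of the fallback above (type names are hypothetical):
  //
  //   metaData.mappingOrDefault("tweet");    // type not introduced -> "_default_" mapping
  //   metaData.mappingOrDefault("existing"); // typed mapping returned directly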

  public ImmutableOpenMap<String, Custom> getCustoms() {
    return this.customs;
  }

  @SuppressWarnings("unchecked")
  public <T extends Custom> T custom(String type) {
    return (T) customs.get(type);
  }

  public ImmutableOpenIntMap<Set<String>> getActiveAllocationIds() {
    return activeAllocationIds;
  }

  public Set<String> activeAllocationIds(int shardId) {
    assert shardId >= 0 && shardId < numberOfShards;
    return activeAllocationIds.get(shardId);
  }

  @Nullable
  public DiscoveryNodeFilters requireFilters() {
    return requireFilters;
  }

  @Nullable
  public DiscoveryNodeFilters includeFilters() {
    return includeFilters;
  }

  @Nullable
  public DiscoveryNodeFilters excludeFilters() {
    return excludeFilters;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }

    IndexMetaData that = (IndexMetaData) o;

    if (!aliases.equals(that.aliases)) {
      return false;
    }
    if (!index.equals(that.index)) {
      return false;
    }
    if (!mappings.equals(that.mappings)) {
      return false;
    }
    if (!settings.equals(that.settings)) {
      return false;
    }
    if (state != that.state) {
      return false;
    }
    if (!customs.equals(that.customs)) {
      return false;
    }
    if (!activeAllocationIds.equals(that.activeAllocationIds)) {
      return false;
    }
    return true;
  }

  @Override
  public int hashCode() {
    int result = index.hashCode();
    result = 31 * result + state.hashCode();
    result = 31 * result + aliases.hashCode();
    result = 31 * result + settings.hashCode();
    result = 31 * result + mappings.hashCode();
    result = 31 * result + activeAllocationIds.hashCode();
    return result;
  }

  @Override
  public Diff<IndexMetaData> diff(IndexMetaData previousState) {
    return new IndexMetaDataDiff(previousState, this);
  }

  @Override
  public Diff<IndexMetaData> readDiffFrom(StreamInput in) throws IOException {
    return new IndexMetaDataDiff(in);
  }

  @Override
  public IndexMetaData fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher)
      throws IOException {
    return Builder.fromXContent(parser);
  }

  @Override
  public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
    Builder.toXContent(this, builder, params);
    return builder;
  }

  private static class IndexMetaDataDiff implements Diff<IndexMetaData> {

    private final String index;
    private final long version;
    private final State state;
    private final Settings settings;
    private final Diff<ImmutableOpenMap<String, MappingMetaData>> mappings;
    private final Diff<ImmutableOpenMap<String, AliasMetaData>> aliases;
    private final Diff<ImmutableOpenMap<String, Custom>> customs;
    private final Diff<ImmutableOpenIntMap<Set<String>>> activeAllocationIds;

    public IndexMetaDataDiff(IndexMetaData before, IndexMetaData after) {
      index = after.index.getName();
      version = after.version;
      state = after.state;
      settings = after.settings;
      mappings =
          DiffableUtils.diff(
              before.mappings, after.mappings, DiffableUtils.getStringKeySerializer());
      aliases =
          DiffableUtils.diff(before.aliases, after.aliases, DiffableUtils.getStringKeySerializer());
      customs =
          DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
      activeAllocationIds =
          DiffableUtils.diff(
              before.activeAllocationIds,
              after.activeAllocationIds,
              DiffableUtils.getVIntKeySerializer(),
              DiffableUtils.StringSetValueSerializer.getInstance());
    }

    public IndexMetaDataDiff(StreamInput in) throws IOException {
      index = in.readString();
      version = in.readLong();
      state = State.fromId(in.readByte());
      settings = Settings.readSettingsFromStream(in);
      mappings =
          DiffableUtils.readImmutableOpenMapDiff(
              in, DiffableUtils.getStringKeySerializer(), MappingMetaData.PROTO);
      aliases =
          DiffableUtils.readImmutableOpenMapDiff(
              in, DiffableUtils.getStringKeySerializer(), AliasMetaData.PROTO);
      customs =
          DiffableUtils.readImmutableOpenMapDiff(
              in,
              DiffableUtils.getStringKeySerializer(),
              new DiffableUtils.DiffableValueSerializer<String, Custom>() {
                @Override
                public Custom read(StreamInput in, String key) throws IOException {
                  return lookupPrototypeSafe(key).readFrom(in);
                }

                @Override
                public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
                  return lookupPrototypeSafe(key).readDiffFrom(in);
                }
              });
      activeAllocationIds =
          DiffableUtils.readImmutableOpenIntMapDiff(
              in,
              DiffableUtils.getVIntKeySerializer(),
              DiffableUtils.StringSetValueSerializer.getInstance());
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
      out.writeString(index);
      out.writeLong(version);
      out.writeByte(state.id);
      Settings.writeSettingsToStream(settings, out);
      mappings.writeTo(out);
      aliases.writeTo(out);
      customs.writeTo(out);
      activeAllocationIds.writeTo(out);
    }

    @Override
    public IndexMetaData apply(IndexMetaData part) {
      Builder builder = builder(index);
      builder.version(version);
      builder.state(state);
      builder.settings(settings);
      builder.mappings.putAll(mappings.apply(part.mappings));
      builder.aliases.putAll(aliases.apply(part.aliases));
      builder.customs.putAll(customs.apply(part.customs));
      builder.activeAllocationIds.putAll(activeAllocationIds.apply(part.activeAllocationIds));
      return builder.build();
    }
  }
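
  // A minimal sketch of the diff round trip IndexMetaDataDiff supports, assuming
  // two instances `before` and `after` (variable names are illustrative):
  //
  //   Diff<IndexMetaData> diff = after.diff(before); // carries only the delta
  //   IndexMetaData rebuilt = diff.apply(before);    // equivalent to `after`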

  @Override
  public IndexMetaData readFrom(StreamInput in) throws IOException {
    Builder builder = new Builder(in.readString());
    builder.version(in.readLong());
    builder.state(State.fromId(in.readByte()));
    builder.settings(readSettingsFromStream(in));
    int mappingsSize = in.readVInt();
    for (int i = 0; i < mappingsSize; i++) {
      MappingMetaData mappingMd = MappingMetaData.PROTO.readFrom(in);
      builder.putMapping(mappingMd);
    }
    int aliasesSize = in.readVInt();
    for (int i = 0; i < aliasesSize; i++) {
      AliasMetaData aliasMd = AliasMetaData.Builder.readFrom(in);
      builder.putAlias(aliasMd);
    }
    int customSize = in.readVInt();
    for (int i = 0; i < customSize; i++) {
      String type = in.readString();
      Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in);
      builder.putCustom(type, customIndexMetaData);
    }
    int activeAllocationIdsSize = in.readVInt();
    for (int i = 0; i < activeAllocationIdsSize; i++) {
      int key = in.readVInt();
      Set<String> allocationIds =
          DiffableUtils.StringSetValueSerializer.getInstance().read(in, key);
      builder.putActiveAllocationIds(key, allocationIds);
    }
    return builder.build();
  }

  @Override
  public void writeTo(StreamOutput out) throws IOException {
    out.writeString(index.getName()); // uuid will come as part of settings
    out.writeLong(version);
    out.writeByte(state.id());
    writeSettingsToStream(settings, out);
    out.writeVInt(mappings.size());
    for (ObjectCursor<MappingMetaData> cursor : mappings.values()) {
      cursor.value.writeTo(out);
    }
    out.writeVInt(aliases.size());
    for (ObjectCursor<AliasMetaData> cursor : aliases.values()) {
      cursor.value.writeTo(out);
    }
    out.writeVInt(customs.size());
    for (ObjectObjectCursor<String, Custom> cursor : customs) {
      out.writeString(cursor.key);
      cursor.value.writeTo(out);
    }
    out.writeVInt(activeAllocationIds.size());
    for (IntObjectCursor<Set<String>> cursor : activeAllocationIds) {
      out.writeVInt(cursor.key);
      DiffableUtils.StringSetValueSerializer.getInstance().write(cursor.value, out);
    }
  }
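
  // Note: the field order written above mirrors readFrom(StreamInput); the wire
  // format carries no tags, so writer and reader must stay in lockstep.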

  public static Builder builder(String index) {
    return new Builder(index);
  }

  public static Builder builder(IndexMetaData indexMetaData) {
    return new Builder(indexMetaData);
  }

  public static class Builder {

    private String index;
    private State state = State.OPEN;
    private long version = 1;
    private Settings settings = Settings.Builder.EMPTY_SETTINGS;
    private final ImmutableOpenMap.Builder<String, MappingMetaData> mappings;
    private final ImmutableOpenMap.Builder<String, AliasMetaData> aliases;
    private final ImmutableOpenMap.Builder<String, Custom> customs;
    private final ImmutableOpenIntMap.Builder<Set<String>> activeAllocationIds;

    public Builder(String index) {
      this.index = index;
      this.mappings = ImmutableOpenMap.builder();
      this.aliases = ImmutableOpenMap.builder();
      this.customs = ImmutableOpenMap.builder();
      this.activeAllocationIds = ImmutableOpenIntMap.builder();
    }

    public Builder(IndexMetaData indexMetaData) {
      this.index = indexMetaData.getIndex().getName();
      this.state = indexMetaData.state;
      this.version = indexMetaData.version;
      this.settings = indexMetaData.getSettings();
      this.mappings = ImmutableOpenMap.builder(indexMetaData.mappings);
      this.aliases = ImmutableOpenMap.builder(indexMetaData.aliases);
      this.customs = ImmutableOpenMap.builder(indexMetaData.customs);
      this.activeAllocationIds = ImmutableOpenIntMap.builder(indexMetaData.activeAllocationIds);
    }

    public String index() {
      return index;
    }

    public Builder index(String index) {
      this.index = index;
      return this;
    }

    public Builder numberOfShards(int numberOfShards) {
      settings =
          settingsBuilder().put(settings).put(SETTING_NUMBER_OF_SHARDS, numberOfShards).build();
      return this;
    }

    public int numberOfShards() {
      return settings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1);
    }

    public Builder numberOfReplicas(int numberOfReplicas) {
      settings =
          settingsBuilder().put(settings).put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas).build();
      return this;
    }

    public int numberOfReplicas() {
      return settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1);
    }

    public Builder creationDate(long creationDate) {
      settings = settingsBuilder().put(settings).put(SETTING_CREATION_DATE, creationDate).build();
      return this;
    }

    public Builder settings(Settings.Builder settings) {
      this.settings = settings.build();
      return this;
    }

    public Builder settings(Settings settings) {
      this.settings = settings;
      return this;
    }

    public MappingMetaData mapping(String type) {
      return mappings.get(type);
    }

    public Builder putMapping(String type, String source) throws IOException {
      try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
        putMapping(new MappingMetaData(type, parser.mapOrdered()));
      }
      return this;
    }

    public Builder putMapping(MappingMetaData mappingMd) {
      mappings.put(mappingMd.type(), mappingMd);
      return this;
    }

    public Builder state(State state) {
      this.state = state;
      return this;
    }

    public Builder putAlias(AliasMetaData aliasMetaData) {
      aliases.put(aliasMetaData.alias(), aliasMetaData);
      return this;
    }

    public Builder putAlias(AliasMetaData.Builder aliasMetaData) {
      aliases.put(aliasMetaData.alias(), aliasMetaData.build());
      return this;
    }

    public Builder removeAlias(String alias) {
      aliases.remove(alias);
      return this;
    }

    public Builder removeAllAliases() {
      aliases.clear();
      return this;
    }

    public Builder putCustom(String type, Custom customIndexMetaData) {
      this.customs.put(type, customIndexMetaData);
      return this;
    }

    public Builder putActiveAllocationIds(int shardId, Set<String> allocationIds) {
      activeAllocationIds.put(shardId, new HashSet<>(allocationIds));
      return this;
    }

    public long version() {
      return this.version;
    }

    public Builder version(long version) {
      this.version = version;
      return this;
    }

    public IndexMetaData build() {
      ImmutableOpenMap.Builder<String, AliasMetaData> tmpAliases = aliases;
      Settings tmpSettings = settings;

      // update default mapping on the MappingMetaData
      if (mappings.containsKey(MapperService.DEFAULT_MAPPING)) {
        MappingMetaData defaultMapping = mappings.get(MapperService.DEFAULT_MAPPING);
        for (ObjectCursor<MappingMetaData> cursor : mappings.values()) {
          cursor.value.updateDefaultMapping(defaultMapping);
        }
      }

      Integer maybeNumberOfShards = settings.getAsInt(SETTING_NUMBER_OF_SHARDS, null);
      if (maybeNumberOfShards == null) {
        throw new IllegalArgumentException("must specify numberOfShards for index [" + index + "]");
      }
      int numberOfShards = maybeNumberOfShards;
      if (numberOfShards <= 0) {
        throw new IllegalArgumentException(
            "must specify positive number of shards for index [" + index + "]");
      }

      Integer maybeNumberOfReplicas = settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, null);
      if (maybeNumberOfReplicas == null) {
        throw new IllegalArgumentException(
            "must specify numberOfReplicas for index [" + index + "]");
      }
      int numberOfReplicas = maybeNumberOfReplicas;
      if (numberOfReplicas < 0) {
        throw new IllegalArgumentException(
            "must specify non-negative number of replicas for index [" + index + "]");
      }

      // fill missing slots in activeAllocationIds with empty set if needed and make all entries
      // immutable
      ImmutableOpenIntMap.Builder<Set<String>> filledActiveAllocationIds =
          ImmutableOpenIntMap.builder();
      for (int i = 0; i < numberOfShards; i++) {
        if (activeAllocationIds.containsKey(i)) {
          filledActiveAllocationIds.put(
              i, Collections.unmodifiableSet(new HashSet<>(activeAllocationIds.get(i))));
        } else {
          filledActiveAllocationIds.put(i, Collections.emptySet());
        }
      }
      final Map<String, String> requireMap =
          INDEX_ROUTING_REQUIRE_GROUP_SETTING.get(settings).getAsMap();
      final DiscoveryNodeFilters requireFilters;
      if (requireMap.isEmpty()) {
        requireFilters = null;
      } else {
        requireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap);
      }
      Map<String, String> includeMap = INDEX_ROUTING_INCLUDE_GROUP_SETTING.get(settings).getAsMap();
      final DiscoveryNodeFilters includeFilters;
      if (includeMap.isEmpty()) {
        includeFilters = null;
      } else {
        includeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap);
      }
      Map<String, String> excludeMap = INDEX_ROUTING_EXCLUDE_GROUP_SETTING.get(settings).getAsMap();
      final DiscoveryNodeFilters excludeFilters;
      if (excludeMap.isEmpty()) {
        excludeFilters = null;
      } else {
        excludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap);
      }
      Version indexCreatedVersion = Version.indexCreated(settings);
      Version indexUpgradedVersion =
          settings.getAsVersion(IndexMetaData.SETTING_VERSION_UPGRADED, indexCreatedVersion);
      String stringLuceneVersion = settings.get(SETTING_VERSION_MINIMUM_COMPATIBLE);
      final org.apache.lucene.util.Version minimumCompatibleLuceneVersion;
      if (stringLuceneVersion != null) {
        try {
          minimumCompatibleLuceneVersion =
              org.apache.lucene.util.Version.parse(stringLuceneVersion);
        } catch (ParseException ex) {
          throw new IllegalStateException(
              "Cannot parse lucene version ["
                  + stringLuceneVersion
                  + "] in the ["
                  + SETTING_VERSION_MINIMUM_COMPATIBLE
                  + "] setting",
              ex);
        }
      } else {
        minimumCompatibleLuceneVersion = null;
      }

      final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE);
      return new IndexMetaData(
          new Index(index, uuid),
          version,
          state,
          numberOfShards,
          numberOfReplicas,
          tmpSettings,
          mappings.build(),
          tmpAliases.build(),
          customs.build(),
          filledActiveAllocationIds.build(),
          requireFilters,
          includeFilters,
          excludeFilters,
          indexCreatedVersion,
          indexUpgradedVersion,
          minimumCompatibleLuceneVersion);
    }

    public static void toXContent(
        IndexMetaData indexMetaData, XContentBuilder builder, ToXContent.Params params)
        throws IOException {
      builder.startObject(
          indexMetaData.getIndex().getName(), XContentBuilder.FieldCaseConversion.NONE);

      builder.field("version", indexMetaData.getVersion());
      builder.field("state", indexMetaData.getState().toString().toLowerCase(Locale.ENGLISH));

      boolean binary = params.paramAsBoolean("binary", false);

      builder.startObject("settings");
      for (Map.Entry<String, String> entry : indexMetaData.getSettings().getAsMap().entrySet()) {
        builder.field(entry.getKey(), entry.getValue());
      }
      builder.endObject();

      builder.startArray("mappings");
      for (ObjectObjectCursor<String, MappingMetaData> cursor : indexMetaData.getMappings()) {
        if (binary) {
          builder.value(cursor.value.source().compressed());
        } else {
          byte[] data = cursor.value.source().uncompressed();
          try (XContentParser parser = XContentFactory.xContent(data).createParser(data)) {
            builder.map(parser.mapOrdered());
          }
        }
      }
      builder.endArray();

      for (ObjectObjectCursor<String, Custom> cursor : indexMetaData.getCustoms()) {
        builder.startObject(cursor.key, XContentBuilder.FieldCaseConversion.NONE);
        cursor.value.toXContent(builder, params);
        builder.endObject();
      }

      builder.startObject("aliases");
      for (ObjectCursor<AliasMetaData> cursor : indexMetaData.getAliases().values()) {
        AliasMetaData.Builder.toXContent(cursor.value, builder, params);
      }
      builder.endObject();

      builder.startObject(KEY_ACTIVE_ALLOCATIONS);
      for (IntObjectCursor<Set<String>> cursor : indexMetaData.activeAllocationIds) {
        builder.startArray(String.valueOf(cursor.key));
        for (String allocationId : cursor.value) {
          builder.value(allocationId);
        }
        builder.endArray();
      }
      builder.endObject();

      builder.endObject();
    }

    public static IndexMetaData fromXContent(XContentParser parser) throws IOException {
      if (parser.currentToken() == null) { // fresh parser? move to the first token
        parser.nextToken();
      }
      if (parser.currentToken()
          == XContentParser.Token.START_OBJECT) { // on a start object move to next token
        parser.nextToken();
      }
      if (parser.currentToken() != XContentParser.Token.FIELD_NAME) {
        throw new IllegalArgumentException(
            "expected field name but got a " + parser.currentToken());
      }
      Builder builder = new Builder(parser.currentName());

      String currentFieldName = null;
      XContentParser.Token token = parser.nextToken();
      if (token != XContentParser.Token.START_OBJECT) {
        throw new IllegalArgumentException("expected object but got a " + token);
      }
      while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
          currentFieldName = parser.currentName();
        } else if (token == XContentParser.Token.START_OBJECT) {
          if ("settings".equals(currentFieldName)) {
            builder.settings(
                Settings.settingsBuilder()
                    .put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())));
          } else if ("mappings".equals(currentFieldName)) {
            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
              if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
              } else if (token == XContentParser.Token.START_OBJECT) {
                String mappingType = currentFieldName;
                Map<String, Object> mappingSource =
                    MapBuilder.<String, Object>newMapBuilder()
                        .put(mappingType, parser.mapOrdered())
                        .map();
                builder.putMapping(new MappingMetaData(mappingType, mappingSource));
              } else {
                throw new IllegalArgumentException("Unexpected token: " + token);
              }
            }
          } else if ("aliases".equals(currentFieldName)) {
            while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
              builder.putAlias(AliasMetaData.Builder.fromXContent(parser));
            }
          } else if (KEY_ACTIVE_ALLOCATIONS.equals(currentFieldName)) {
            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
              if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
              } else if (token == XContentParser.Token.START_ARRAY) {
                String shardId = currentFieldName;
                Set<String> allocationIds = new HashSet<>();
                while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                  if (token == XContentParser.Token.VALUE_STRING) {
                    allocationIds.add(parser.text());
                  }
                }
                builder.putActiveAllocationIds(Integer.valueOf(shardId), allocationIds);
              } else {
                throw new IllegalArgumentException("Unexpected token: " + token);
              }
            }
          } else if ("warmers".equals(currentFieldName)) {
            // TODO: do this in 4.0:
            // throw new IllegalArgumentException("Warmers are not supported anymore - are you
            // upgrading from 1.x?");
            // ignore: warmers have been removed in 3.0 and are
            // simply ignored when upgrading from 2.x
            assert Version.CURRENT.major <= 3;
            parser.skipChildren();
          } else {
            // check if it's a custom index metadata
            Custom proto = lookupPrototype(currentFieldName);
            if (proto == null) {
              // TODO warn
              parser.skipChildren();
            } else {
              Custom custom = proto.fromXContent(parser);
              builder.putCustom(custom.type(), custom);
            }
          }
        } else if (token == XContentParser.Token.START_ARRAY) {
          if ("mappings".equals(currentFieldName)) {
            while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
              if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
                builder.putMapping(
                    new MappingMetaData(new CompressedXContent(parser.binaryValue())));
              } else {
                Map<String, Object> mapping = parser.mapOrdered();
                if (mapping.size() == 1) {
                  String mappingType = mapping.keySet().iterator().next();
                  builder.putMapping(new MappingMetaData(mappingType, mapping));
                }
              }
            }
          } else {
            throw new IllegalArgumentException("Unexpected field for an array " + currentFieldName);
          }
        } else if (token.isValue()) {
          if ("state".equals(currentFieldName)) {
            builder.state(State.fromString(parser.text()));
          } else if ("version".equals(currentFieldName)) {
            builder.version(parser.longValue());
          } else {
            throw new IllegalArgumentException("Unexpected field [" + currentFieldName + "]");
          }
        } else {
          throw new IllegalArgumentException("Unexpected token " + token);
        }
      }
      return builder.build();
    }

    public static IndexMetaData readFrom(StreamInput in) throws IOException {
      return PROTO.readFrom(in);
    }
  }

  /**
   * Returns <code>true</code> iff the given settings indicate that the index associated with these
   * settings allocates its shards on a shared filesystem. Otherwise <code>false</code>. The
   * default setting for this is the returned value from {@link
   * #isIndexUsingShadowReplicas(org.elasticsearch.common.settings.Settings)}.
   */
  public static boolean isOnSharedFilesystem(Settings settings) {
    return settings.getAsBoolean(SETTING_SHARED_FILESYSTEM, isIndexUsingShadowReplicas(settings));
  }

  /**
   * Returns <code>true</code> iff the given settings indicate that the index associated with these
   * settings uses shadow replicas. Otherwise <code>false</code>. The default setting for this is
   * <code>false</code>.
   */
  public static boolean isIndexUsingShadowReplicas(Settings settings) {
    return settings.getAsBoolean(SETTING_SHADOW_REPLICAS, false);
  }

  /**
   * Adds human readable version and creation date settings. This method is used to display the
   * settings in a human readable format in REST API
   */
  public static Settings addHumanReadableSettings(Settings settings) {
    Settings.Builder builder = Settings.builder().put(settings);
    Version version = settings.getAsVersion(SETTING_VERSION_CREATED, null);
    if (version != null) {
      builder.put(SETTING_VERSION_CREATED_STRING, version.toString());
    }
    Version versionUpgraded = settings.getAsVersion(SETTING_VERSION_UPGRADED, null);
    if (versionUpgraded != null) {
      builder.put(SETTING_VERSION_UPGRADED_STRING, versionUpgraded.toString());
    }
    Long creationDate = settings.getAsLong(SETTING_CREATION_DATE, null);
    if (creationDate != null) {
      DateTime creationDateTime = new DateTime(creationDate, DateTimeZone.UTC);
      builder.put(SETTING_CREATION_DATE_STRING, creationDateTime.toString());
    }
    return builder.build();
  }
}
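
The Builder above refuses to build unless both index.number_of_shards and
index.number_of_replicas are present and in range. A minimal sketch of that flow,
with illustrative values (not defaults):

// Builds a one-shard, one-replica index; build() throws IllegalArgumentException
// if either count is missing, the shard count is not positive, or the replica
// count is negative.
IndexMetaData metaData =
    IndexMetaData.builder("my_index")
        .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
        .numberOfShards(1)
        .numberOfReplicas(1)
        .build();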
Example #11
/** A module to handle registering and binding all network related classes. */
public final class NetworkModule {

  public static final String TRANSPORT_TYPE_KEY = "transport.type";
  public static final String HTTP_TYPE_KEY = "http.type";
  public static final String LOCAL_TRANSPORT = "local";
  public static final String HTTP_TYPE_DEFAULT_KEY = "http.type.default";
  public static final String TRANSPORT_TYPE_DEFAULT_KEY = "transport.type.default";

  public static final Setting<String> TRANSPORT_DEFAULT_TYPE_SETTING =
      Setting.simpleString(TRANSPORT_TYPE_DEFAULT_KEY, Property.NodeScope);
  public static final Setting<String> HTTP_DEFAULT_TYPE_SETTING =
      Setting.simpleString(HTTP_TYPE_DEFAULT_KEY, Property.NodeScope);
  public static final Setting<String> HTTP_TYPE_SETTING =
      Setting.simpleString(HTTP_TYPE_KEY, Property.NodeScope);
  public static final Setting<Boolean> HTTP_ENABLED =
      Setting.boolSetting("http.enabled", true, Property.NodeScope);
  public static final Setting<String> TRANSPORT_TYPE_SETTING =
      Setting.simpleString(TRANSPORT_TYPE_KEY, Property.NodeScope);

  private final Settings settings;
  private final boolean transportClient;

  private static final AllocationCommandRegistry allocationCommandRegistry =
      new AllocationCommandRegistry();
  private static final List<NamedWriteableRegistry.Entry> namedWriteables = new ArrayList<>();

  private final Map<String, Supplier<Transport>> transportFactories = new HashMap<>();
  private final Map<String, Supplier<HttpServerTransport>> transportHttpFactories = new HashMap<>();
  private final List<TransportInterceptor> transportInterceptors = new ArrayList<>();

  static {
    registerAllocationCommand(
        CancelAllocationCommand::new,
        CancelAllocationCommand::fromXContent,
        CancelAllocationCommand.COMMAND_NAME_FIELD);
    registerAllocationCommand(
        MoveAllocationCommand::new,
        MoveAllocationCommand::fromXContent,
        MoveAllocationCommand.COMMAND_NAME_FIELD);
    registerAllocationCommand(
        AllocateReplicaAllocationCommand::new,
        AllocateReplicaAllocationCommand::fromXContent,
        AllocateReplicaAllocationCommand.COMMAND_NAME_FIELD);
    registerAllocationCommand(
        AllocateEmptyPrimaryAllocationCommand::new,
        AllocateEmptyPrimaryAllocationCommand::fromXContent,
        AllocateEmptyPrimaryAllocationCommand.COMMAND_NAME_FIELD);
    registerAllocationCommand(
        AllocateStalePrimaryAllocationCommand::new,
        AllocateStalePrimaryAllocationCommand::fromXContent,
        AllocateStalePrimaryAllocationCommand.COMMAND_NAME_FIELD);
    namedWriteables.add(
        new NamedWriteableRegistry.Entry(
            Task.Status.class, ReplicationTask.Status.NAME, ReplicationTask.Status::new));
    namedWriteables.add(
        new NamedWriteableRegistry.Entry(
            Task.Status.class, RawTaskStatus.NAME, RawTaskStatus::new));
  }

  /**
   * Creates a network module that custom networking classes can be plugged into.
   *
   * @param settings The settings for the node
   * @param transportClient True if only transport classes should be allowed to be registered, false
   *     otherwise.
   */
  public NetworkModule(
      Settings settings,
      boolean transportClient,
      List<NetworkPlugin> plugins,
      ThreadPool threadPool,
      BigArrays bigArrays,
      CircuitBreakerService circuitBreakerService,
      NamedWriteableRegistry namedWriteableRegistry,
      NetworkService networkService) {
    this.settings = settings;
    this.transportClient = transportClient;
    for (NetworkPlugin plugin : plugins) {
      if (transportClient == false && HTTP_ENABLED.get(settings)) {
        Map<String, Supplier<HttpServerTransport>> httpTransportFactory =
            plugin.getHttpTransports(
                settings,
                threadPool,
                bigArrays,
                circuitBreakerService,
                namedWriteableRegistry,
                networkService);
        for (Map.Entry<String, Supplier<HttpServerTransport>> entry :
            httpTransportFactory.entrySet()) {
          registerHttpTransport(entry.getKey(), entry.getValue());
        }
      }
      Map<String, Supplier<Transport>> transportFactory =
          plugin.getTransports(
              settings,
              threadPool,
              bigArrays,
              circuitBreakerService,
              namedWriteableRegistry,
              networkService);
      for (Map.Entry<String, Supplier<Transport>> entry : transportFactory.entrySet()) {
        registerTransport(entry.getKey(), entry.getValue());
      }
      List<TransportInterceptor> transportInterceptors =
          plugin.getTransportInterceptors(namedWriteableRegistry);
      for (TransportInterceptor interceptor : transportInterceptors) {
        registerTransportInterceptor(interceptor);
      }
    }
  }

  public boolean isTransportClient() {
    return transportClient;
  }

  /**
   * Adds a transport implementation that can be selected by setting {@link #TRANSPORT_TYPE_KEY}.
   */
  private void registerTransport(String key, Supplier<Transport> factory) {
    if (transportFactories.putIfAbsent(key, factory) != null) {
      throw new IllegalArgumentException("transport for name: " + key + " is already registered");
    }
  }

  /**
   * Adds an http transport implementation that can be selected by setting {@link #HTTP_TYPE_KEY}.
   */
  // TODO: we need another name than "http transport"....so confusing with transportClient...
  private void registerHttpTransport(String key, Supplier<HttpServerTransport> factory) {
    if (transportClient) {
      throw new IllegalArgumentException(
          "Cannot register http transport " + key + " for transport client");
    }
    if (transportHttpFactories.putIfAbsent(key, factory) != null) {
      throw new IllegalArgumentException("transport for name: " + key + " is already registered");
    }
  }

  /**
   * Register an allocation command.
   *
   * <p>This lives here instead of the more aptly named ClusterModule because the Transport client
   * needs these to be registered.
   *
   * @param reader the reader to read it from a stream
   * @param parser the parser to read it from XContent
   * @param commandName the names under which the command should be parsed. The {@link
   *     ParseField#getPreferredName()} is special because it is the name under which the command's
   *     reader is registered.
   */
  private static <T extends AllocationCommand> void registerAllocationCommand(
      Writeable.Reader<T> reader, AllocationCommand.Parser<T> parser, ParseField commandName) {
    allocationCommandRegistry.register(parser, commandName);
    namedWriteables.add(new Entry(AllocationCommand.class, commandName.getPreferredName(), reader));
  }

  /** The registry of allocation command parsers. */
  public static AllocationCommandRegistry getAllocationCommandRegistry() {
    return allocationCommandRegistry;
  }

  public static List<Entry> getNamedWriteables() {
    return Collections.unmodifiableList(namedWriteables);
  }

  public Supplier<HttpServerTransport> getHttpServerTransportSupplier() {
    final String name;
    if (HTTP_TYPE_SETTING.exists(settings)) {
      name = HTTP_TYPE_SETTING.get(settings);
    } else {
      name = HTTP_DEFAULT_TYPE_SETTING.get(settings);
    }
    final Supplier<HttpServerTransport> factory = transportHttpFactories.get(name);
    if (factory == null) {
      throw new IllegalStateException("Unsupported http.type [" + name + "]");
    }
    return factory;
  }

  public boolean isHttpEnabled() {
    return transportClient == false && HTTP_ENABLED.get(settings);
  }

  public Supplier<Transport> getTransportSupplier() {
    final String name;
    if (TRANSPORT_TYPE_SETTING.exists(settings)) {
      name = TRANSPORT_TYPE_SETTING.get(settings);
    } else {
      name = TRANSPORT_DEFAULT_TYPE_SETTING.get(settings);
    }
    final Supplier<Transport> factory = transportFactories.get(name);
    if (factory == null) {
      throw new IllegalStateException("Unsupported transport.type [" + name + "]");
    }
    return factory;
  }

  /** Registers a new {@link TransportInterceptor} */
  private void registerTransportInterceptor(TransportInterceptor interceptor) {
    this.transportInterceptors.add(
        Objects.requireNonNull(interceptor, "interceptor must not be null"));
  }

  /**
   * Returns a composite {@link TransportInterceptor} containing all registered interceptors
   *
   * @see #registerTransportInterceptor(TransportInterceptor)
   */
  public TransportInterceptor getTransportInterceptor() {
    return new CompositeTransportInterceptor(this.transportInterceptors);
  }

  static final class CompositeTransportInterceptor implements TransportInterceptor {
    final List<TransportInterceptor> transportInterceptors;

    private CompositeTransportInterceptor(List<TransportInterceptor> transportInterceptors) {
      this.transportInterceptors = new ArrayList<>(transportInterceptors);
    }

    @Override
    public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler(
        String action, String executor, TransportRequestHandler<T> actualHandler) {
      for (TransportInterceptor interceptor : this.transportInterceptors) {
        actualHandler = interceptor.interceptHandler(action, executor, actualHandler);
      }
      return actualHandler;
    }

    @Override
    public AsyncSender interceptSender(AsyncSender sender) {
      for (TransportInterceptor interceptor : this.transportInterceptors) {
        sender = interceptor.interceptSender(sender);
      }
      return sender;
    }
  }
}
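
Transport selection in getTransportSupplier() reads transport.type and falls back
to transport.type.default. A hedged sketch of driving that lookup from node
settings; the "netty4" key is illustrative and must match whatever key a
NetworkPlugin actually registered:

// Resolves the Transport factory registered under "netty4"; an unregistered key
// fails with IllegalStateException("Unsupported transport.type [...]").
// `networkModule` is assumed to be an already-constructed NetworkModule.
Settings settings = Settings.builder()
    .put(NetworkModule.TRANSPORT_TYPE_KEY, "netty4")
    .build();
Supplier<Transport> transport = networkModule.getTransportSupplier();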
Example #12
/** A component that holds all data paths for a single node. */
public final class NodeEnvironment implements Closeable {

  private final Logger logger;

  public static class NodePath {
    /* ${data.paths}/nodes/{node.id} */
    public final Path path;
    /* ${data.paths}/nodes/{node.id}/indices */
    public final Path indicesPath;
    /** Cached FileStore from path */
    public final FileStore fileStore;
    /**
     * Cached result of Lucene's {@code IOUtils.spins} on path. This is a trilean value: null means
     * we could not determine it (we are not running on Linux, or we hit an exception trying), True
     * means the device possibly spins and False means it does not.
     */
    public final Boolean spins;
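
    // Interpretation of the trilean (illustrative):
    //   spins == null  -> unknown (non-Linux, or detection failed)
    //   spins == TRUE  -> device possibly spins (rotational disk)
    //   spins == FALSE -> device does not spin (e.g. SSD)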

    public final int majorDeviceNumber;
    public final int minorDeviceNumber;

    public NodePath(Path path) throws IOException {
      this.path = path;
      this.indicesPath = path.resolve(INDICES_FOLDER);
      this.fileStore = Environment.getFileStore(path);
      if (fileStore.supportsFileAttributeView("lucene")) {
        this.spins = (Boolean) fileStore.getAttribute("lucene:spins");
        this.majorDeviceNumber = (int) fileStore.getAttribute("lucene:major_device_number");
        this.minorDeviceNumber = (int) fileStore.getAttribute("lucene:minor_device_number");
      } else {
        this.spins = null;
        this.majorDeviceNumber = -1;
        this.minorDeviceNumber = -1;
      }
    }

    /**
     * Resolves the given shards directory against this NodePath
     * ${data.paths}/nodes/{node.id}/indices/{index.uuid}/{shard.id}
     */
    public Path resolve(ShardId shardId) {
      return resolve(shardId.getIndex()).resolve(Integer.toString(shardId.id()));
    }

    /**
     * Resolves index directory against this NodePath
     * ${data.paths}/nodes/{node.id}/indices/{index.uuid}
     */
    public Path resolve(Index index) {
      return indicesPath.resolve(index.getUUID());
    }

    @Override
    public String toString() {
      return "NodePath{" + "path=" + path + ", spins=" + spins + '}';
    }
  }

  private final NodePath[] nodePaths;
  private final Path sharedDataPath;
  private final Lock[] locks;

  private final int nodeLockId;
  private final AtomicBoolean closed = new AtomicBoolean(false);
  private final Map<ShardId, InternalShardLock> shardLocks = new HashMap<>();

  private final NodeMetaData nodeMetaData;

  /** Maximum number of data nodes that should run in an environment. */
  public static final Setting<Integer> MAX_LOCAL_STORAGE_NODES_SETTING =
      Setting.intSetting("node.max_local_storage_nodes", 1, 1, Property.NodeScope);

  /** If true, automatically append the node lock id to custom data paths. */
  public static final Setting<Boolean> ADD_NODE_LOCK_ID_TO_CUSTOM_PATH =
      Setting.boolSetting("node.add_lock_id_to_custom_path", true, Property.NodeScope);

  /**
   * Seed for determining the persisted unique uuid of this node. If the node already has a
   * persisted uuid on disk, this seed is ignored and the uuid from disk is reused.
   */
  public static final Setting<Long> NODE_ID_SEED_SETTING =
      Setting.longSetting("node.id.seed", 0L, Long.MIN_VALUE, Property.NodeScope);

  /** If true, the [verbose] SegmentInfos.infoStream logging is sent to System.out. */
  public static final Setting<Boolean> ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING =
      Setting.boolSetting("node.enable_lucene_segment_infos_trace", false, Property.NodeScope);

  public static final String NODES_FOLDER = "nodes";
  public static final String INDICES_FOLDER = "indices";
  public static final String NODE_LOCK_FILENAME = "node.lock";

  public NodeEnvironment(Settings settings, Environment environment) throws IOException {

    if (!DiscoveryNode.nodeRequiresLocalStorage(settings)) {
      nodePaths = null;
      sharedDataPath = null;
      locks = null;
      nodeLockId = -1;
      nodeMetaData = new NodeMetaData(generateNodeId(settings));
      logger =
          Loggers.getLogger(
              getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId()));
      return;
    }
    final NodePath[] nodePaths = new NodePath[environment.dataWithClusterFiles().length];
    final Lock[] locks = new Lock[nodePaths.length];
    boolean success = false;

    // trace logger to debug issues before the default node name is derived from the node id
    Logger startupTraceLogger = Loggers.getLogger(getClass(), settings);

    try {
      sharedDataPath = environment.sharedDataFile();
      int nodeLockId = -1;
      IOException lastException = null;
      int maxLocalStorageNodes = MAX_LOCAL_STORAGE_NODES_SETTING.get(settings);
      for (int possibleLockId = 0; possibleLockId < maxLocalStorageNodes; possibleLockId++) {
        for (int dirIndex = 0; dirIndex < environment.dataFiles().length; dirIndex++) {
          Path dataDirWithClusterName = environment.dataWithClusterFiles()[dirIndex];
          Path dataDir = environment.dataFiles()[dirIndex];
          Path dir = dataDir.resolve(NODES_FOLDER).resolve(Integer.toString(possibleLockId));
          Files.createDirectories(dir);

          try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) {
            startupTraceLogger.trace("obtaining node lock on {} ...", dir.toAbsolutePath());
            try {
              locks[dirIndex] = luceneDir.obtainLock(NODE_LOCK_FILENAME);
              nodePaths[dirIndex] = new NodePath(dir);
              nodeLockId = possibleLockId;
            } catch (LockObtainFailedException ex) {
              startupTraceLogger.trace("failed to obtain node lock on {}", dir.toAbsolutePath());
              // release all the ones that were obtained up until now
              releaseAndNullLocks(locks);
              break;
            }

          } catch (IOException e) {
            startupTraceLogger.trace(
                (Supplier<?>)
                    () ->
                        new ParameterizedMessage(
                            "failed to obtain node lock on {}", dir.toAbsolutePath()),
                e);
            lastException = new IOException("failed to obtain lock on " + dir.toAbsolutePath(), e);
            // release all the ones that were obtained up until now
            releaseAndNullLocks(locks);
            break;
          }
        }
        if (locks[0] != null) {
          // we found a lock, break
          break;
        }
      }

      if (locks[0] == null) {
        final String message =
            String.format(
                Locale.ROOT,
                "failed to obtain node locks, tried [%s] with lock id%s;"
                    + " maybe these locations are not writable or multiple nodes were started without increasing [%s] (was [%d])?",
                Arrays.toString(environment.dataWithClusterFiles()),
                maxLocalStorageNodes == 1 ? " [0]" : "s [0--" + (maxLocalStorageNodes - 1) + "]",
                MAX_LOCAL_STORAGE_NODES_SETTING.getKey(),
                maxLocalStorageNodes);
        throw new IllegalStateException(message, lastException);
      }
      this.nodeMetaData = loadOrCreateNodeMetaData(settings, startupTraceLogger, nodePaths);
      this.logger =
          Loggers.getLogger(
              getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId()));

      this.nodeLockId = nodeLockId;
      this.locks = locks;
      this.nodePaths = nodePaths;

      if (logger.isDebugEnabled()) {
        logger.debug("using node location [{}], local_lock_id [{}]", nodePaths, nodeLockId);
      }

      maybeLogPathDetails();
      maybeLogHeapDetails();

      applySegmentInfosTrace(settings);
      assertCanWrite();
      success = true;
    } finally {
      if (success == false) {
        IOUtils.closeWhileHandlingException(locks);
      }
    }
  }
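
  // Hedged illustration of the layout the lock loop above produces, assuming
  // node.max_local_storage_nodes=2 and a single data path (paths illustrative):
  //
  //   ${data.path}/nodes/0/node.lock  <- first node on the machine
  //   ${data.path}/nodes/1/node.lock  <- a second node falls through to lock id 1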

  /** Returns true if the directory is empty */
  private static boolean dirEmpty(final Path path) throws IOException {
    try (DirectoryStream<Path> stream = Files.newDirectoryStream(path)) {
      return stream.iterator().hasNext() == false;
    }
  }

  private static void releaseAndNullLocks(Lock[] locks) {
    for (int i = 0; i < locks.length; i++) {
      if (locks[i] != null) {
        IOUtils.closeWhileHandlingException(locks[i]);
      }
      locks[i] = null;
    }
  }

  private void maybeLogPathDetails() throws IOException {

    // We do some I/O in here, so skip this if DEBUG/INFO are not enabled:
    if (logger.isDebugEnabled()) {
      // Log one line per path.data:
      StringBuilder sb = new StringBuilder();
      for (NodePath nodePath : nodePaths) {
        sb.append('\n').append(" -> ").append(nodePath.path.toAbsolutePath());

        String spinsDesc;
        if (nodePath.spins == null) {
          spinsDesc = "unknown";
        } else if (nodePath.spins) {
          spinsDesc = "possibly";
        } else {
          spinsDesc = "no";
        }

        FsInfo.Path fsPath = FsProbe.getFSInfo(nodePath);
        sb.append(", free_space [")
            .append(fsPath.getFree())
            .append("], usable_space [")
            .append(fsPath.getAvailable())
            .append("], total_space [")
            .append(fsPath.getTotal())
            .append("], spins? [")
            .append(spinsDesc)
            .append("], mount [")
            .append(fsPath.getMount())
            .append("], type [")
            .append(fsPath.getType())
            .append(']');
      }
      logger.debug("node data locations details:{}", sb);
    } else if (logger.isInfoEnabled()) {
      FsInfo.Path totFSPath = new FsInfo.Path();
      Set<String> allTypes = new HashSet<>();
      Set<String> allSpins = new HashSet<>();
      Set<String> allMounts = new HashSet<>();
      for (NodePath nodePath : nodePaths) {
        FsInfo.Path fsPath = FsProbe.getFSInfo(nodePath);
        String mount = fsPath.getMount();
        if (allMounts.contains(mount) == false) {
          allMounts.add(mount);
          String type = fsPath.getType();
          if (type != null) {
            allTypes.add(type);
          }
          Boolean spins = fsPath.getSpins();
          if (spins == null) {
            allSpins.add("unknown");
          } else if (spins.booleanValue()) {
            allSpins.add("possibly");
          } else {
            allSpins.add("no");
          }
          totFSPath.add(fsPath);
        }
      }

      // Just log a 1-line summary:
      logger.info(
          "using [{}] data paths, mounts [{}], net usable_space [{}], net total_space [{}], spins? [{}], types [{}]",
          nodePaths.length,
          allMounts,
          totFSPath.getAvailable(),
          totFSPath.getTotal(),
          toString(allSpins),
          toString(allTypes));
    }
  }

  private void maybeLogHeapDetails() {
    JvmInfo jvmInfo = JvmInfo.jvmInfo();
    ByteSizeValue maxHeapSize = jvmInfo.getMem().getHeapMax();
    String useCompressedOops = jvmInfo.useCompressedOops();
    logger.info(
        "heap size [{}], compressed ordinary object pointers [{}]", maxHeapSize, useCompressedOops);
  }

  /**
   * Scans the node paths and loads an existing metadata file. If none is found, new metadata is
   * generated and persisted into the node paths.
   */
  private static NodeMetaData loadOrCreateNodeMetaData(
      Settings settings, Logger logger, NodePath... nodePaths) throws IOException {
    final Path[] paths = Arrays.stream(nodePaths).map(np -> np.path).toArray(Path[]::new);
    NodeMetaData metaData = NodeMetaData.FORMAT.loadLatestState(logger, paths);
    if (metaData == null) {
      metaData = new NodeMetaData(generateNodeId(settings));
    }
    // we write again to make sure all paths have the latest state file
    NodeMetaData.FORMAT.write(metaData, paths);
    return metaData;
  }

  public static String generateNodeId(Settings settings) {
    Random random = Randomness.get(settings, NODE_ID_SEED_SETTING);
    return UUIDs.randomBase64UUID(random);
  }

  @SuppressForbidden(reason = "System.out.*")
  static void applySegmentInfosTrace(Settings settings) {
    if (ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING.get(settings)) {
      SegmentInfos.setInfoStream(System.out);
    }
  }

  private static String toString(Collection<String> items) {
    StringBuilder b = new StringBuilder();
    for (String item : items) {
      if (b.length() > 0) {
        b.append(", ");
      }
      b.append(item);
    }
    return b.toString();
  }

  /**
   * Deletes a shard data directory iff the shard's lock was successfully acquired.
   *
   * @param shardId the id of the shard to delete
   * @throws IOException if an IOException occurs
   */
  public void deleteShardDirectorySafe(ShardId shardId, IndexSettings indexSettings)
      throws IOException, ShardLockObtainFailedException {
    final Path[] paths = availableShardPaths(shardId);
    logger.trace("deleting shard {} directory, paths: [{}]", shardId, paths);
    try (ShardLock lock = shardLock(shardId)) {
      deleteShardDirectoryUnderLock(lock, indexSettings);
    }
  }

  /**
   * Acquires, then releases, all {@code write.lock} files in the given shard paths. The
   * "write.lock" file is assumed to be under the shard path's "index" directory as used by
   * Elasticsearch.
   *
   * @throws LockObtainFailedException if any of the locks could not be acquired
   */
  public static void acquireFSLockForPaths(IndexSettings indexSettings, Path... shardPaths)
      throws IOException {
    Lock[] locks = new Lock[shardPaths.length];
    Directory[] dirs = new Directory[shardPaths.length];
    try {
      for (int i = 0; i < shardPaths.length; i++) {
        // resolve the directory the shard actually lives in
        Path p = shardPaths[i].resolve("index");
        // open a directory (will be immediately closed) on the shard's location
        dirs[i] =
            new SimpleFSDirectory(
                p, indexSettings.getValue(FsDirectoryService.INDEX_LOCK_FACTOR_SETTING));
        // create a lock for the "write.lock" file
        try {
          locks[i] = dirs[i].obtainLock(IndexWriter.WRITE_LOCK_NAME);
        } catch (IOException ex) {
          throw new LockObtainFailedException(
              "unable to acquire " + IndexWriter.WRITE_LOCK_NAME + " for " + p, ex);
        }
      }
    } finally {
      IOUtils.closeWhileHandlingException(locks);
      IOUtils.closeWhileHandlingException(dirs);
    }
  }

  /**
   * Deletes a shard data directory. Note: this method assumes that the shard lock is acquired. This
   * method will also attempt to acquire the write locks for the shard's paths before deleting the
   * data, but this is best effort, as the lock is released before the deletion happens in order to
   * allow the folder to be deleted
   *
   * @param lock the shards lock
   * @throws IOException if an IOException occurs
   * @throws ElasticsearchException if the write.lock is not acquirable
   */
  public void deleteShardDirectoryUnderLock(ShardLock lock, IndexSettings indexSettings)
      throws IOException {
    final ShardId shardId = lock.getShardId();
    assert isShardLocked(shardId) : "shard " + shardId + " is not locked";
    final Path[] paths = availableShardPaths(shardId);
    logger.trace("acquiring locks for {}, paths: [{}]", shardId, paths);
    acquireFSLockForPaths(indexSettings, paths);
    IOUtils.rm(paths);
    if (indexSettings.hasCustomDataPath()) {
      Path customLocation = resolveCustomLocation(indexSettings, shardId);
      logger.trace("acquiring lock for {}, custom path: [{}]", shardId, customLocation);
      acquireFSLockForPaths(indexSettings, customLocation);
      logger.trace("deleting custom shard {} directory [{}]", shardId, customLocation);
      IOUtils.rm(customLocation);
    }
    logger.trace("deleted shard {} directory, paths: [{}]", shardId, paths);
    assert FileSystemUtils.exists(paths) == false;
  }

  private boolean isShardLocked(ShardId id) {
    try {
      shardLock(id, 0).close();
      return false;
    } catch (ShardLockObtainFailedException ex) {
      return true;
    }
  }

  /**
   * Deletes an index's data directory recursively iff all of the index's shard locks were
   * successfully acquired. If any of the index's shard directories can't be locked, none of the
   * shards will be deleted.
   *
   * @param index the index to delete
   * @param lockTimeoutMS how long to wait for acquiring the indices shard locks
   * @param indexSettings settings for the index being deleted
   * @throws IOException if any of the shards data directories can't be locked or deleted
   */
  public void deleteIndexDirectorySafe(Index index, long lockTimeoutMS, IndexSettings indexSettings)
      throws IOException, ShardLockObtainFailedException {
    final List<ShardLock> locks = lockAllForIndex(index, indexSettings, lockTimeoutMS);
    try {
      deleteIndexDirectoryUnderLock(index, indexSettings);
    } finally {
      IOUtils.closeWhileHandlingException(locks);
    }
  }

  /**
   * Deletes an index's data directory recursively. Note: this method assumes that the shard lock
   * is acquired.
   *
   * @param index the index to delete
   * @param indexSettings settings for the index being deleted
   */
  public void deleteIndexDirectoryUnderLock(Index index, IndexSettings indexSettings)
      throws IOException {
    final Path[] indexPaths = indexPaths(index);
    logger.trace(
        "deleting index {} directory, paths({}): [{}]", index, indexPaths.length, indexPaths);
    IOUtils.rm(indexPaths);
    if (indexSettings.hasCustomDataPath()) {
      Path customLocation = resolveIndexCustomLocation(indexSettings);
      logger.trace("deleting custom index {} directory [{}]", index, customLocation);
      IOUtils.rm(customLocation);
    }
  }

  /**
   * Tries to lock all local shards for the given index. If any of the shard locks can't be
   * acquired, a {@link ShardLockObtainFailedException} is thrown and all previously acquired locks
   * are released.
   *
   * @param index the index to lock shards for
   * @param lockTimeoutMS how long to wait for acquiring the indices shard locks
   * @return the {@link ShardLock} instances for this index.
   * @throws IOException if an IOException occurs.
   */
  public List<ShardLock> lockAllForIndex(Index index, IndexSettings settings, long lockTimeoutMS)
      throws IOException, ShardLockObtainFailedException {
    final int numShards = settings.getNumberOfShards();
    if (numShards <= 0) {
      throw new IllegalArgumentException("settings must contain a non-null > 0 number of shards");
    }
    logger.trace("locking all shards for index {} - [{}]", index, numShards);
    List<ShardLock> allLocks = new ArrayList<>(numShards);
    boolean success = false;
    long startTimeNS = System.nanoTime();
    try {
      for (int i = 0; i < numShards; i++) {
        long timeoutLeftMS =
            Math.max(0, lockTimeoutMS - TimeValue.nsecToMSec((System.nanoTime() - startTimeNS)));
        allLocks.add(shardLock(new ShardId(index, i), timeoutLeftMS));
      }
      success = true;
    } finally {
      if (success == false) {
        logger.trace("unable to lock all shards for index {}", index);
        IOUtils.closeWhileHandlingException(allLocks);
      }
    }
    return allLocks;
  }

  /**
   * Tries to lock the given shard ID. A shard lock is required to perform any kind of write
   * operation on a shard's data directory, such as deleting files, creating a new index writer, or
   * recovering from a different shard instance into it. If the shard lock can not be acquired, a
   * {@link ShardLockObtainFailedException} is thrown.
   *
   * <p>Note: this method will return immediately if the lock can't be acquired.
   *
   * @param id the shard ID to lock
   * @return the shard lock. Call {@link ShardLock#close()} to release the lock
   */
  public ShardLock shardLock(ShardId id) throws ShardLockObtainFailedException {
    return shardLock(id, 0);
  }

  /**
   * Tries to lock the given shard ID. A shard lock is required to perform any kind of write
   * operation on a shard's data directory, such as deleting files, creating a new index writer, or
   * recovering from a different shard instance into it. If the shard lock can not be acquired, a
   * {@link ShardLockObtainFailedException} is thrown.
   *
   * @param shardId the shard ID to lock
   * @param lockTimeoutMS the lock timeout in milliseconds
   * @return the shard lock. Call {@link ShardLock#close()} to release the lock
   */
  public ShardLock shardLock(final ShardId shardId, long lockTimeoutMS)
      throws ShardLockObtainFailedException {
    logger.trace("acquiring node shardlock on [{}], timeout [{}]", shardId, lockTimeoutMS);
    final InternalShardLock shardLock;
    final boolean acquired;
    synchronized (shardLocks) {
      if (shardLocks.containsKey(shardId)) {
        shardLock = shardLocks.get(shardId);
        shardLock.incWaitCount();
        acquired = false;
      } else {
        shardLock = new InternalShardLock(shardId);
        shardLocks.put(shardId, shardLock);
        acquired = true;
      }
    }
    if (acquired == false) {
      boolean success = false;
      try {
        shardLock.acquire(lockTimeoutMS);
        success = true;
      } finally {
        if (success == false) {
          shardLock.decWaitCount();
        }
      }
    }
    logger.trace("successfully acquired shardlock for [{}]", shardId);
    return new ShardLock(shardId) { // new instance prevents double closing
      @Override
      protected void closeInternal() {
        shardLock.release();
        logger.trace("released shard lock for [{}]", shardId);
      }
    };
  }

  /** A functional interface that people can use to reference {@link #shardLock(ShardId, long)} */
  @FunctionalInterface
  public interface ShardLocker {
    ShardLock lock(ShardId shardId, long lockTimeoutMS) throws ShardLockObtainFailedException;
  }
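
  // Illustrative sketch: since ShardLocker is a functional interface, a method
  // reference to shardLock(ShardId, long) satisfies it (assuming `nodeEnv` is a
  // NodeEnvironment in scope):
  //
  //   ShardLocker locker = nodeEnv::shardLock;
  //   try (ShardLock lock = locker.lock(shardId, 0)) {
  //     // write to the shard's data directory while the lock is held
  //   }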

  /**
   * Returns all currently locked shards.
   *
   * <p>Note: the shard IDs returned do not contain a valid index UUID.
   */
  public Set<ShardId> lockedShards() {
    synchronized (shardLocks) {
      return unmodifiableSet(new HashSet<>(shardLocks.keySet()));
    }
  }

  private final class InternalShardLock {
    /*
     * This class holds a mutex for exclusive access and timeout / wait semantics
     * and a reference count to clean up the shard lock instance from the internal data
     * structure if nobody is waiting for it. The wait count is guarded by the same lock
     * that is used to mutate the map holding the shard locks to ensure exclusive access.
     */
    private final Semaphore mutex = new Semaphore(1);
    private int waitCount = 1; // guarded by shardLocks
    private final ShardId shardId;

    InternalShardLock(ShardId shardId) {
      this.shardId = shardId;
      mutex.acquireUninterruptibly();
    }

    protected void release() {
      mutex.release();
      decWaitCount();
    }

    void incWaitCount() {
      synchronized (shardLocks) {
        assert waitCount > 0 : "waitCount is " + waitCount + " but should be > 0";
        waitCount++;
      }
    }

    private void decWaitCount() {
      synchronized (shardLocks) {
        assert waitCount > 0 : "waitCount is " + waitCount + " but should be > 0";
        --waitCount;
        logger.trace("shard lock wait count for {} is now [{}]", shardId, waitCount);
        if (waitCount == 0) {
          logger.trace("last shard lock wait decremented, removing lock for {}", shardId);
          InternalShardLock remove = shardLocks.remove(shardId);
          assert remove != null : "Removed lock was null";
        }
      }
    }

    void acquire(long timeoutInMillis) throws ShardLockObtainFailedException {
      try {
        if (mutex.tryAcquire(timeoutInMillis, TimeUnit.MILLISECONDS) == false) {
          throw new ShardLockObtainFailedException(
              shardId, "obtaining shard lock timed out after " + timeoutInMillis + "ms");
        }
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new ShardLockObtainFailedException(
            shardId, "thread interrupted while trying to obtain shard lock", e);
      }
    }
  }

  public boolean hasNodeFile() {
    return nodePaths != null && locks != null;
  }

  /**
   * Returns an array of all of the node's data locations.
   *
   * @throws IllegalStateException if the node is not configured to store local locations
   */
  public Path[] nodeDataPaths() {
    assertEnvIsLocked();
    Path[] paths = new Path[nodePaths.length];
    for (int i = 0; i < paths.length; i++) {
      paths[i] = nodePaths[i].path;
    }
    return paths;
  }

  /**
   * Returns the unique UUID describing this node. The UUID is persisted in the data folder of this
   * node and remains stable across restarts.
   */
  public String nodeId() {
    // We currently only return the ID and hide the underlying nodeMetaData implementation in order
    // to avoid confusion with other "metadata" like node settings found in elasticsearch.yml. In
    // the future we can encapsulate both (and more) in one NodeMetaData (or NodeSettings) object
    // ala IndexSettings.
    return nodeMetaData.nodeId();
  }

  /** Returns an array of all of the {@link NodePath}s. */
  public NodePath[] nodePaths() {
    assertEnvIsLocked();
    if (nodePaths == null || locks == null) {
      throw new IllegalStateException("node is not configured to store local location");
    }
    return nodePaths;
  }

  /** Returns all index paths. */
  public Path[] indexPaths(Index index) {
    assertEnvIsLocked();
    Path[] indexPaths = new Path[nodePaths.length];
    for (int i = 0; i < nodePaths.length; i++) {
      indexPaths[i] = nodePaths[i].resolve(index);
    }
    return indexPaths;
  }

  /**
   * Returns all shard paths excluding custom shard path. Note: Shards are only allocated on one of
   * the returned paths. The returned array may contain paths to non-existing directories.
   *
   * @see IndexSettings#hasCustomDataPath()
   * @see #resolveCustomLocation(IndexSettings, ShardId)
   */
  public Path[] availableShardPaths(ShardId shardId) {
    assertEnvIsLocked();
    final NodePath[] nodePaths = nodePaths();
    final Path[] shardLocations = new Path[nodePaths.length];
    for (int i = 0; i < nodePaths.length; i++) {
      shardLocations[i] = nodePaths[i].resolve(shardId);
    }
    return shardLocations;
  }

  /** Returns all folder names in the ${data.paths}/nodes/{node.id}/indices folder. */
  public Set<String> availableIndexFolders() throws IOException {
    if (nodePaths == null || locks == null) {
      throw new IllegalStateException("node is not configured to store local location");
    }
    assertEnvIsLocked();
    Set<String> indexFolders = new HashSet<>();
    for (NodePath nodePath : nodePaths) {
      Path indicesLocation = nodePath.indicesPath;
      if (Files.isDirectory(indicesLocation)) {
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(indicesLocation)) {
          for (Path index : stream) {
            if (Files.isDirectory(index)) {
              indexFolders.add(index.getFileName().toString());
            }
          }
        }
      }
    }
    return indexFolders;
  }

  /**
   * Resolves all existing paths to <code>indexFolderName</code> in
   * ${data.paths}/nodes/{node.id}/indices
   */
  public Path[] resolveIndexFolder(String indexFolderName) throws IOException {
    if (nodePaths == null || locks == null) {
      throw new IllegalStateException("node is not configured to store local location");
    }
    assertEnvIsLocked();
    List<Path> paths = new ArrayList<>(nodePaths.length);
    for (NodePath nodePath : nodePaths) {
      Path indexFolder = nodePath.indicesPath.resolve(indexFolderName);
      if (Files.exists(indexFolder)) {
        paths.add(indexFolder);
      }
    }
    return paths.toArray(new Path[paths.size()]);
  }
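
  // Illustrative sketch combining the two methods above (assuming `nodeEnv` is a
  // NodeEnvironment): enumerate index folder names, then resolve each name to its
  // concrete paths across all data directories.
  //
  //   for (String folderName : nodeEnv.availableIndexFolders()) {
  //     for (Path indexFolder : nodeEnv.resolveIndexFolder(folderName)) {
  //       // inspect state under indexFolder
  //     }
  //   }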

  /**
   * Tries to find all allocated shards for the given index on the current node. NOTE: This method
   * is prone to race conditions on the filesystem layer since it might not see directories created
   * concurrently or while it is traversing.
   *
   * @param index the index to find allocated shards for
   * @return a set of shard IDs
   * @throws IOException if an IOException occurs
   */
  public Set<ShardId> findAllShardIds(final Index index) throws IOException {
    assert index != null;
    if (nodePaths == null || locks == null) {
      throw new IllegalStateException("node is not configured to store local location");
    }
    assertEnvIsLocked();
    final Set<ShardId> shardIds = new HashSet<>();
    final String indexUniquePathId = index.getUUID();
    for (final NodePath nodePath : nodePaths) {
      Path location = nodePath.indicesPath;
      if (Files.isDirectory(location)) {
        try (DirectoryStream<Path> indexStream = Files.newDirectoryStream(location)) {
          for (Path indexPath : indexStream) {
            if (indexUniquePathId.equals(indexPath.getFileName().toString())) {
              shardIds.addAll(findAllShardsForIndex(indexPath, index));
            }
          }
        }
      }
    }
    return shardIds;
  }

  private static Set<ShardId> findAllShardsForIndex(Path indexPath, Index index)
      throws IOException {
    assert indexPath.getFileName().toString().equals(index.getUUID());
    Set<ShardId> shardIds = new HashSet<>();
    if (Files.isDirectory(indexPath)) {
      try (DirectoryStream<Path> stream = Files.newDirectoryStream(indexPath)) {
        for (Path shardPath : stream) {
          String fileName = shardPath.getFileName().toString();
          if (Files.isDirectory(shardPath) && fileName.chars().allMatch(Character::isDigit)) {
            int shardId = Integer.parseInt(fileName);
            ShardId id = new ShardId(index, shardId);
            shardIds.add(id);
          }
        }
      }
    }
    return shardIds;
  }

  @Override
  public void close() {
    if (closed.compareAndSet(false, true) && locks != null) {
      for (Lock lock : locks) {
        try {
          logger.trace("releasing lock [{}]", lock);
          lock.close();
        } catch (IOException e) {
          logger.trace(
              (Supplier<?>) () -> new ParameterizedMessage("failed to release lock [{}]", lock), e);
        }
      }
    }
  }

  private void assertEnvIsLocked() {
    if (!closed.get() && locks != null) {
      for (Lock lock : locks) {
        try {
          lock.ensureValid();
        } catch (IOException e) {
          logger.warn("lock assertion failed", e);
          throw new IllegalStateException("environment is not locked", e);
        }
      }
    }
  }

  /**
   * This method tries to write an empty file and moves it using an atomic move operation. This
   * method throws an {@link IllegalStateException} if this operation is not supported by the
   * filesystem. This test is executed on each of the data directories. This method cleans up all
   * files even in the case of an error.
   */
  public void ensureAtomicMoveSupported() throws IOException {
    final NodePath[] nodePaths = nodePaths();
    for (NodePath nodePath : nodePaths) {
      assert Files.isDirectory(nodePath.path) : nodePath.path + " is not a directory";
      final Path src = nodePath.path.resolve("__es__.tmp");
      final Path target = nodePath.path.resolve("__es__.final");
      try {
        Files.createFile(src);
        Files.move(src, target, StandardCopyOption.ATOMIC_MOVE);
      } catch (AtomicMoveNotSupportedException ex) {
        throw new IllegalStateException(
            "atomic_move is not supported by the filesystem on path ["
                + nodePath.path
                + "]; atomic_move is required for elasticsearch to work correctly.",
            ex);
      } finally {
        try {
          Files.deleteIfExists(src);
        } finally {
          Files.deleteIfExists(target);
        }
      }
    }
  }

  /**
   * Resolve the base custom path for an index. Uses the {@code IndexMetaData.SETTING_DATA_PATH}
   * setting to determine the root path for the index.
   *
   * @param indexSettings settings for the index
   */
  public Path resolveBaseCustomLocation(IndexSettings indexSettings) {
    String customDataDir = indexSettings.customDataPath();
    if (customDataDir != null) {
      // This assert is because this should be caught by MetaDataCreateIndexService
      assert sharedDataPath != null;
      if (ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.get(indexSettings.getNodeSettings())) {
        return sharedDataPath.resolve(customDataDir).resolve(Integer.toString(this.nodeLockId));
      } else {
        return sharedDataPath.resolve(customDataDir);
      }
    } else {
      throw new IllegalArgumentException(
          "no custom " + IndexMetaData.SETTING_DATA_PATH + " setting available");
    }
  }

  /**
   * Resolve the custom path for an index. Uses the {@code IndexMetaData.SETTING_DATA_PATH} setting
   * to determine the root path for the index.
   *
   * @param indexSettings settings for the index
   */
  private Path resolveIndexCustomLocation(IndexSettings indexSettings) {
    return resolveBaseCustomLocation(indexSettings).resolve(indexSettings.getUUID());
  }

  /**
   * Resolve the custom path for an index's shard. Uses the {@code IndexMetaData.SETTING_DATA_PATH}
   * setting to determine the root path for the index.
   *
   * @param indexSettings settings for the index
   * @param shardId shard to resolve the path to
   */
  public Path resolveCustomLocation(IndexSettings indexSettings, final ShardId shardId) {
    return resolveIndexCustomLocation(indexSettings).resolve(Integer.toString(shardId.id()));
  }
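
  // Worked example (illustrative values, not fixed by this class): with a shared data
  // path of /shared, a customDataPath() of "custom", ADD_NODE_LOCK_ID_TO_CUSTOM_PATH
  // enabled with node lock id 0, an index UUID of "abc123", and shard 3, the three
  // resolve methods above yield:
  //
  //   resolveBaseCustomLocation(indexSettings)     -> /shared/custom/0
  //   resolveIndexCustomLocation(indexSettings)    -> /shared/custom/0/abc123
  //   resolveCustomLocation(indexSettings, shard3) -> /shared/custom/0/abc123/3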

  /** Returns the {@code NodePath.path} for this shard. */
  public static Path shardStatePathToDataPath(Path shardPath) {
    int count = shardPath.getNameCount();

    // Sanity check:
    assert Integer.parseInt(shardPath.getName(count - 1).toString()) >= 0;
    assert "indices".equals(shardPath.getName(count - 3).toString());

    return shardPath.getParent().getParent().getParent();
  }
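
  // Worked example (illustrative): for a shard path such as
  //   /data/nodes/0/indices/{indexUUID}/3
  // the asserts above verify the trailing shard id ("3") and the "indices" segment,
  // and stripping three segments yields the owning NodePath.path, /data/nodes/0.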

  /**
   * This is a best-effort check that we actually have write permissions on all our data
   * directories. It prevents disasters if nodes are started under the wrong username, etc.
   */
  private void assertCanWrite() throws IOException {
    for (Path path : nodeDataPaths()) { // check node-paths are writable
      tryWriteTempFile(path);
    }
    for (String indexFolderName : this.availableIndexFolders()) {
      for (Path indexPath :
          this.resolveIndexFolder(indexFolderName)) { // check index paths are writable
        Path indexStatePath = indexPath.resolve(MetaDataStateFormat.STATE_DIR_NAME);
        tryWriteTempFile(indexStatePath);
        tryWriteTempFile(indexPath);
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(indexPath)) {
          for (Path shardPath : stream) {
            String fileName = shardPath.getFileName().toString();
            if (Files.isDirectory(shardPath) && fileName.chars().allMatch(Character::isDigit)) {
              Path indexDir = shardPath.resolve(ShardPath.INDEX_FOLDER_NAME);
              Path statePath = shardPath.resolve(MetaDataStateFormat.STATE_DIR_NAME);
              Path translogDir = shardPath.resolve(ShardPath.TRANSLOG_FOLDER_NAME);
              tryWriteTempFile(indexDir);
              tryWriteTempFile(translogDir);
              tryWriteTempFile(statePath);
              tryWriteTempFile(shardPath);
            }
          }
        }
      }
    }
  }

  private static void tryWriteTempFile(Path path) throws IOException {
    if (Files.exists(path)) {
      Path resolve = path.resolve(".es_temp_file");
      try {
        Files.createFile(resolve);
        Files.deleteIfExists(resolve);
      } catch (IOException ex) {
        throw new IOException(
            "failed to write in data directory [" + path + "]; write permission is required", ex);
      }
    }
  }
}
/**
 * IndexModule represents the central extension point for index level custom implementations like:
 *
 * <ul>
 *   <li>{@link SimilarityProvider} - New {@link SimilarityProvider} implementations can be
 *       registered through {@link #addSimilarity(String, BiFunction)} while existing Providers can
 *       be referenced through Settings under the {@link IndexModule#SIMILARITY_SETTINGS_PREFIX}
 *       prefix along with the "type" value. For example, to reference the {@link
 *       BM25SimilarityProvider}, the configuration <tt>"index.similarity.my_similarity.type" :
 *       "BM25"</tt> can be used.
 *   <li>{@link IndexStore} - Custom {@link IndexStore} instances can be registered via {@link
 *       #addIndexStore(String, BiFunction)}
 *   <li>{@link IndexEventListener} - Custom {@link IndexEventListener} instances can be registered
 *       via {@link #addIndexEventListener(IndexEventListener)}
 *   <li>Settings update listener - Custom settings update listener can be registered via {@link
 *       #addSettingsUpdateConsumer(Setting, Consumer)}
 * </ul>
 */
public final class IndexModule {

  public static final Setting<String> INDEX_STORE_TYPE_SETTING =
      new Setting<>("index.store.type", "", Function.identity(), false, Setting.Scope.INDEX);
  public static final String SIMILARITY_SETTINGS_PREFIX = "index.similarity";
  public static final String INDEX_QUERY_CACHE = "index";
  public static final String NONE_QUERY_CACHE = "none";
  public static final Setting<String> INDEX_QUERY_CACHE_TYPE_SETTING =
      new Setting<>(
          "index.queries.cache.type",
          INDEX_QUERY_CACHE,
          Function.identity(),
          false,
          Setting.Scope.INDEX);
  // for test purposes only
  public static final Setting<Boolean> INDEX_QUERY_CACHE_EVERYTHING_SETTING =
      Setting.boolSetting("index.queries.cache.everything", false, false, Setting.Scope.INDEX);
  private final IndexSettings indexSettings;
  private final IndexStoreConfig indexStoreConfig;
  private final AnalysisRegistry analysisRegistry;
  // pkg private so tests can mock
  final SetOnce<EngineFactory> engineFactory = new SetOnce<>();
  private SetOnce<IndexSearcherWrapperFactory> indexSearcherWrapper = new SetOnce<>();
  private final Set<IndexEventListener> indexEventListeners = new HashSet<>();
  private IndexEventListener listener;
  private final Map<String, BiFunction<String, Settings, SimilarityProvider>> similarities =
      new HashMap<>();
  private final Map<String, BiFunction<IndexSettings, IndexStoreConfig, IndexStore>> storeTypes =
      new HashMap<>();
  private final Map<String, BiFunction<IndexSettings, IndicesQueryCache, QueryCache>> queryCaches =
      new HashMap<>();

  public IndexModule(
      IndexSettings indexSettings,
      IndexStoreConfig indexStoreConfig,
      AnalysisRegistry analysisRegistry) {
    this.indexStoreConfig = indexStoreConfig;
    this.indexSettings = indexSettings;
    this.analysisRegistry = analysisRegistry;
    registerQueryCache(INDEX_QUERY_CACHE, IndexQueryCache::new);
    registerQueryCache(NONE_QUERY_CACHE, (a, b) -> new NoneQueryCache(a));
  }

  /** Adds a Setting and its consumer for this index. */
  public <T> void addSettingsUpdateConsumer(Setting<T> setting, Consumer<T> consumer) {
    if (setting == null) {
      throw new IllegalArgumentException("setting must not be null");
    }
    indexSettings.getScopedSettings().addSettingsUpdateConsumer(setting, consumer);
  }
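
  // Minimal sketch of wiring a dynamic setting to a consumer (the setting constant
  // and the field it updates are hypothetical, not part of this class):
  //
  //   indexModule.addSettingsUpdateConsumer(
  //       MY_DYNAMIC_SETTING, newValue -> this.currentValue = newValue);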

  /** Returns the {@link Settings} for this index. */
  public Settings getSettings() {
    return indexSettings.getSettings();
  }

  /** Returns the index this module is associated with */
  public Index getIndex() {
    return indexSettings.getIndex();
  }

  /**
   * Adds an {@link IndexEventListener} for this index. All listeners added here are maintained for
   * the entire index lifecycle on this node. Once an index is closed or deleted these listeners go
   * out of scope.
   *
   * <p>Note: an index might be created on a node multiple times. For instance, if the last shard
   * of an index is relocated to another node, the internal representation, including the
   * registered listeners, is destroyed. Once the node again holds at least one shard of the index,
   * all modules are reloaded and listeners are registered again. Listeners can't be unregistered;
   * they stay alive for the entire time the index is allocated on the node.
   */
  public void addIndexEventListener(IndexEventListener listener) {
    if (this.listener != null) {
      throw new IllegalStateException("can't add listener after listeners are frozen");
    }
    if (listener == null) {
      throw new IllegalArgumentException("listener must not be null");
    }
    if (indexEventListeners.contains(listener)) {
      throw new IllegalArgumentException("listener already added");
    }

    this.indexEventListeners.add(listener);
  }

  /**
   * Adds an {@link IndexStore} type to this index module. Typically stores are registered with a
   * reference to their constructor:
   *
   * <pre>
   *     indexModule.addIndexStore("my_store_type", MyStore::new);
   * </pre>
   *
   * @param type the type to register
   * @param provider the instance provider / factory method
   */
  public void addIndexStore(
      String type, BiFunction<IndexSettings, IndexStoreConfig, IndexStore> provider) {
    if (storeTypes.containsKey(type)) {
      throw new IllegalArgumentException("key [" + type + "] already registerd");
    }
    storeTypes.put(type, provider);
  }

  /**
   * Registers the given {@link SimilarityProvider} with the given name
   *
   * @param name Name of the SimilarityProvider
   * @param similarity SimilarityProvider to register
   */
  public void addSimilarity(
      String name, BiFunction<String, Settings, SimilarityProvider> similarity) {
    if (similarities.containsKey(name) || SimilarityService.BUILT_IN.containsKey(name)) {
      throw new IllegalArgumentException(
          "similarity for name: [" + name + " is already registered");
    }
    similarities.put(name, similarity);
  }
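
  // Illustrative usage (the provider class is hypothetical): register a custom
  // provider under a name that does not collide with SimilarityService.BUILT_IN.
  //
  //   indexModule.addSimilarity(
  //       "my_similarity", (name, settings) -> new MySimilarityProvider(name, settings));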

  /**
   * Registers a {@link QueryCache} provider for a given name
   *
   * @param name the providers / caches name
   * @param provider the provider instance
   */
  public void registerQueryCache(
      String name, BiFunction<IndexSettings, IndicesQueryCache, QueryCache> provider) {
    if (provider == null) {
      throw new IllegalArgumentException("provider must not be null");
    }
    if (queryCaches.containsKey(name)) {
      throw new IllegalArgumentException(
          "Can't register the same [query_cache] more than once for [" + name + "]");
    }
    queryCaches.put(name, provider);
  }

  /**
   * Sets a {@link org.elasticsearch.index.IndexModule.IndexSearcherWrapperFactory} that is called
   * once the IndexService is fully constructed. Note: this method can only be called once per
   * index. Multiple wrappers are not supported.
   */
  public void setSearcherWrapper(IndexSearcherWrapperFactory indexSearcherWrapperFactory) {
    this.indexSearcherWrapper.set(indexSearcherWrapperFactory);
  }
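
  // Illustrative sketch: the factory has a single abstract method, so a lambda
  // suffices (the wrapper class is hypothetical). If never set, newIndexService
  // below falls back to a factory that returns null.
  //
  //   indexModule.setSearcherWrapper(indexService -> new MyIndexSearcherWrapper(indexService));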

  public IndexEventListener freeze() {
    // TODO somehow we need to make this pkg private...
    if (listener == null) {
      listener = new CompositeIndexEventListener(indexSettings, indexEventListeners);
    }
    return listener;
  }

  private static boolean isBuiltinType(String storeType) {
    for (Type type : Type.values()) {
      if (type.match(storeType)) {
        return true;
      }
    }
    return false;
  }

  public enum Type {
    NIOFS,
    MMAPFS,
    SIMPLEFS,
    FS,
    DEFAULT;

    public String getSettingsKey() {
      return this.name().toLowerCase(Locale.ROOT);
    }

    /** Returns true iff this setting matches the type. */
    public boolean match(String setting) {
      return getSettingsKey().equals(setting);
    }
  }
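
  // Illustrative note: the constants above map to the values accepted by
  // INDEX_STORE_TYPE_SETTING ("index.store.type"); e.g. Type.MMAPFS.getSettingsKey()
  // returns "mmapfs", which isBuiltinType(...) recognizes as a built-in store.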

  /** Factory for creating new {@link IndexSearcherWrapper} instances */
  public interface IndexSearcherWrapperFactory {
    /** Returns a new IndexSearcherWrapper. This method is called once per index per node */
    IndexSearcherWrapper newWrapper(final IndexService indexService);
  }

  public IndexService newIndexService(
      NodeEnvironment environment,
      IndexService.ShardStoreDeleter shardStoreDeleter,
      NodeServicesProvider servicesProvider,
      MapperRegistry mapperRegistry,
      IndexingOperationListener... listeners)
      throws IOException {
    IndexSearcherWrapperFactory searcherWrapperFactory =
        indexSearcherWrapper.get() == null ? (shard) -> null : indexSearcherWrapper.get();
    IndexEventListener eventListener = freeze();
    final String storeType = indexSettings.getValue(INDEX_STORE_TYPE_SETTING);
    final IndexStore store;
    if (Strings.isEmpty(storeType) || isBuiltinType(storeType)) {
      store = new IndexStore(indexSettings, indexStoreConfig);
    } else {
      BiFunction<IndexSettings, IndexStoreConfig, IndexStore> factory = storeTypes.get(storeType);
      if (factory == null) {
        throw new IllegalArgumentException("Unknown store type [" + storeType + "]");
      }
      store = factory.apply(indexSettings, indexStoreConfig);
      if (store == null) {
        throw new IllegalStateException("store must not be null");
      }
    }
    indexSettings
        .getScopedSettings()
        .addSettingsUpdateConsumer(
            IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, store::setMaxRate);
    indexSettings
        .getScopedSettings()
        .addSettingsUpdateConsumer(IndexStore.INDEX_STORE_THROTTLE_TYPE_SETTING, store::setType);
    final String queryCacheType = indexSettings.getValue(INDEX_QUERY_CACHE_TYPE_SETTING);
    final BiFunction<IndexSettings, IndicesQueryCache, QueryCache> queryCacheProvider =
        queryCaches.get(queryCacheType);
    final QueryCache queryCache =
        queryCacheProvider.apply(indexSettings, servicesProvider.getIndicesQueryCache());
    return new IndexService(
        indexSettings,
        environment,
        new SimilarityService(indexSettings, similarities),
        shardStoreDeleter,
        analysisRegistry,
        engineFactory.get(),
        servicesProvider,
        queryCache,
        store,
        eventListener,
        searcherWrapperFactory,
        mapperRegistry,
        listeners);
  }
}