Example No. 1
public class MapperService extends AbstractIndexComponent {

  /** The reason why a mapping is being merged. */
  public enum MergeReason {
    /** Create or update a mapping. */
    MAPPING_UPDATE,
    /**
     * Recovery of an existing mapping, for instance because of a restart, because a shard was
     * moved to a different node, or for administrative purposes.
     */
    MAPPING_RECOVERY;
  }

  public static final String DEFAULT_MAPPING = "_default_";
  public static final Setting<Long> INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING =
      Setting.longSetting(
          "index.mapping.nested_fields.limit", 50L, 0, Property.Dynamic, Property.IndexScope);
  public static final Setting<Long> INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING =
      Setting.longSetting(
          "index.mapping.total_fields.limit", 1000L, 0, Property.Dynamic, Property.IndexScope);
  public static final Setting<Long> INDEX_MAPPING_DEPTH_LIMIT_SETTING =
      Setting.longSetting(
          "index.mapping.depth.limit", 20L, 1, Property.Dynamic, Property.IndexScope);
  public static final boolean INDEX_MAPPER_DYNAMIC_DEFAULT = true;
  public static final Setting<Boolean> INDEX_MAPPER_DYNAMIC_SETTING =
      Setting.boolSetting(
          "index.mapper.dynamic", INDEX_MAPPER_DYNAMIC_DEFAULT, Property.IndexScope);
  private static ObjectHashSet<String> META_FIELDS =
      ObjectHashSet.from(
          "_uid",
          "_id",
          "_type",
          "_all",
          "_parent",
          "_routing",
          "_index",
          "_size",
          "_timestamp",
          "_ttl");

  private final AnalysisService analysisService;

  /** Will create types automatically if they do not exist in the mapping definition yet */
  private final boolean dynamic;

  private volatile String defaultMappingSource;

  private volatile Map<String, DocumentMapper> mappers = emptyMap();

  private volatile FieldTypeLookup fieldTypes;
  private volatile Map<String, ObjectMapper> fullPathObjectMappers = new HashMap<>();
  private boolean hasNested = false; // updated dynamically to true when a nested object is added

  private final DocumentMapperParser documentParser;

  private final MapperAnalyzerWrapper indexAnalyzer;
  private final MapperAnalyzerWrapper searchAnalyzer;
  private final MapperAnalyzerWrapper searchQuoteAnalyzer;

  private volatile Map<String, MappedFieldType> unmappedFieldTypes = emptyMap();

  private volatile Set<String> parentTypes = emptySet();

  final MapperRegistry mapperRegistry;

  public MapperService(
      IndexSettings indexSettings,
      AnalysisService analysisService,
      SimilarityService similarityService,
      MapperRegistry mapperRegistry,
      Supplier<QueryShardContext> queryShardContextSupplier) {
    super(indexSettings);
    this.analysisService = analysisService;
    this.fieldTypes = new FieldTypeLookup();
    this.documentParser =
        new DocumentMapperParser(
            indexSettings,
            this,
            analysisService,
            similarityService,
            mapperRegistry,
            queryShardContextSupplier);
    this.indexAnalyzer =
        new MapperAnalyzerWrapper(analysisService.defaultIndexAnalyzer(), p -> p.indexAnalyzer());
    this.searchAnalyzer =
        new MapperAnalyzerWrapper(analysisService.defaultSearchAnalyzer(), p -> p.searchAnalyzer());
    this.searchQuoteAnalyzer =
        new MapperAnalyzerWrapper(
            analysisService.defaultSearchQuoteAnalyzer(), p -> p.searchQuoteAnalyzer());
    this.mapperRegistry = mapperRegistry;

    this.dynamic = this.indexSettings.getValue(INDEX_MAPPER_DYNAMIC_SETTING);
    if (index().getName().equals(ScriptService.SCRIPT_INDEX)) {
      defaultMappingSource =
          "{"
              + "\"_default_\": {"
              + "\"properties\": {"
              + "\"script\": { \"enabled\": false },"
              + "\"template\": { \"enabled\": false }"
              + "}"
              + "}"
              + "}";
    } else {
      defaultMappingSource = "{\"_default_\":{}}";
    }

    if (logger.isTraceEnabled()) {
      logger.trace("using dynamic[{}], default mapping source[{}]", dynamic, defaultMappingSource);
    } else if (logger.isDebugEnabled()) {
      logger.debug("using dynamic[{}]", dynamic);
    }
  }

  public boolean hasNested() {
    return this.hasNested;
  }

  /**
   * Returns an immutable iterator over the current document mappers.
   *
   * @param includingDefaultMapping indicates whether the iterator should contain the {@link
   *     #DEFAULT_MAPPING} document mapper. As this is not really an active type, you would
   *     typically set this to false.
   */
  public Iterable<DocumentMapper> docMappers(final boolean includingDefaultMapping) {
    return () -> {
      final Collection<DocumentMapper> documentMappers;
      if (includingDefaultMapping) {
        documentMappers = mappers.values();
      } else {
        documentMappers =
            mappers
                .values()
                .stream()
                .filter(mapper -> !DEFAULT_MAPPING.equals(mapper.type()))
                .collect(Collectors.toList());
      }
      return Collections.unmodifiableCollection(documentMappers).iterator();
    };
  }
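
  // Hypothetical usage sketch (not part of the original class), assuming `mapperService` is a
  // configured MapperService instance: iterate the active document mappers while skipping the
  // synthetic _default_ mapping.
  //   for (DocumentMapper docMapper : mapperService.docMappers(false)) {
  //     logger.info("mapping type [{}]", docMapper.type());
  //   }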

  public AnalysisService analysisService() {
    return this.analysisService;
  }

  public DocumentMapperParser documentMapperParser() {
    return this.documentParser;
  }

  public DocumentMapper merge(
      String type, CompressedXContent mappingSource, MergeReason reason, boolean updateAllTypes) {
    if (DEFAULT_MAPPING.equals(type)) {
      // verify we can parse it
      // NOTE: never apply the default here
      DocumentMapper mapper = documentParser.parse(type, mappingSource);
      // still add it as a document mapper so we have it registered and, for example, persisted
      // back into the cluster meta data if needed, or checked for existence
      synchronized (this) {
        mappers = newMapBuilder(mappers).put(type, mapper).map();
      }
      try {
        defaultMappingSource = mappingSource.string();
      } catch (IOException e) {
        throw new ElasticsearchGenerationException("failed to un-compress", e);
      }
      return mapper;
    } else {
      synchronized (this) {
        final boolean applyDefault =
            // the default was already applied if we are recovering
            reason != MergeReason.MAPPING_RECOVERY
                // only apply the default mapping if we don't have the type yet
                && mappers.containsKey(type) == false;
        DocumentMapper mergeWith = parse(type, mappingSource, applyDefault);
        return merge(mergeWith, reason, updateAllTypes);
      }
    }
  }
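
  // Hypothetical usage sketch, assuming `mapperService` is this service and `mappingSource` is a
  // CompressedXContent holding the new mapping JSON: apply a mapping update for type "my_type".
  //   DocumentMapper updated =
  //       mapperService.merge("my_type", mappingSource, MergeReason.MAPPING_UPDATE, false);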

  private synchronized DocumentMapper merge(
      DocumentMapper mapper, MergeReason reason, boolean updateAllTypes) {
    if (mapper.type().length() == 0) {
      throw new InvalidTypeNameException("mapping type name is empty");
    }
    if (mapper.type().length() > 255) {
      throw new InvalidTypeNameException(
          "mapping type name ["
              + mapper.type()
              + "] is too long; limit is length 255 but was ["
              + mapper.type().length()
              + "]");
    }
    if (mapper.type().charAt(0) == '_') {
      throw new InvalidTypeNameException(
          "mapping type name [" + mapper.type() + "] can't start with '_'");
    }
    if (mapper.type().contains("#")) {
      throw new InvalidTypeNameException(
          "mapping type name [" + mapper.type() + "] should not include '#' in it");
    }
    if (mapper.type().contains(",")) {
      throw new InvalidTypeNameException(
          "mapping type name [" + mapper.type() + "] should not include ',' in it");
    }
    if (mapper.type().equals(mapper.parentFieldMapper().type())) {
      throw new IllegalArgumentException("The [_parent.type] option can't point to the same type");
    }
    if (typeNameStartsWithIllegalDot(mapper)) {
      throw new IllegalArgumentException(
          "mapping type name [" + mapper.type() + "] must not start with a '.'");
    }

    // 1. compute the merged DocumentMapper
    DocumentMapper oldMapper = mappers.get(mapper.type());
    DocumentMapper newMapper;
    if (oldMapper != null) {
      newMapper = oldMapper.merge(mapper.mapping(), updateAllTypes);
    } else {
      newMapper = mapper;
    }

    // 2. check basic sanity of the new mapping
    List<ObjectMapper> objectMappers = new ArrayList<>();
    List<FieldMapper> fieldMappers = new ArrayList<>();
    Collections.addAll(fieldMappers, newMapper.mapping().metadataMappers);
    MapperUtils.collect(newMapper.mapping().root(), objectMappers, fieldMappers);
    checkFieldUniqueness(newMapper.type(), objectMappers, fieldMappers);
    checkObjectsCompatibility(newMapper.type(), objectMappers, fieldMappers, updateAllTypes);

    // 3. update lookup data-structures
    // this will in particular make sure that the merged fields are compatible with other types
    FieldTypeLookup fieldTypes =
        this.fieldTypes.copyAndAddAll(newMapper.type(), fieldMappers, updateAllTypes);

    boolean hasNested = this.hasNested;
    Map<String, ObjectMapper> fullPathObjectMappers = new HashMap<>(this.fullPathObjectMappers);
    for (ObjectMapper objectMapper : objectMappers) {
      fullPathObjectMappers.put(objectMapper.fullPath(), objectMapper);
      if (objectMapper.nested().isNested()) {
        hasNested = true;
      }
    }
    fullPathObjectMappers = Collections.unmodifiableMap(fullPathObjectMappers);

    if (reason == MergeReason.MAPPING_UPDATE) {
      // this check will only be performed on the master node when there is
      // a call to the update mapping API. For all other cases like
      // the master node restoring mappings from disk or data nodes
      // deserializing cluster state that was sent by the master node,
      // this check will be skipped.
      checkNestedFieldsLimit(fullPathObjectMappers);
      checkTotalFieldsLimit(objectMappers.size() + fieldMappers.size());
      checkDepthLimit(fullPathObjectMappers.keySet());
      checkPercolatorFieldLimit(fieldTypes);
    }

    Set<String> parentTypes = this.parentTypes;
    if (oldMapper == null && newMapper.parentFieldMapper().active()) {
      parentTypes = new HashSet<>(parentTypes.size() + 1);
      parentTypes.addAll(this.parentTypes);
      parentTypes.add(mapper.parentFieldMapper().type());
      parentTypes = Collections.unmodifiableSet(parentTypes);
    }

    Map<String, DocumentMapper> mappers = new HashMap<>(this.mappers);
    mappers.put(newMapper.type(), newMapper);
    for (Map.Entry<String, DocumentMapper> entry : mappers.entrySet()) {
      if (entry.getKey().equals(DEFAULT_MAPPING)) {
        continue;
      }
      DocumentMapper m = entry.getValue();
      // apply changes to the field types back
      m = m.updateFieldType(fieldTypes.fullNameToFieldType);
      entry.setValue(m);
    }
    mappers = Collections.unmodifiableMap(mappers);

    // 4. commit the change
    this.mappers = mappers;
    this.fieldTypes = fieldTypes;
    this.hasNested = hasNested;
    this.fullPathObjectMappers = fullPathObjectMappers;
    this.parentTypes = parentTypes;

    assert assertSerialization(newMapper);
    assert assertMappersShareSameFieldType();

    return newMapper;
  }

  private boolean assertMappersShareSameFieldType() {
    for (DocumentMapper mapper : docMappers(false)) {
      List<FieldMapper> fieldMappers = new ArrayList<>();
      Collections.addAll(fieldMappers, mapper.mapping().metadataMappers);
      MapperUtils.collect(mapper.root(), new ArrayList<ObjectMapper>(), fieldMappers);
      for (FieldMapper fieldMapper : fieldMappers) {
        assert fieldMapper.fieldType() == fieldTypes.get(fieldMapper.name()) : fieldMapper.name();
      }
    }
    return true;
  }

  private boolean typeNameStartsWithIllegalDot(DocumentMapper mapper) {
    boolean legacyIndex =
        getIndexSettings().getIndexVersionCreated().before(Version.V_5_0_0_alpha1);
    if (legacyIndex) {
      return mapper.type().startsWith(".")
          && !PercolatorFieldMapper.LEGACY_TYPE_NAME.equals(mapper.type());
    } else {
      return mapper.type().startsWith(".");
    }
  }

  private boolean assertSerialization(DocumentMapper mapper) {
    // capture the source now, it may change due to concurrent parsing
    final CompressedXContent mappingSource = mapper.mappingSource();
    DocumentMapper newMapper = parse(mapper.type(), mappingSource, false);

    if (newMapper.mappingSource().equals(mappingSource) == false) {
      throw new IllegalStateException(
          "DocumentMapper serialization result is different from source. \n--> Source ["
              + mappingSource
              + "]\n--> Result ["
              + newMapper.mappingSource()
              + "]");
    }
    return true;
  }

  private void checkFieldUniqueness(
      String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) {
    assert Thread.holdsLock(this);

    // first check within mapping
    final Set<String> objectFullNames = new HashSet<>();
    for (ObjectMapper objectMapper : objectMappers) {
      final String fullPath = objectMapper.fullPath();
      if (objectFullNames.add(fullPath) == false) {
        throw new IllegalArgumentException(
            "Object mapper [" + fullPath + "] is defined twice in mapping for type [" + type + "]");
      }
    }

    final Set<String> fieldNames = new HashSet<>();
    for (FieldMapper fieldMapper : fieldMappers) {
      final String name = fieldMapper.name();
      if (objectFullNames.contains(name)) {
        throw new IllegalArgumentException(
            "Field [" + name + "] is defined both as an object and a field in [" + type + "]");
      } else if (fieldNames.add(name) == false) {
        throw new IllegalArgumentException(
            "Field [" + name + "] is defined twice in [" + type + "]");
      }
    }

    // then check other types
    for (String fieldName : fieldNames) {
      if (fullPathObjectMappers.containsKey(fieldName)) {
        throw new IllegalArgumentException(
            "["
                + fieldName
                + "] is defined as a field in mapping ["
                + type
                + "] but this name is already used for an object in other types");
      }
    }

    for (String objectPath : objectFullNames) {
      if (fieldTypes.get(objectPath) != null) {
        throw new IllegalArgumentException(
            "["
                + objectPath
                + "] is defined as an object in mapping ["
                + type
                + "] but this name is already used for a field in other types");
      }
    }
  }

  private void checkObjectsCompatibility(
      String type,
      Collection<ObjectMapper> objectMappers,
      Collection<FieldMapper> fieldMappers,
      boolean updateAllTypes) {
    assert Thread.holdsLock(this);

    for (ObjectMapper newObjectMapper : objectMappers) {
      ObjectMapper existingObjectMapper = fullPathObjectMappers.get(newObjectMapper.fullPath());
      if (existingObjectMapper != null) {
        // simulate a merge and ignore the result, we are just interested
        // in exceptions here
        existingObjectMapper.merge(newObjectMapper, updateAllTypes);
      }
    }
  }

  private void checkNestedFieldsLimit(Map<String, ObjectMapper> fullPathObjectMappers) {
    long allowedNestedFields = indexSettings.getValue(INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING);
    long actualNestedFields = 0;
    for (ObjectMapper objectMapper : fullPathObjectMappers.values()) {
      if (objectMapper.nested().isNested()) {
        actualNestedFields++;
      }
    }
    if (actualNestedFields > allowedNestedFields) {
      throw new IllegalArgumentException(
          "Limit of nested fields ["
              + allowedNestedFields
              + "] in index ["
              + index().getName()
              + "] has been exceeded");
    }
  }

  private void checkTotalFieldsLimit(long totalMappers) {
    long allowedTotalFields = indexSettings.getValue(INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING);
    if (allowedTotalFields < totalMappers) {
      throw new IllegalArgumentException(
          "Limit of total fields ["
              + allowedTotalFields
              + "] in index ["
              + index().getName()
              + "] has been exceeded");
    }
  }

  private void checkDepthLimit(Collection<String> objectPaths) {
    final long maxDepth = indexSettings.getValue(INDEX_MAPPING_DEPTH_LIMIT_SETTING);
    for (String objectPath : objectPaths) {
      checkDepthLimit(objectPath, maxDepth);
    }
  }

  private void checkDepthLimit(String objectPath, long maxDepth) {
    int numDots = 0;
    for (int i = 0; i < objectPath.length(); ++i) {
      if (objectPath.charAt(i) == '.') {
        numDots += 1;
      }
    }
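    // an object path with N dots denotes N+1 levels of object nesting; leaf fields below the
    // deepest object therefore sit at depth N+2, with fields defined at the root counting as
    // depth 1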
    final int depth = numDots + 2;
    if (depth > maxDepth) {
      throw new IllegalArgumentException(
          "Limit of mapping depth ["
              + maxDepth
              + "] in index ["
              + index().getName()
              + "] has been exceeded due to object field ["
              + objectPath
              + "]");
    }
  }

  /**
   * We only allow up to one percolator field per index.
   *
   * <p>The reasoning here is that the PercolatorQueryCache only supports a single document having
   * a percolator query. Also, specifying multiple queries per document feels like an anti-pattern.
   */
  private void checkPercolatorFieldLimit(Iterable<MappedFieldType> fieldTypes) {
    List<String> percolatorFieldTypes = new ArrayList<>();
    for (MappedFieldType fieldType : fieldTypes) {
      if (fieldType instanceof PercolatorFieldMapper.PercolatorFieldType) {
        percolatorFieldTypes.add(fieldType.name());
      }
    }
    if (percolatorFieldTypes.size() > 1) {
      throw new IllegalArgumentException(
          "Up to one percolator field type is allowed per index, "
              + "found the following percolator fields ["
              + percolatorFieldTypes
              + "]");
    }
  }

  public DocumentMapper parse(
      String mappingType, CompressedXContent mappingSource, boolean applyDefault)
      throws MapperParsingException {
    return documentParser.parse(
        mappingType, mappingSource, applyDefault ? defaultMappingSource : null);
  }

  public boolean hasMapping(String mappingType) {
    return mappers.containsKey(mappingType);
  }

  /**
   * Return the set of concrete types that have a mapping. NOTE: this does not return the default
   * mapping.
   */
  public Collection<String> types() {
    final Set<String> types = new HashSet<>(mappers.keySet());
    types.remove(DEFAULT_MAPPING);
    return Collections.unmodifiableSet(types);
  }

  /**
   * Return the {@link DocumentMapper} for the given type. By using the special {@value
   * #DEFAULT_MAPPING} type, you can get a {@link DocumentMapper} for the default mapping.
   */
  public DocumentMapper documentMapper(String type) {
    return mappers.get(type);
  }
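
  // e.g. documentMapper(MapperService.DEFAULT_MAPPING) returns the mapper for the _default_
  // mapping, while documentMapper("my_type") returns the mapper for that concrete type, or null
  // if no such type is mapped.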

  /**
   * Returns the document mapper created, including a mapping update if the type has been
   * dynamically created.
   */
  public DocumentMapperForType documentMapperWithAutoCreate(String type) {
    DocumentMapper mapper = mappers.get(type);
    if (mapper != null) {
      return new DocumentMapperForType(mapper, null);
    }
    if (!dynamic) {
      throw new TypeMissingException(
          index(), type, "trying to auto create mapping, but dynamic mapping is disabled");
    }
    mapper = parse(type, null, true);
    return new DocumentMapperForType(mapper, mapper.mapping());
  }

  /**
   * Returns the {@link MappedFieldType} for the given fullName.
   *
   * <p>If multiple types have fields with the same full name, the first is returned.
   */
  public MappedFieldType fullName(String fullName) {
    return fieldTypes.get(fullName);
  }

  /**
   * Returns all the fields that match the given pattern. If the pattern is prefixed with a type
   * then the fields will be returned with a type prefix.
   */
  public Collection<String> simpleMatchToIndexNames(String pattern) {
    if (Regex.isSimpleMatchPattern(pattern) == false) {
      // no wildcards
      return Collections.singletonList(pattern);
    }
    return fieldTypes.simpleMatchToFullName(pattern);
  }
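
  // e.g. simpleMatchToIndexNames("user.*") expands the wildcard against all known full field
  // names, while simpleMatchToIndexNames("user.name") contains no wildcard and simply returns a
  // singleton list with "user.name".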

  public ObjectMapper getObjectMapper(String name) {
    return fullPathObjectMappers.get(name);
  }

  /**
   * Given a type (eg. long, string, ...), return an anonymous field mapper that can be used for
   * search operations.
   */
  public MappedFieldType unmappedFieldType(String type) {
    if (type.equals("string")) {
      deprecationLogger.deprecated(
          "[unmapped_type:string] should be replaced with [unmapped_type:keyword]");
      type = "keyword";
    }
    MappedFieldType fieldType = unmappedFieldTypes.get(type);
    if (fieldType == null) {
      final Mapper.TypeParser.ParserContext parserContext =
          documentMapperParser().parserContext(type);
      Mapper.TypeParser typeParser = parserContext.typeParser(type);
      if (typeParser == null) {
        throw new IllegalArgumentException("No mapper found for type [" + type + "]");
      }
      final Mapper.Builder<?, ?> builder =
          typeParser.parse("__anonymous_" + type, emptyMap(), parserContext);
      final BuilderContext builderContext =
          new BuilderContext(indexSettings.getSettings(), new ContentPath(1));
      fieldType = ((FieldMapper) builder.build(builderContext)).fieldType();

      // There is no need to synchronize writes here. In the case of concurrent access, we could
      // just compute some mappers several times, which is not a big deal
      Map<String, MappedFieldType> newUnmappedFieldTypes = new HashMap<>();
      newUnmappedFieldTypes.putAll(unmappedFieldTypes);
      newUnmappedFieldTypes.put(type, fieldType);
      unmappedFieldTypes = unmodifiableMap(newUnmappedFieldTypes);
    }
    return fieldType;
  }
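
  // e.g. unmappedFieldType("long") builds (and caches) an anonymous field type named
  // "__anonymous_long" that can back search operations on fields absent from the mappings.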

  public Analyzer indexAnalyzer() {
    return this.indexAnalyzer;
  }

  public Analyzer searchAnalyzer() {
    return this.searchAnalyzer;
  }

  public Analyzer searchQuoteAnalyzer() {
    return this.searchQuoteAnalyzer;
  }

  public Set<String> getParentTypes() {
    return parentTypes;
  }

  /** @return Whether a field is a metadata field. */
  public static boolean isMetadataField(String fieldName) {
    return META_FIELDS.contains(fieldName);
  }
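
  // e.g. isMetadataField("_id") returns true, while isMetadataField("title") returns false.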

  public static String[] getAllMetaFields() {
    return META_FIELDS.toArray(String.class);
  }

  /** An analyzer wrapper that can look up fields within the index mappings */
  final class MapperAnalyzerWrapper extends DelegatingAnalyzerWrapper {

    private final Analyzer defaultAnalyzer;
    private final Function<MappedFieldType, Analyzer> extractAnalyzer;

    MapperAnalyzerWrapper(
        Analyzer defaultAnalyzer, Function<MappedFieldType, Analyzer> extractAnalyzer) {
      super(Analyzer.PER_FIELD_REUSE_STRATEGY);
      this.defaultAnalyzer = defaultAnalyzer;
      this.extractAnalyzer = extractAnalyzer;
    }

    @Override
    protected Analyzer getWrappedAnalyzer(String fieldName) {
      MappedFieldType fieldType = fullName(fieldName);
      if (fieldType != null) {
        Analyzer analyzer = extractAnalyzer.apply(fieldType);
        if (analyzer != null) {
          return analyzer;
        }
      }
      return defaultAnalyzer;
    }
  }
}
Example No. 2
/** A component that holds all data paths for a single node. */
public final class NodeEnvironment implements Closeable {

  private final Logger logger;

  public static class NodePath {
    /* ${data.paths}/nodes/{node.id} */
    public final Path path;
    /* ${data.paths}/nodes/{node.id}/indices */
    public final Path indicesPath;
    /** Cached FileStore from path */
    public final FileStore fileStore;
    /**
     * Cached result of Lucene's {@code IOUtils.spins} on path. This is a trilean value: null means
     * we could not determine it (we are not running on Linux, or we hit an exception trying), True
     * means the device possibly spins and False means it does not.
     */
    public final Boolean spins;

    public final int majorDeviceNumber;
    public final int minorDeviceNumber;

    public NodePath(Path path) throws IOException {
      this.path = path;
      this.indicesPath = path.resolve(INDICES_FOLDER);
      this.fileStore = Environment.getFileStore(path);
      if (fileStore.supportsFileAttributeView("lucene")) {
        this.spins = (Boolean) fileStore.getAttribute("lucene:spins");
        this.majorDeviceNumber = (int) fileStore.getAttribute("lucene:major_device_number");
        this.minorDeviceNumber = (int) fileStore.getAttribute("lucene:minor_device_number");
      } else {
        this.spins = null;
        this.majorDeviceNumber = -1;
        this.minorDeviceNumber = -1;
      }
    }

    /**
     * Resolves the given shard's directory against this NodePath
     * ${data.paths}/nodes/{node.id}/indices/{index.uuid}/{shard.id}
     */
    public Path resolve(ShardId shardId) {
      return resolve(shardId.getIndex()).resolve(Integer.toString(shardId.id()));
    }
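
    // e.g. resolving shard 3 of an index with UUID "abc123" (hypothetical values) yields
    // ${data.paths}/nodes/{node.id}/indices/abc123/3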

    /**
     * Resolves the index directory against this NodePath
     * ${data.paths}/nodes/{node.id}/indices/{index.uuid}
     */
    public Path resolve(Index index) {
      return indicesPath.resolve(index.getUUID());
    }

    @Override
    public String toString() {
      return "NodePath{" + "path=" + path + ", spins=" + spins + '}';
    }
  }

  private final NodePath[] nodePaths;
  private final Path sharedDataPath;
  private final Lock[] locks;

  private final int nodeLockId;
  private final AtomicBoolean closed = new AtomicBoolean(false);
  private final Map<ShardId, InternalShardLock> shardLocks = new HashMap<>();

  private final NodeMetaData nodeMetaData;

  /** Maximum number of data nodes that should run in an environment. */
  public static final Setting<Integer> MAX_LOCAL_STORAGE_NODES_SETTING =
      Setting.intSetting("node.max_local_storage_nodes", 1, 1, Property.NodeScope);

  /** If true, automatically append the node lock id to custom data paths. */
  public static final Setting<Boolean> ADD_NODE_LOCK_ID_TO_CUSTOM_PATH =
      Setting.boolSetting("node.add_lock_id_to_custom_path", true, Property.NodeScope);

  /**
   * Seed for determining a persisted unique uuid of this node. If the node already has a persisted
   * uuid on disk, this seed will be ignored and the uuid from disk will be reused.
   */
  public static final Setting<Long> NODE_ID_SEED_SETTING =
      Setting.longSetting("node.id.seed", 0L, Long.MIN_VALUE, Property.NodeScope);

  /** If true, the [verbose] SegmentInfos.infoStream logging is sent to System.out. */
  public static final Setting<Boolean> ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING =
      Setting.boolSetting("node.enable_lucene_segment_infos_trace", false, Property.NodeScope);

  public static final String NODES_FOLDER = "nodes";
  public static final String INDICES_FOLDER = "indices";
  public static final String NODE_LOCK_FILENAME = "node.lock";

  public NodeEnvironment(Settings settings, Environment environment) throws IOException {

    if (!DiscoveryNode.nodeRequiresLocalStorage(settings)) {
      nodePaths = null;
      sharedDataPath = null;
      locks = null;
      nodeLockId = -1;
      nodeMetaData = new NodeMetaData(generateNodeId(settings));
      logger =
          Loggers.getLogger(
              getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId()));
      return;
    }
    final NodePath[] nodePaths = new NodePath[environment.dataWithClusterFiles().length];
    final Lock[] locks = new Lock[nodePaths.length];
    boolean success = false;

    // trace logger to debug issues before the default node name is derived from the node id
    Logger startupTraceLogger = Loggers.getLogger(getClass(), settings);

    try {
      sharedDataPath = environment.sharedDataFile();
      int nodeLockId = -1;
      IOException lastException = null;
      int maxLocalStorageNodes = MAX_LOCAL_STORAGE_NODES_SETTING.get(settings);
      for (int possibleLockId = 0; possibleLockId < maxLocalStorageNodes; possibleLockId++) {
        for (int dirIndex = 0; dirIndex < environment.dataFiles().length; dirIndex++) {
          Path dataDirWithClusterName = environment.dataWithClusterFiles()[dirIndex];
          Path dataDir = environment.dataFiles()[dirIndex];
          Path dir = dataDir.resolve(NODES_FOLDER).resolve(Integer.toString(possibleLockId));
          Files.createDirectories(dir);

          try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) {
            startupTraceLogger.trace("obtaining node lock on {} ...", dir.toAbsolutePath());
            try {
              locks[dirIndex] = luceneDir.obtainLock(NODE_LOCK_FILENAME);
              nodePaths[dirIndex] = new NodePath(dir);
              nodeLockId = possibleLockId;
            } catch (LockObtainFailedException ex) {
              startupTraceLogger.trace("failed to obtain node lock on {}", dir.toAbsolutePath());
              // release all the ones that were obtained up until now
              releaseAndNullLocks(locks);
              break;
            }

          } catch (IOException e) {
            startupTraceLogger.trace(
                (Supplier<?>)
                    () ->
                        new ParameterizedMessage(
                            "failed to obtain node lock on {}", dir.toAbsolutePath()),
                e);
            lastException = new IOException("failed to obtain lock on " + dir.toAbsolutePath(), e);
            // release all the ones that were obtained up until now
            releaseAndNullLocks(locks);
            break;
          }
        }
        if (locks[0] != null) {
          // we found a lock, break
          break;
        }
      }

      if (locks[0] == null) {
        final String message =
            String.format(
                Locale.ROOT,
                "failed to obtain node locks, tried [%s] with lock id%s;"
                    + " maybe these locations are not writable or multiple nodes were started without increasing [%s] (was [%d])?",
                Arrays.toString(environment.dataWithClusterFiles()),
                maxLocalStorageNodes == 1 ? " [0]" : "s [0--" + (maxLocalStorageNodes - 1) + "]",
                MAX_LOCAL_STORAGE_NODES_SETTING.getKey(),
                maxLocalStorageNodes);
        throw new IllegalStateException(message, lastException);
      }
      this.nodeMetaData = loadOrCreateNodeMetaData(settings, startupTraceLogger, nodePaths);
      this.logger =
          Loggers.getLogger(
              getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId()));

      this.nodeLockId = nodeLockId;
      this.locks = locks;
      this.nodePaths = nodePaths;

      if (logger.isDebugEnabled()) {
        logger.debug("using node location [{}], local_lock_id [{}]", nodePaths, nodeLockId);
      }

      maybeLogPathDetails();
      maybeLogHeapDetails();

      applySegmentInfosTrace(settings);
      assertCanWrite();
      success = true;
    } finally {
      if (success == false) {
        IOUtils.closeWhileHandlingException(locks);
      }
    }
  }

  /** Returns true if the directory is empty */
  private static boolean dirEmpty(final Path path) throws IOException {
    try (DirectoryStream<Path> stream = Files.newDirectoryStream(path)) {
      return stream.iterator().hasNext() == false;
    }
  }

  private static void releaseAndNullLocks(Lock[] locks) {
    for (int i = 0; i < locks.length; i++) {
      if (locks[i] != null) {
        IOUtils.closeWhileHandlingException(locks[i]);
      }
      locks[i] = null;
    }
  }

  private void maybeLogPathDetails() throws IOException {

    // We do some I/O in here, so skip this if DEBUG/INFO are not enabled:
    if (logger.isDebugEnabled()) {
      // Log one line per path.data:
      StringBuilder sb = new StringBuilder();
      for (NodePath nodePath : nodePaths) {
        sb.append('\n').append(" -> ").append(nodePath.path.toAbsolutePath());

        String spinsDesc;
        if (nodePath.spins == null) {
          spinsDesc = "unknown";
        } else if (nodePath.spins) {
          spinsDesc = "possibly";
        } else {
          spinsDesc = "no";
        }

        FsInfo.Path fsPath = FsProbe.getFSInfo(nodePath);
        sb.append(", free_space [")
            .append(fsPath.getFree())
            .append("], usable_space [")
            .append(fsPath.getAvailable())
            .append("], total_space [")
            .append(fsPath.getTotal())
            .append("], spins? [")
            .append(spinsDesc)
            .append("], mount [")
            .append(fsPath.getMount())
            .append("], type [")
            .append(fsPath.getType())
            .append(']');
      }
      logger.debug("node data locations details:{}", sb);
    } else if (logger.isInfoEnabled()) {
      FsInfo.Path totFSPath = new FsInfo.Path();
      Set<String> allTypes = new HashSet<>();
      Set<String> allSpins = new HashSet<>();
      Set<String> allMounts = new HashSet<>();
      for (NodePath nodePath : nodePaths) {
        FsInfo.Path fsPath = FsProbe.getFSInfo(nodePath);
        String mount = fsPath.getMount();
        if (allMounts.contains(mount) == false) {
          allMounts.add(mount);
          String type = fsPath.getType();
          if (type != null) {
            allTypes.add(type);
          }
          Boolean spins = fsPath.getSpins();
          if (spins == null) {
            allSpins.add("unknown");
          } else if (spins.booleanValue()) {
            allSpins.add("possibly");
          } else {
            allSpins.add("no");
          }
          totFSPath.add(fsPath);
        }
      }

      // Just log a 1-line summary:
      logger.info(
          "using [{}] data paths, mounts [{}], net usable_space [{}], net total_space [{}], spins? [{}], types [{}]",
          nodePaths.length,
          allMounts,
          totFSPath.getAvailable(),
          totFSPath.getTotal(),
          toString(allSpins),
          toString(allTypes));
    }
  }

  private void maybeLogHeapDetails() {
    JvmInfo jvmInfo = JvmInfo.jvmInfo();
    ByteSizeValue maxHeapSize = jvmInfo.getMem().getHeapMax();
    String useCompressedOops = jvmInfo.useCompressedOops();
    logger.info(
        "heap size [{}], compressed ordinary object pointers [{}]", maxHeapSize, useCompressedOops);
  }

  /**
   * Scans the node paths and loads the existing metaData file. If none is found, new metadata will
   * be generated and persisted into the nodePaths.
   */
  private static NodeMetaData loadOrCreateNodeMetaData(
      Settings settings, Logger logger, NodePath... nodePaths) throws IOException {
    final Path[] paths = Arrays.stream(nodePaths).map(np -> np.path).toArray(Path[]::new);
    NodeMetaData metaData = NodeMetaData.FORMAT.loadLatestState(logger, paths);
    if (metaData == null) {
      metaData = new NodeMetaData(generateNodeId(settings));
    }
    // we write again to make sure all paths have the latest state file
    NodeMetaData.FORMAT.write(metaData, paths);
    return metaData;
  }

  public static String generateNodeId(Settings settings) {
    Random random = Randomness.get(settings, NODE_ID_SEED_SETTING);
    return UUIDs.randomBase64UUID(random);
  }

  @SuppressForbidden(reason = "System.out.*")
  static void applySegmentInfosTrace(Settings settings) {
    if (ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING.get(settings)) {
      SegmentInfos.setInfoStream(System.out);
    }
  }

  private static String toString(Collection<String> items) {
    StringBuilder b = new StringBuilder();
    for (String item : items) {
      if (b.length() > 0) {
        b.append(", ");
      }
      b.append(item);
    }
    return b.toString();
  }

  /**
   * Deletes a shard data directory iff the shard's lock was successfully acquired.
   *
   * @param shardId the id of the shard to delete
   * @throws IOException if an IOException occurs
   */
  public void deleteShardDirectorySafe(ShardId shardId, IndexSettings indexSettings)
      throws IOException, ShardLockObtainFailedException {
    final Path[] paths = availableShardPaths(shardId);
    logger.trace("deleting shard {} directory, paths: [{}]", shardId, paths);
    try (ShardLock lock = shardLock(shardId)) {
      deleteShardDirectoryUnderLock(lock, indexSettings);
    }
  }

  /**
   * Acquires, then releases, all {@code write.lock} files in the given shard paths. The
   * "write.lock" file is assumed to be under the shard path's "index" directory as used by
   * Elasticsearch.
   *
   * @throws LockObtainFailedException if any of the locks could not be acquired
   */
  public static void acquireFSLockForPaths(IndexSettings indexSettings, Path... shardPaths)
      throws IOException {
    Lock[] locks = new Lock[shardPaths.length];
    Directory[] dirs = new Directory[shardPaths.length];
    try {
      for (int i = 0; i < shardPaths.length; i++) {
        // resolve the directory the shard actually lives in
        Path p = shardPaths[i].resolve("index");
        // open a directory (will be immediately closed) on the shard's location
        dirs[i] =
            new SimpleFSDirectory(
                p, indexSettings.getValue(FsDirectoryService.INDEX_LOCK_FACTOR_SETTING));
        // create a lock for the "write.lock" file
        try {
          locks[i] = dirs[i].obtainLock(IndexWriter.WRITE_LOCK_NAME);
        } catch (IOException ex) {
          throw new LockObtainFailedException(
              "unable to acquire " + IndexWriter.WRITE_LOCK_NAME + " for " + p, ex);
        }
      }
    } finally {
      IOUtils.closeWhileHandlingException(locks);
      IOUtils.closeWhileHandlingException(dirs);
    }
  }

  /**
   * Deletes a shard data directory. Note: this method assumes that the shard lock is acquired. This
   * method will also attempt to acquire the write locks for the shard's paths before deleting the
   * data, but this is best effort, as the lock is released before the deletion happens in order to
   * allow the folder to be deleted.
   *
   * @param lock the shard's lock
   * @throws IOException if an IOException occurs
   * @throws ElasticsearchException if the write.lock is not acquirable
   */
  public void deleteShardDirectoryUnderLock(ShardLock lock, IndexSettings indexSettings)
      throws IOException {
    final ShardId shardId = lock.getShardId();
    assert isShardLocked(shardId) : "shard " + shardId + " is not locked";
    final Path[] paths = availableShardPaths(shardId);
    logger.trace("acquiring locks for {}, paths: [{}]", shardId, paths);
    acquireFSLockForPaths(indexSettings, paths);
    IOUtils.rm(paths);
    if (indexSettings.hasCustomDataPath()) {
      Path customLocation = resolveCustomLocation(indexSettings, shardId);
      logger.trace("acquiring lock for {}, custom path: [{}]", shardId, customLocation);
      acquireFSLockForPaths(indexSettings, customLocation);
      logger.trace("deleting custom shard {} directory [{}]", shardId, customLocation);
      IOUtils.rm(customLocation);
    }
    logger.trace("deleted shard {} directory, paths: [{}]", shardId, paths);
    assert FileSystemUtils.exists(paths) == false;
  }

  private boolean isShardLocked(ShardId id) {
    try {
      shardLock(id, 0).close();
      return false;
    } catch (ShardLockObtainFailedException ex) {
      return true;
    }
  }

  /**
   * Deletes an index's data directory recursively iff all of the index's shard locks were
   * successfully acquired. If any of the index's shard directories can't be locked, none of the
   * shards will be deleted.
   *
   * @param index the index to delete
   * @param lockTimeoutMS how long to wait to acquire the index's shard locks
   * @param indexSettings settings for the index being deleted
   * @throws IOException if any of the shards' data directories can't be locked or deleted
   */
  public void deleteIndexDirectorySafe(Index index, long lockTimeoutMS, IndexSettings indexSettings)
      throws IOException, ShardLockObtainFailedException {
    final List<ShardLock> locks = lockAllForIndex(index, indexSettings, lockTimeoutMS);
    try {
      deleteIndexDirectoryUnderLock(index, indexSettings);
    } finally {
      IOUtils.closeWhileHandlingException(locks);
    }
  }

  /**
   * Deletes an index's data directory recursively. Note: this method assumes that the shard lock
   * is acquired.
   *
   * @param index the index to delete
   * @param indexSettings settings for the index being deleted
   */
  public void deleteIndexDirectoryUnderLock(Index index, IndexSettings indexSettings)
      throws IOException {
    final Path[] indexPaths = indexPaths(index);
    logger.trace(
        "deleting index {} directory, paths({}): [{}]", index, indexPaths.length, indexPaths);
    IOUtils.rm(indexPaths);
    if (indexSettings.hasCustomDataPath()) {
      Path customLocation = resolveIndexCustomLocation(indexSettings);
      logger.trace("deleting custom index {} directory [{}]", index, customLocation);
      IOUtils.rm(customLocation);
    }
  }

  /**
   * Tries to lock all local shards for the given index. If any of the shard locks can't be acquired,
   * a {@link ShardLockObtainFailedException} is thrown and all previously acquired locks are
   * released.
   *
   * @param index the index to lock shards for
   * @param lockTimeoutMS how long to wait to acquire the index's shard locks
   * @return the {@link ShardLock} instances for this index.
   * @throws IOException if an IOException occurs.
   */
  public List<ShardLock> lockAllForIndex(Index index, IndexSettings settings, long lockTimeoutMS)
      throws IOException, ShardLockObtainFailedException {
    final int numShards = settings.getNumberOfShards();
    if (numShards <= 0) {
      throw new IllegalArgumentException("settings must contain a non-null > 0 number of shards");
    }
    logger.trace("locking all shards for index {} - [{}]", index, numShards);
    List<ShardLock> allLocks = new ArrayList<>(numShards);
    boolean success = false;
    long startTimeNS = System.nanoTime();
    try {
      for (int i = 0; i < numShards; i++) {
        long timeoutLeftMS =
            Math.max(0, lockTimeoutMS - TimeValue.nsecToMSec((System.nanoTime() - startTimeNS)));
        allLocks.add(shardLock(new ShardId(index, i), timeoutLeftMS));
      }
      success = true;
    } finally {
      if (success == false) {
        logger.trace("unable to lock all shards for index {}", index);
        IOUtils.closeWhileHandlingException(allLocks);
      }
    }
    return allLocks;
  }
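
  // Hypothetical usage sketch, assuming `nodeEnv` is this NodeEnvironment: acquire all shard
  // locks for an index and release them when done, mirroring deleteIndexDirectorySafe above.
  //   List<ShardLock> locks = nodeEnv.lockAllForIndex(index, indexSettings, 5000);
  //   try {
  //     // ... work on the index's shard directories ...
  //   } finally {
  //     IOUtils.closeWhileHandlingException(locks);
  //   }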

  /**
   * Tries to lock the given shard ID. A shard lock is required to perform any kind of write
   * operation on a shard's data directory, like deleting files, creating a new index writer or
   * recovering from a different shard instance into it. If the shard lock cannot be acquired, a
   * {@link ShardLockObtainFailedException} is thrown.
   *
   * <p>Note: this method will return immediately if the lock can't be acquired.
   *
   * @param id the shard ID to lock
   * @return the shard lock. Call {@link ShardLock#close()} to release the lock
   */
  public ShardLock shardLock(ShardId id) throws ShardLockObtainFailedException {
    return shardLock(id, 0);
  }

  /**
   * Tries to lock the given shard ID. A shard lock is required to perform any kind of write
   * operation on a shard's data directory, like deleting files, creating a new index writer or
   * recovering from a different shard instance into it. If the shard lock cannot be acquired, a
   * {@link ShardLockObtainFailedException} is thrown.
   *
   * @param shardId the shard ID to lock
   * @param lockTimeoutMS the lock timeout in milliseconds
   * @return the shard lock. Call {@link ShardLock#close()} to release the lock
   */
  public ShardLock shardLock(final ShardId shardId, long lockTimeoutMS)
      throws ShardLockObtainFailedException {
    logger.trace("acquiring node shardlock on [{}], timeout [{}]", shardId, lockTimeoutMS);
    final InternalShardLock shardLock;
    final boolean acquired;
    synchronized (shardLocks) {
      if (shardLocks.containsKey(shardId)) {
        shardLock = shardLocks.get(shardId);
        shardLock.incWaitCount();
        acquired = false;
      } else {
        shardLock = new InternalShardLock(shardId);
        shardLocks.put(shardId, shardLock);
        acquired = true;
      }
    }
    if (acquired == false) {
      boolean success = false;
      try {
        shardLock.acquire(lockTimeoutMS);
        success = true;
      } finally {
        if (success == false) {
          shardLock.decWaitCount();
        }
      }
    }
    logger.trace("successfully acquired shardlock for [{}]", shardId);
    return new ShardLock(shardId) { // new instance prevents double closing
      @Override
      protected void closeInternal() {
        shardLock.release();
        logger.trace("released shard lock for [{}]", shardId);
      }
    };
  }
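
  // Hypothetical usage sketch: ShardLock is closeable, so try-with-resources releases the lock
  // when the protected work is done.
  //   try (ShardLock lock = nodeEnv.shardLock(shardId, 5000)) {
  //     // perform writes against the shard's data directory
  //   }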

  /** A functional interface that people can use to reference {@link #shardLock(ShardId, long)} */
  @FunctionalInterface
  public interface ShardLocker {
    ShardLock lock(ShardId shardId, long lockTimeoutMS) throws ShardLockObtainFailedException;
  }

  /**
   * Returns all currently locked shards.
   *
   * <p>Note: the shard ids returned do not contain a valid Index UUID.
   */
  public Set<ShardId> lockedShards() {
    synchronized (shardLocks) {
      return unmodifiableSet(new HashSet<>(shardLocks.keySet()));
    }
  }

  private final class InternalShardLock {
    /*
     * This class holds a mutex for exclusive access and timeout / wait semantics
     * and a reference count to clean up the shard lock instance from the internal data
     * structure if nobody is waiting for it. The wait count is guarded by the same lock
     * that is used to mutate the map holding the shard locks to ensure exclusive access.
     */
    private final Semaphore mutex = new Semaphore(1);
    private int waitCount = 1; // guarded by shardLocks
    private final ShardId shardId;

    InternalShardLock(ShardId shardId) {
      this.shardId = shardId;
      mutex.acquireUninterruptibly();
    }

    protected void release() {
      mutex.release();
      decWaitCount();
    }

    void incWaitCount() {
      synchronized (shardLocks) {
        assert waitCount > 0 : "waitCount is " + waitCount + " but should be > 0";
        waitCount++;
      }
    }

    private void decWaitCount() {
      synchronized (shardLocks) {
        assert waitCount > 0 : "waitCount is " + waitCount + " but should be > 0";
        --waitCount;
        logger.trace("shard lock wait count for {} is now [{}]", shardId, waitCount);
        if (waitCount == 0) {
          logger.trace("last shard lock wait decremented, removing lock for {}", shardId);
          InternalShardLock remove = shardLocks.remove(shardId);
          assert remove != null : "Removed lock was null";
        }
      }
    }

    void acquire(long timeoutInMillis) throws ShardLockObtainFailedException {
      try {
        if (mutex.tryAcquire(timeoutInMillis, TimeUnit.MILLISECONDS) == false) {
          throw new ShardLockObtainFailedException(
              shardId, "obtaining shard lock timed out after " + timeoutInMillis + "ms");
        }
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new ShardLockObtainFailedException(
            shardId, "thread interrupted while trying to obtain shard lock", e);
      }
    }
  }

  public boolean hasNodeFile() {
    return nodePaths != null && locks != null;
  }

  /**
   * Returns an array of all of the node's data locations.
   *
   * @throws IllegalStateException if the node is not configured to store local locations
   */
  public Path[] nodeDataPaths() {
    assertEnvIsLocked();
    Path[] paths = new Path[nodePaths.length];
    for (int i = 0; i < paths.length; i++) {
      paths[i] = nodePaths[i].path;
    }
    return paths;
  }

  /**
   * Returns the unique uuid describing this node. The uuid is persisted in the data folder of this
   * node and remains the same across restarts.
   */
  public String nodeId() {
    // we currently only return the ID and hide the underlying nodeMetaData implementation in
    // order to avoid confusion with other "metadata" like node settings found in
    // elasticsearch.yml. In future we can encapsulate both (and more) in one NodeMetaData
    // (or NodeSettings) object ala IndexSettings
    return nodeMetaData.nodeId();
  }

  /** Returns an array of all of the {@link NodePath}s. */
  public NodePath[] nodePaths() {
    assertEnvIsLocked();
    if (nodePaths == null || locks == null) {
      throw new IllegalStateException("node is not configured to store local location");
    }
    return nodePaths;
  }

  /** Returns all index paths. */
  public Path[] indexPaths(Index index) {
    assertEnvIsLocked();
    Path[] indexPaths = new Path[nodePaths.length];
    for (int i = 0; i < nodePaths.length; i++) {
      indexPaths[i] = nodePaths[i].resolve(index);
    }
    return indexPaths;
  }

  /**
   * Returns all shard paths excluding custom shard path. Note: Shards are only allocated on one of
   * the returned paths. The returned array may contain paths to non-existing directories.
   *
   * @see IndexSettings#hasCustomDataPath()
   * @see #resolveCustomLocation(IndexSettings, ShardId)
   */
  public Path[] availableShardPaths(ShardId shardId) {
    assertEnvIsLocked();
    final NodePath[] nodePaths = nodePaths();
    final Path[] shardLocations = new Path[nodePaths.length];
    for (int i = 0; i < nodePaths.length; i++) {
      shardLocations[i] = nodePaths[i].resolve(shardId);
    }
    return shardLocations;
  }

  /** Returns all folder names in ${data.paths}/nodes/{node.id}/indices folder */
  public Set<String> availableIndexFolders() throws IOException {
    if (nodePaths == null || locks == null) {
      throw new IllegalStateException("node is not configured to store local location");
    }
    assertEnvIsLocked();
    Set<String> indexFolders = new HashSet<>();
    for (NodePath nodePath : nodePaths) {
      Path indicesLocation = nodePath.indicesPath;
      if (Files.isDirectory(indicesLocation)) {
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(indicesLocation)) {
          for (Path index : stream) {
            if (Files.isDirectory(index)) {
              indexFolders.add(index.getFileName().toString());
            }
          }
        }
      }
    }
    return indexFolders;
  }

  /**
   * Resolves all existing paths to <code>indexFolderName</code> in
   * ${data.paths}/nodes/{node.id}/indices
   */
  public Path[] resolveIndexFolder(String indexFolderName) throws IOException {
    if (nodePaths == null || locks == null) {
      throw new IllegalStateException("node is not configured to store local location");
    }
    assertEnvIsLocked();
    List<Path> paths = new ArrayList<>(nodePaths.length);
    for (NodePath nodePath : nodePaths) {
      Path indexFolder = nodePath.indicesPath.resolve(indexFolderName);
      if (Files.exists(indexFolder)) {
        paths.add(indexFolder);
      }
    }
    return paths.toArray(new Path[paths.size()]);
  }

  /**
   * Tries to find all allocated shards for the given index on the current node. NOTE: This method
   * is prone to race conditions on the filesystem layer since it might not see directories created
   * concurrently or while it's traversing.
   *
   * @param index the index to filter shards
   * @return a set of shard IDs
   * @throws IOException if an IOException occurs
   */
  public Set<ShardId> findAllShardIds(final Index index) throws IOException {
    assert index != null;
    if (nodePaths == null || locks == null) {
      throw new IllegalStateException("node is not configured to store local location");
    }
    assertEnvIsLocked();
    final Set<ShardId> shardIds = new HashSet<>();
    final String indexUniquePathId = index.getUUID();
    for (final NodePath nodePath : nodePaths) {
      Path location = nodePath.indicesPath;
      if (Files.isDirectory(location)) {
        try (DirectoryStream<Path> indexStream = Files.newDirectoryStream(location)) {
          for (Path indexPath : indexStream) {
            if (indexUniquePathId.equals(indexPath.getFileName().toString())) {
              shardIds.addAll(findAllShardsForIndex(indexPath, index));
            }
          }
        }
      }
    }
    return shardIds;
  }

  private static Set<ShardId> findAllShardsForIndex(Path indexPath, Index index)
      throws IOException {
    assert indexPath.getFileName().toString().equals(index.getUUID());
    Set<ShardId> shardIds = new HashSet<>();
    if (Files.isDirectory(indexPath)) {
      try (DirectoryStream<Path> stream = Files.newDirectoryStream(indexPath)) {
        for (Path shardPath : stream) {
          String fileName = shardPath.getFileName().toString();
          if (Files.isDirectory(shardPath) && fileName.chars().allMatch(Character::isDigit)) {
            int shardId = Integer.parseInt(fileName);
            ShardId id = new ShardId(index, shardId);
            shardIds.add(id);
          }
        }
      }
    }
    return shardIds;
  }

  @Override
  public void close() {
    if (closed.compareAndSet(false, true) && locks != null) {
      for (Lock lock : locks) {
        try {
          logger.trace("releasing lock [{}]", lock);
          lock.close();
        } catch (IOException e) {
          logger.trace(
              (Supplier<?>) () -> new ParameterizedMessage("failed to release lock [{}]", lock), e);
        }
      }
    }
  }

  private void assertEnvIsLocked() {
    if (!closed.get() && locks != null) {
      for (Lock lock : locks) {
        try {
          lock.ensureValid();
        } catch (IOException e) {
          logger.warn("lock assertion failed", e);
          throw new IllegalStateException("environment is not locked", e);
        }
      }
    }
  }

  /**
   * This method tries to write an empty file and moves it using an atomic move operation. This
   * method throws an {@link IllegalStateException} if this operation is not supported by the
   * filesystem. This test is executed on each of the data directories. This method cleans up all
   * files even in the case of an error.
   */
  public void ensureAtomicMoveSupported() throws IOException {
    final NodePath[] nodePaths = nodePaths();
    for (NodePath nodePath : nodePaths) {
      assert Files.isDirectory(nodePath.path) : nodePath.path + " is not a directory";
      final Path src = nodePath.path.resolve("__es__.tmp");
      final Path target = nodePath.path.resolve("__es__.final");
      try {
        Files.createFile(src);
        Files.move(src, target, StandardCopyOption.ATOMIC_MOVE);
      } catch (AtomicMoveNotSupportedException ex) {
        throw new IllegalStateException(
            "atomic_move is not supported by the filesystem on path ["
                + nodePath.path
                + "] atomic_move is required for elasticsearch to work correctly.",
            ex);
      } finally {
        try {
          Files.deleteIfExists(src);
        } finally {
          Files.deleteIfExists(target);
        }
      }
    }
  }

  /**
   * Resolve the custom path for a index's shard. Uses the {@code IndexMetaData.SETTING_DATA_PATH}
   * setting to determine the root path for the index.
   *
   * @param indexSettings settings for the index
   */
  public Path resolveBaseCustomLocation(IndexSettings indexSettings) {
    String customDataDir = indexSettings.customDataPath();
    if (customDataDir != null) {
      // This assert is because this should be caught by MetaDataCreateIndexService
      assert sharedDataPath != null;
      if (ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.get(indexSettings.getNodeSettings())) {
        return sharedDataPath.resolve(customDataDir).resolve(Integer.toString(this.nodeLockId));
      } else {
        return sharedDataPath.resolve(customDataDir);
      }
    } else {
      throw new IllegalArgumentException(
          "no custom " + IndexMetaData.SETTING_DATA_PATH + " setting available");
    }
  }
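
  // e.g. for a custom data path of "my_custom" (hypothetical value) and node lock id 0, this
  // resolves to <shared data path>/my_custom/0, or <shared data path>/my_custom when
  // node.add_lock_id_to_custom_path is false.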

  /**
   * Resolve the custom path for a index's shard. Uses the {@code IndexMetaData.SETTING_DATA_PATH}
   * setting to determine the root path for the index.
   *
   * @param indexSettings settings for the index
   */
  private Path resolveIndexCustomLocation(IndexSettings indexSettings) {
    return resolveBaseCustomLocation(indexSettings).resolve(indexSettings.getUUID());
  }

  /**
   * Resolve the custom path for a index's shard. Uses the {@code IndexMetaData.SETTING_DATA_PATH}
   * setting to determine the root path for the index.
   *
   * @param indexSettings settings for the index
   * @param shardId shard to resolve the path to
   */
  public Path resolveCustomLocation(IndexSettings indexSettings, final ShardId shardId) {
    return resolveIndexCustomLocation(indexSettings).resolve(Integer.toString(shardId.id()));
  }

  /** Returns the {@code NodePath.path} for this shard. */
  public static Path shardStatePathToDataPath(Path shardPath) {
    int count = shardPath.getNameCount();

    // Sanity check:
    assert Integer.parseInt(shardPath.getName(count - 1).toString()) >= 0;
    assert "indices".equals(shardPath.getName(count - 3).toString());

    return shardPath.getParent().getParent().getParent();
  }
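
  // e.g. a shard state path like ${data.paths}/nodes/0/indices/{index.uuid}/2 maps back to the
  // node data path ${data.paths}/nodes/0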

  /**
   * This is a best effort to ensure that we actually have write permissions to write in all our
   * data directories. This prevents disasters if nodes are started under the wrong username etc.
   */
  private void assertCanWrite() throws IOException {
    for (Path path : nodeDataPaths()) { // check node-paths are writable
      tryWriteTempFile(path);
    }
    for (String indexFolderName : this.availableIndexFolders()) {
      for (Path indexPath :
          this.resolveIndexFolder(indexFolderName)) { // check index paths are writable
        Path indexStatePath = indexPath.resolve(MetaDataStateFormat.STATE_DIR_NAME);
        tryWriteTempFile(indexStatePath);
        tryWriteTempFile(indexPath);
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(indexPath)) {
          for (Path shardPath : stream) {
            String fileName = shardPath.getFileName().toString();
            if (Files.isDirectory(shardPath) && fileName.chars().allMatch(Character::isDigit)) {
              Path indexDir = shardPath.resolve(ShardPath.INDEX_FOLDER_NAME);
              Path statePath = shardPath.resolve(MetaDataStateFormat.STATE_DIR_NAME);
              Path translogDir = shardPath.resolve(ShardPath.TRANSLOG_FOLDER_NAME);
              tryWriteTempFile(indexDir);
              tryWriteTempFile(translogDir);
              tryWriteTempFile(statePath);
              tryWriteTempFile(shardPath);
            }
          }
        }
      }
    }
  }

  private static void tryWriteTempFile(Path path) throws IOException {
    if (Files.exists(path)) {
      Path resolve = path.resolve(".es_temp_file");
      try {
        Files.createFile(resolve);
        Files.deleteIfExists(resolve);
      } catch (IOException ex) {
        throw new IOException(
            "failed to write in data directory [" + path + "] write permission is required", ex);
      }
    }
  }
}