private void processAnalyzerFactory(
      String name,
      AnalyzerProvider<?> analyzerFactory,
      Map<String, NamedAnalyzer> analyzerAliases,
      Map<String, NamedAnalyzer> analyzers) {
    /*
     * Lucene defaults positionIncrementGap to 0 in all analyzers, but
     * Elasticsearch defaults it to 0 only before version 2.0 and to 100
     * afterwards, so we override the positionIncrementGap here if it
     * doesn't match.
     */
    int overridePositionIncrementGap = TextFieldMapper.Defaults.POSITION_INCREMENT_GAP;
    if (analyzerFactory instanceof CustomAnalyzerProvider) {
      ((CustomAnalyzerProvider) analyzerFactory).build(this);
      /*
       * Custom analyzers already default to the correct, version-dependent
       * positionIncrementGap, and the user is able to configure the
       * positionIncrementGap directly on the analyzer, so we disable
       * overriding the positionIncrementGap to preserve the user's setting.
       */
      overridePositionIncrementGap = Integer.MIN_VALUE;
    }
    Analyzer analyzerF = analyzerFactory.get();
    if (analyzerF == null) {
      throw new IllegalArgumentException(
          "analyzer [" + analyzerFactory.name() + "] created null analyzer");
    }
    NamedAnalyzer analyzer;
    if (analyzerF instanceof NamedAnalyzer) {
      // if we got a named analyzer back, use it...
      analyzer = (NamedAnalyzer) analyzerF;
      if (overridePositionIncrementGap >= 0
          && analyzer.getPositionIncrementGap(analyzer.name()) != overridePositionIncrementGap) {
        // unless the positionIncrementGap needs to be overridden
        analyzer = new NamedAnalyzer(analyzer, overridePositionIncrementGap);
      }
    } else {
      analyzer =
          new NamedAnalyzer(name, analyzerFactory.scope(), analyzerF, overridePositionIncrementGap);
    }
    if (analyzers.containsKey(name)) {
      throw new IllegalStateException("already registered analyzer with name: " + name);
    }
    analyzers.put(name, analyzer);
    // TODO: remove alias support completely when we no longer support pre 5.0 indices
    final String analyzerAliasKey = "index.analysis.analyzer." + analyzerFactory.name() + ".alias";
    if (indexSettings.getSettings().get(analyzerAliasKey) != null) {
      if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_5_0_0_alpha6)) {
        // do not allow alias creation if the index was created on or after v5.0 alpha6
        throw new IllegalArgumentException("setting [" + analyzerAliasKey + "] is not supported");
      }

      // the setting has been removed, but we still honor it when loading indices created before v5.0
      deprecationLogger.deprecated(
          "setting [{}] is only allowed on index [{}] because it was created before 5.x; "
              + "analyzer aliases can no longer be created on new indices.",
          analyzerAliasKey,
          index().getName());
      Set<String> aliases =
          Sets.newHashSet(indexSettings.getSettings().getAsArray(analyzerAliasKey));
      for (String alias : aliases) {
        if (analyzerAliases.putIfAbsent(alias, analyzer) != null) {
          throw new IllegalStateException(
              "alias ["
                  + alias
                  + "] is already used by ["
                  + analyzerAliases.get(alias).name()
                  + "]");
        }
      }
    }
  }
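The registration logic above rejects a duplicate analyzer name outright and uses putIfAbsent so that an alias can only ever be claimed once. Below is a minimal, self-contained sketch of that same register-and-guard pattern using plain JDK maps; the AnalyzerHandle record and the names used are hypothetical stand-ins for NamedAnalyzer, not Elasticsearch classes.

import java.util.HashMap;
import java.util.Map;

final class RegistryDemo {
  // Hypothetical stand-in for NamedAnalyzer; only the name matters for this sketch.
  record AnalyzerHandle(String name) {}

  public static void main(String[] args) {
    Map<String, AnalyzerHandle> analyzers = new HashMap<>();
    Map<String, AnalyzerHandle> aliases = new HashMap<>();

    AnalyzerHandle std = new AnalyzerHandle("standard");
    // Primary registration: a second registration under the same name is an error.
    if (analyzers.containsKey(std.name())) {
      throw new IllegalStateException("already registered analyzer with name: " + std.name());
    }
    analyzers.put(std.name(), std);

    // Alias registration: putIfAbsent returns the previous mapping if the alias is already taken.
    AnalyzerHandle previous = aliases.putIfAbsent("default", std);
    if (previous != null) {
      throw new IllegalStateException(
          "alias [default] is already used by [" + previous.name() + "]");
    }
    System.out.println("registered " + analyzers.keySet() + " with aliases " + aliases.keySet());
  }
}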
  private void createStaleReplicaScenario() throws Exception {
    logger.info("--> starting 3 nodes, 1 master, 2 data");
    String master = internalCluster().startMasterOnlyNode(Settings.EMPTY);
    internalCluster().startDataOnlyNodes(2);

    assertAcked(
        client()
            .admin()
            .indices()
            .prepareCreate("test")
            .setSettings(
                Settings.builder()
                    .put("index.number_of_shards", 1)
                    .put("index.number_of_replicas", 1))
            .get());
    ensureGreen();
    logger.info("--> indexing...");
    client()
        .prepareIndex("test", "type1")
        .setSource(jsonBuilder().startObject().field("field", "value1").endObject())
        .get();
    refresh();

    ClusterState state = client().admin().cluster().prepareState().all().get().getState();
    List<ShardRouting> shards = state.routingTable().allShards("test");
    assertThat(shards.size(), equalTo(2));

    final String primaryNode;
    final String replicaNode;
    if (shards.get(0).primary()) {
      primaryNode = state.getRoutingNodes().node(shards.get(0).currentNodeId()).node().getName();
      replicaNode = state.getRoutingNodes().node(shards.get(1).currentNodeId()).node().getName();
    } else {
      primaryNode = state.getRoutingNodes().node(shards.get(1).currentNodeId()).node().getName();
      replicaNode = state.getRoutingNodes().node(shards.get(0).currentNodeId()).node().getName();
    }

    NetworkDisruption partition =
        new NetworkDisruption(
            new TwoPartitions(
                Sets.newHashSet(master, replicaNode), Collections.singleton(primaryNode)),
            new NetworkDisconnect());
    internalCluster().setDisruptionScheme(partition);
    logger.info("--> partitioning node with primary shard from rest of cluster");
    partition.startDisrupting();

    ensureStableCluster(2, master);

    logger.info("--> index a document into previous replica shard (that is now primary)");
    client(replicaNode)
        .prepareIndex("test", "type1")
        .setSource(jsonBuilder().startObject().field("field", "value1").endObject())
        .get();

    logger.info("--> shut down node that has new acknowledged document");
    internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode));

    ensureStableCluster(1, master);

    partition.stopDisrupting();

    logger.info("--> waiting for node with old primary shard to rejoin the cluster");
    ensureStableCluster(2, master);

    logger.info("--> check that old primary shard does not get promoted to primary again");
    // kick reroute and wait for all shard states to be fetched
    client(master).admin().cluster().prepareReroute().get();
    assertBusy(
        () ->
            assertThat(
                internalCluster()
                    .getInstance(GatewayAllocator.class, master)
                    .getNumberOfInFlightFetch(),
                equalTo(0)));
    // kick reroute a second time and check that all shards are unassigned
    assertThat(
        client(master)
            .admin()
            .cluster()
            .prepareReroute()
            .get()
            .getState()
            .getRoutingNodes()
            .unassigned()
            .size(),
        equalTo(2));
  }
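The reroute check in this scenario leans on assertBusy, which retries an assertion until it passes or a timeout elapses. The following is a minimal sketch of that polling idea in plain Java; the helper name, timings, and sample condition are illustrative and not the ESTestCase implementation.

import java.util.function.BooleanSupplier;

final class PollingDemo {
  // Illustrative retry helper: polls the condition until it holds or the timeout expires.
  static void awaitCondition(BooleanSupplier condition, long timeoutMillis) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (!condition.getAsBoolean()) {
      if (System.currentTimeMillis() > deadline) {
        throw new AssertionError("condition not met within " + timeoutMillis + " ms");
      }
      Thread.sleep(50); // back off briefly between checks
    }
  }

  public static void main(String[] args) throws InterruptedException {
    long start = System.currentTimeMillis();
    // Example condition: pretend the "in-flight fetches" drain after ~200 ms.
    awaitCondition(() -> System.currentTimeMillis() - start > 200, 5_000);
    System.out.println("condition satisfied, as assertBusy would report");
  }
}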
  /**
   * Class that abstracts over specific map implementation type and value kind (Diffable or not)
   *
   * @param <T> map type
   * @param <V> value type
   */
  public abstract class MapDriver<T, V> {
    protected final Set<Integer> keys = randomPositiveIntSet();
    protected final Set<Integer> keysToRemove =
        new HashSet<>(
            randomSubsetOf(randomInt(keys.size()), keys.toArray(new Integer[keys.size()])));
    protected final Set<Integer> keysThatAreNotRemoved = Sets.difference(keys, keysToRemove);
    protected final Set<Integer> keysToOverride =
        new HashSet<>(
            randomSubsetOf(
                randomInt(keysThatAreNotRemoved.size()),
                keysThatAreNotRemoved.toArray(new Integer[keysThatAreNotRemoved.size()])));
    protected final Set<Integer> keysToAdd =
        Sets.difference(
            randomPositiveIntSet(), keys); // make sure keysToAdd does not contain elements in keys
    protected final Set<Integer> keysUnchanged =
        Sets.difference(keysThatAreNotRemoved, keysToOverride);

    protected final DiffableUtils.KeySerializer<Integer> keySerializer =
        randomBoolean()
            ? DiffableUtils.getIntKeySerializer()
            : DiffableUtils.getVIntKeySerializer();

    protected final boolean useProtoForDiffableSerialization = randomBoolean();

    private Set<Integer> randomPositiveIntSet() {
      int maxSetSize = randomInt(6);
      Set<Integer> result = new HashSet<>();
      for (int i = 0; i < maxSetSize; i++) {
        // due to duplicates, set size can be smaller than maxSetSize
        result.add(randomIntBetween(0, 100));
      }
      return result;
    }

    /** whether we operate on {@link org.elasticsearch.cluster.Diffable} values */
    protected abstract boolean diffableValues();

    /** function that determines the value in the "before" or "after" map based on the key */
    protected abstract V createValue(Integer key, boolean before);

    /** creates the map under test from a JDK map */
    protected abstract T createMap(Map<Integer, V> values);

    /** calculates diff between two maps */
    protected abstract MapDiff<Integer, V, T> diff(T before, T after);

    /** reads diff of maps from stream */
    protected abstract MapDiff<Integer, V, T> readDiff(StreamInput in) throws IOException;

    /** gets element at key "key" in map "map" */
    protected abstract V get(T map, Integer key);

    /** returns size of given map */
    protected abstract int size(T map);

    /** executes the actual test */
    public void execute() throws IOException {
      logger.debug("Keys in 'before' map: {}", keys);
      logger.debug("Keys to remove: {}", keysToRemove);
      logger.debug("Keys to override: {}", keysToOverride);
      logger.debug("Keys to add: {}", keysToAdd);

      logger.debug("--> creating 'before' map");
      Map<Integer, V> before = new HashMap<>();
      for (Integer key : keys) {
        before.put(key, createValue(key, true));
      }
      T beforeMap = createMap(before);

      logger.debug("--> creating 'after' map");
      Map<Integer, V> after = new HashMap<>();
      after.putAll(before);
      for (Integer key : keysToRemove) {
        after.remove(key);
      }
      for (Integer key : keysToOverride) {
        after.put(key, createValue(key, false));
      }
      for (Integer key : keysToAdd) {
        after.put(key, createValue(key, false));
      }
      T afterMap = createMap(unmodifiableMap(after));

      MapDiff<Integer, V, T> diffMap = diff(beforeMap, afterMap);

      // check properties of diffMap
      assertThat(new HashSet<>(diffMap.getDeletes()), equalTo(keysToRemove));
      if (diffableValues()) {
        assertThat(diffMap.getDiffs().keySet(), equalTo(keysToOverride));
        for (Integer key : keysToOverride) {
          assertThat(
              diffMap.getDiffs().get(key).apply(get(beforeMap, key)), equalTo(get(afterMap, key)));
        }
        assertThat(diffMap.getUpserts().keySet(), equalTo(keysToAdd));
        for (Integer key : keysToAdd) {
          assertThat(diffMap.getUpserts().get(key), equalTo(get(afterMap, key)));
        }
      } else {
        assertThat(diffMap.getDiffs(), equalTo(emptyMap()));
        Set<Integer> keysToAddAndOverride = Sets.union(keysToAdd, keysToOverride);
        assertThat(diffMap.getUpserts().keySet(), equalTo(keysToAddAndOverride));
        for (Integer key : keysToAddAndOverride) {
          assertThat(diffMap.getUpserts().get(key), equalTo(get(afterMap, key)));
        }
      }

      if (randomBoolean()) {
        logger.debug("--> serializing diff");
        BytesStreamOutput out = new BytesStreamOutput();
        diffMap.writeTo(out);
        StreamInput in = StreamInput.wrap(out.bytes());
        logger.debug("--> reading diff back");
        diffMap = readDiff(in);
      }
      T appliedDiffMap = diffMap.apply(beforeMap);

      // check properties of appliedDiffMap
      assertThat(
          size(appliedDiffMap), equalTo(keys.size() - keysToRemove.size() + keysToAdd.size()));
      for (Integer key : keysToRemove) {
        assertThat(get(appliedDiffMap, key), nullValue());
      }
      for (Integer key : keysUnchanged) {
        assertThat(get(appliedDiffMap, key), equalTo(get(beforeMap, key)));
      }
      for (Integer key : keysToOverride) {
        assertThat(get(appliedDiffMap, key), not(equalTo(get(beforeMap, key))));
        assertThat(get(appliedDiffMap, key), equalTo(get(afterMap, key)));
      }
      for (Integer key : keysToAdd) {
        assertThat(get(appliedDiffMap, key), equalTo(get(afterMap, key)));
      }
    }
  }
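The driver above exercises a MapDiff that records deletes, upserts, and (for diffable values) per-key diffs, then applies them to the "before" map and checks that the result matches "after". Here is a self-contained sketch of the non-diffable case, assuming nothing beyond the JDK: it computes deletes and full-value upserts between two maps and applies them, mirroring the test's final assertion. The SimpleMapDiff name is hypothetical.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

final class MapDiffDemo {
  // Simplified diff for non-diffable values: removed keys plus full replacement values.
  record SimpleMapDiff<K, V>(Set<K> deletes, Map<K, V> upserts) {
    Map<K, V> apply(Map<K, V> before) {
      Map<K, V> result = new HashMap<>(before);
      deletes.forEach(result::remove);
      result.putAll(upserts);
      return result;
    }
  }

  static <K, V> SimpleMapDiff<K, V> diff(Map<K, V> before, Map<K, V> after) {
    Set<K> deletes = new HashSet<>(before.keySet());
    deletes.removeAll(after.keySet());
    Map<K, V> upserts = new HashMap<>();
    for (Map.Entry<K, V> entry : after.entrySet()) {
      // Upsert anything that is new or whose value changed.
      if (!entry.getValue().equals(before.get(entry.getKey()))) {
        upserts.put(entry.getKey(), entry.getValue());
      }
    }
    return new SimpleMapDiff<>(deletes, upserts);
  }

  public static void main(String[] args) {
    Map<Integer, String> before = Map.of(1, "a", 2, "b", 3, "c");
    Map<Integer, String> after = Map.of(2, "b", 3, "c2", 4, "d");
    SimpleMapDiff<Integer, String> diff = diff(before, after);
    // Applying the diff to 'before' must reproduce 'after'.
    System.out.println(diff.apply(before).equals(after)); // true
  }
}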
  public void testDisconnectFromNewlyAddedNodesIfClusterStatePublishingFails()
      throws InterruptedException {
    TimedClusterService timedClusterService =
        new TimedClusterService(
            Settings.builder().put("cluster.name", "ClusterServiceTests").build(),
            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
            threadPool);
    timedClusterService.setLocalNode(
        new DiscoveryNode(
            "node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT));
    Set<DiscoveryNode> currentNodes = Collections.synchronizedSet(new HashSet<>());
    currentNodes.add(timedClusterService.localNode());
    timedClusterService.setNodeConnectionsService(
        new NodeConnectionsService(Settings.EMPTY, null, null) {
          @Override
          public void connectToNodes(List<DiscoveryNode> addedNodes) {
            currentNodes.addAll(addedNodes);
          }

          @Override
          public void disconnectFromNodes(List<DiscoveryNode> removedNodes) {
            currentNodes.removeAll(removedNodes);
          }
        });
    AtomicBoolean failToCommit = new AtomicBoolean();
    timedClusterService.setClusterStatePublisher(
        (event, ackListener) -> {
          if (failToCommit.get()) {
            throw new Discovery.FailedToCommitClusterStateException("just to test this");
          }
        });
    timedClusterService.start();
    ClusterState state = timedClusterService.state();
    final DiscoveryNodes nodes = state.nodes();
    final DiscoveryNodes.Builder nodesBuilder =
        DiscoveryNodes.builder(nodes).masterNodeId(nodes.getLocalNodeId());
    state =
        ClusterState.builder(state)
            .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK)
            .nodes(nodesBuilder)
            .build();
    setState(timedClusterService, state);

    assertThat(currentNodes, equalTo(Sets.newHashSet(timedClusterService.state().getNodes())));

    final CountDownLatch latch = new CountDownLatch(1);

    // try to add node when cluster state publishing fails
    failToCommit.set(true);
    timedClusterService.submitStateUpdateTask(
        "test",
        new ClusterStateUpdateTask() {
          @Override
          public ClusterState execute(ClusterState currentState) throws Exception {
            DiscoveryNode newNode =
                new DiscoveryNode(
                    "node2",
                    buildNewFakeTransportAddress(),
                    emptyMap(),
                    emptySet(),
                    Version.CURRENT);
            return ClusterState.builder(currentState)
                .nodes(DiscoveryNodes.builder(currentState.nodes()).add(newNode))
                .build();
          }

          @Override
          public void clusterStateProcessed(
              String source, ClusterState oldState, ClusterState newState) {
            latch.countDown();
          }

          @Override
          public void onFailure(String source, Exception e) {
            latch.countDown();
          }
        });

    latch.await();
    assertThat(currentNodes, equalTo(Sets.newHashSet(timedClusterService.state().getNodes())));
    timedClusterService.close();
  }
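The test above blocks on a CountDownLatch until either clusterStateProcessed or onFailure fires, so the final assertions only run after the update task has completed one way or the other. Below is a minimal, stand-alone sketch of that wait-for-callback pattern; the CompletionListener interface is a hypothetical stand-in for ClusterStateUpdateTask's notification methods.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

final class LatchDemo {
  // Hypothetical callback interface standing in for the task's success/failure notifications.
  interface CompletionListener {
    void onSuccess();
    void onFailure(Exception e);
  }

  public static void main(String[] args) throws InterruptedException {
    CountDownLatch latch = new CountDownLatch(1);
    CompletionListener listener = new CompletionListener() {
      @Override public void onSuccess() { latch.countDown(); }
      @Override public void onFailure(Exception e) { latch.countDown(); }
    };

    // Simulate an asynchronous task that eventually fails, as in the publish-failure test.
    new Thread(() -> listener.onFailure(new RuntimeException("failed to commit"))).start();

    // Wait for either outcome before asserting anything; a timeout avoids hanging forever.
    if (!latch.await(10, TimeUnit.SECONDS)) {
      throw new AssertionError("task did not complete in time");
    }
    System.out.println("task completed; safe to assert on the resulting state");
  }
}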