Example #1
0
  public boolean equalsAliases(MetaData other) {
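    // Returns false as soon as an index from `other` is missing here or its alias map differs;
    // otherwise the two metadata snapshots share the same aliases.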
    for (ObjectCursor<IndexMetaData> cursor : other.indices().values()) {
      IndexMetaData otherIndex = cursor.value;
      IndexMetaData thisIndex = index(otherIndex.getIndex());
      if (thisIndex == null) {
        return false;
      }
      if (otherIndex.getAliases().equals(thisIndex.getAliases()) == false) {
        return false;
      }
    }

    return true;
  }
Example #2
0
    public static void toXContent(
        MetaData metaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
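      // Writes cluster metadata as XContent: version, cluster_uuid, non-empty persistent
      // settings and all templates are always emitted; transient settings and per-index
      // metadata only in the API context; customs only if they declare the current context.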
      XContentContext context = XContentContext.valueOf(params.param(CONTEXT_MODE_PARAM, "API"));

      builder.startObject("meta-data");

      builder.field("version", metaData.version());
      builder.field("cluster_uuid", metaData.clusterUUID);

      if (!metaData.persistentSettings().getAsMap().isEmpty()) {
        builder.startObject("settings");
        for (Map.Entry<String, String> entry :
            metaData.persistentSettings().getAsMap().entrySet()) {
          builder.field(entry.getKey(), entry.getValue());
        }
        builder.endObject();
      }

      if (context == XContentContext.API && !metaData.transientSettings().getAsMap().isEmpty()) {
        builder.startObject("transient_settings");
        for (Map.Entry<String, String> entry : metaData.transientSettings().getAsMap().entrySet()) {
          builder.field(entry.getKey(), entry.getValue());
        }
        builder.endObject();
      }

      builder.startObject("templates");
      for (ObjectCursor<IndexTemplateMetaData> cursor : metaData.templates().values()) {
        IndexTemplateMetaData.Builder.toXContent(cursor.value, builder, params);
      }
      builder.endObject();

      if (context == XContentContext.API && !metaData.indices().isEmpty()) {
        builder.startObject("indices");
        for (IndexMetaData indexMetaData : metaData) {
          IndexMetaData.Builder.toXContent(indexMetaData, builder, params);
        }
        builder.endObject();
      }

      for (ObjectObjectCursor<String, Custom> cursor : metaData.customs()) {
        Custom proto = lookupPrototypeSafe(cursor.key);
        if (proto.context().contains(context)) {
          builder.startObject(cursor.key);
          cursor.value.toXContent(builder, params);
          builder.endObject();
        }
      }
      builder.endObject();
    }
Example #3
0
 private MetaData buildMetaDataForVersion(MetaData metaData, long version) {
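   // Derives a new MetaData from the given one: adds a "test<version>" index whose shard
   // count equals the version and records the version in a transient "test" setting.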
   ImmutableOpenMap.Builder<String, IndexMetaData> indices =
       ImmutableOpenMap.builder(metaData.indices());
   indices.put(
       "test" + version,
       IndexMetaData.builder("test" + version)
           .settings(
               Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
           .numberOfShards((int) version)
           .numberOfReplicas(0)
           .build());
   return MetaData.builder(metaData)
       .transientSettings(Settings.builder().put("test", version).build())
       .indices(indices.build())
       .build();
 }
Example #4
0
  @Override
  public void clusterChanged(ClusterChangedEvent event) {
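    // Keeps local index stores in sync with the cluster state: indices present in the
    // previously seen metadata but absent from the new one are deleted from disk and the
    // master is notified that their store is gone.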
    if (event.state().blocks().disableStatePersistence()) {
      // reset the current metadata, we need to start fresh...
      this.currentMetaData = null;
      return;
    }

    MetaData newMetaData = event.state().metaData();

    // delete indices that were there before, but are deleted now
    // we need to do it so they won't be detected as dangling
    if (currentMetaData != null) {
      // only delete indices when we already received a state (currentMetaData != null)
      for (IndexMetaData current : currentMetaData) {
        if (!newMetaData.hasIndex(current.index())) {
          logger.debug(
              "[{}] deleting index that is no longer part of the metadata (indices: [{}])",
              current.index(),
              newMetaData.indices().keys());
          if (nodeEnv.hasNodeFile()) {
            FileSystemUtils.deleteRecursively(nodeEnv.indexLocations(new Index(current.index())));
          }
          try {
            nodeIndexDeletedAction.nodeIndexStoreDeleted(
                event.state(), current.index(), event.state().nodes().localNodeId());
          } catch (Exception e) {
            logger.debug(
                "[{}] failed to notify master on local index store deletion", e, current.index());
          }
        }
      }
    }

    currentMetaData = newMetaData;
  }
  @Override
  public void clusterChanged(ClusterChangedEvent event) {
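    // On the master node this persists the global state and any index metadata that is new or
    // has a changed version; on every node with a node file it deletes on-disk indices that
    // left the cluster metadata and tracks dangling indices, either scheduling their deletion
    // or auto-importing them back into the cluster state.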
    if (event.state().blocks().disableStatePersistence()) {
      // reset the current metadata, we need to start fresh...
      this.currentMetaData = null;
      return;
    }

    MetaData newMetaData = event.state().metaData();
    // we don't check if metaData changed, since we might be called several times and we need to
    // check dangling...

    boolean success = true;
    // only applied to master node, writing the global and index level states
    if (event.state().nodes().localNode().masterNode()) {
      // check if the global state changed?
      if (currentMetaData == null || !MetaData.isGlobalStateEquals(currentMetaData, newMetaData)) {
        try {
          writeGlobalState("changed", newMetaData, currentMetaData);
        } catch (Exception e) {
          success = false;
        }
      }

      // check and write changes in indices
      for (IndexMetaData indexMetaData : newMetaData) {
        String writeReason = null;
        IndexMetaData currentIndexMetaData;
        if (currentMetaData == null) {
          // a new event..., check from the state stored
          currentIndexMetaData = loadIndex(indexMetaData.index());
        } else {
          currentIndexMetaData = currentMetaData.index(indexMetaData.index());
        }
        if (currentIndexMetaData == null) {
          writeReason = "freshly created";
        } else if (currentIndexMetaData.version() != indexMetaData.version()) {
          writeReason =
              "version changed from ["
                  + currentIndexMetaData.version()
                  + "] to ["
                  + indexMetaData.version()
                  + "]";
        }

        // we update the writeReason only if we really need to write it
        if (writeReason == null) {
          continue;
        }

        try {
          writeIndex(writeReason, indexMetaData, currentIndexMetaData);
        } catch (Exception e) {
          success = false;
        }
      }
    }

    // delete indices that were there before, but are deleted now
    // we need to do it so they won't be detected as dangling
    if (nodeEnv.hasNodeFile()) {
      if (currentMetaData != null) {
        // only delete indices when we already received a state (currentMetaData != null)
        // and we had a go at processing dangling indices at least once
        // this will also delete the _state of the index itself
        for (IndexMetaData current : currentMetaData) {
          if (danglingIndices.containsKey(current.index())) {
            continue;
          }
          if (!newMetaData.hasIndex(current.index())) {
            logger.debug(
                "[{}] deleting index that is no longer part of the metadata (indices: [{}])",
                current.index(),
                newMetaData.indices().keySet());
            FileSystemUtils.deleteRecursively(nodeEnv.indexLocations(new Index(current.index())));
          }
        }
      }
    }

    // handle dangling indices, we handle those for all nodes that have a node file (data or master)
    if (nodeEnv.hasNodeFile()) {
      if (danglingTimeout.millis() >= 0) {
        synchronized (danglingMutex) {
          for (String danglingIndex : danglingIndices.keySet()) {
            if (newMetaData.hasIndex(danglingIndex)) {
              logger.debug("[{}] no longer dangling (created), removing", danglingIndex);
              DanglingIndex removed = danglingIndices.remove(danglingIndex);
              removed.future.cancel(false);
            }
          }
          // delete indices that are no longer part of the metadata
          try {
            for (String indexName : nodeEnv.findAllIndices()) {
              // if we have the index on the metadata, don't delete it
              if (newMetaData.hasIndex(indexName)) {
                continue;
              }
              if (danglingIndices.containsKey(indexName)) {
                // already dangling, continue
                continue;
              }
              IndexMetaData indexMetaData = loadIndex(indexName);
              if (indexMetaData != null) {
                if (danglingTimeout.millis() == 0) {
                  logger.info(
                      "[{}] dangling index, exists on local file system, but not in cluster metadata, timeout set to 0, deleting now",
                      indexName);
                  FileSystemUtils.deleteRecursively(nodeEnv.indexLocations(new Index(indexName)));
                } else {
                  logger.info(
                      "[{}] dangling index, exists on local file system, but not in cluster metadata, scheduling to delete in [{}], auto import to cluster state [{}]",
                      indexName,
                      danglingTimeout,
                      autoImportDangled);
                  danglingIndices.put(
                      indexName,
                      new DanglingIndex(
                          indexName,
                          threadPool.schedule(
                              danglingTimeout,
                              ThreadPool.Names.SAME,
                              new RemoveDanglingIndex(indexName))));
                }
              }
            }
          } catch (Exception e) {
            logger.warn("failed to find dangling indices", e);
          }
        }
      }
      if (autoImportDangled.shouldImport() && !danglingIndices.isEmpty()) {
        final List<IndexMetaData> dangled = Lists.newArrayList();
        for (String indexName : danglingIndices.keySet()) {
          IndexMetaData indexMetaData = loadIndex(indexName);
          if (indexMetaData == null) {
            logger.debug("failed to find state for dangling index [{}]", indexName);
            continue;
          }
          // we might have someone copying over an index, renaming the directory, handle that
          if (!indexMetaData.index().equals(indexName)) {
            logger.info(
                "dangled index directory name is [{}], state name is [{}], renaming to directory name",
                indexName,
                indexMetaData.index());
            indexMetaData =
                IndexMetaData.newIndexMetaDataBuilder(indexMetaData).index(indexName).build();
          }
          if (autoImportDangled == AutoImportDangledState.CLOSED) {
            indexMetaData =
                IndexMetaData.newIndexMetaDataBuilder(indexMetaData)
                    .state(IndexMetaData.State.CLOSE)
                    .build();
          }
          if (indexMetaData != null) {
            dangled.add(indexMetaData);
          }
        }
        IndexMetaData[] dangledIndices = dangled.toArray(new IndexMetaData[dangled.size()]);
        try {
          allocateDangledIndices.allocateDangled(
              dangledIndices,
              new LocalAllocateDangledIndices.Listener() {
                @Override
                public void onResponse(
                    LocalAllocateDangledIndices.AllocateDangledResponse response) {
                  logger.trace("allocated dangled");
                }

                @Override
                public void onFailure(Throwable e) {
                  logger.info("failed to send allocated dangled", e);
                }
              });
        } catch (Exception e) {
          logger.warn("failed to send allocate dangled", e);
        }
      }
    }

    if (success) {
      currentMetaData = newMetaData;
    }
  }
  @Test
  public void testNoRebalanceOnPrimaryOverload() {
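    // Builds a fixed shard layout via a hand-written allocator (node1 ends up with the most
    // primaries), then checks that starting shards and rerouting with the real allocation
    // service leaves every shard STARTED, i.e. no rebalance is triggered by primary overload.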
    Settings.Builder settings = settingsBuilder();
    AllocationService strategy =
        new AllocationService(
            settings.build(),
            randomAllocationDeciders(
                settings.build(),
                new NodeSettingsService(Settings.Builder.EMPTY_SETTINGS),
                getRandom()),
            new ShardsAllocators(
                settings.build(),
                NoopGatewayAllocator.INSTANCE,
                new ShardsAllocator() {

                  @Override
                  public boolean rebalance(RoutingAllocation allocation) {
                    return false;
                  }

                  @Override
                  public boolean move(
                      ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
                    return false;
                  }

                  @Override
                  public void applyStartedShards(StartedRerouteAllocation allocation) {}

                  @Override
                  public void applyFailedShards(FailedRerouteAllocation allocation) {}

                  /*
                  *  // this allocator tries to rebuild this scenario where a rebalance is
                  *  // triggered solely by the primary overload on node [1] where a shard
                  *  // is rebalanced to node 0
                     routing_nodes:
                     -----node_id[0][V]
                     --------[test][0], node[0], [R], s[STARTED]
                     --------[test][4], node[0], [R], s[STARTED]
                     -----node_id[1][V]
                     --------[test][0], node[1], [P], s[STARTED]
                     --------[test][1], node[1], [P], s[STARTED]
                     --------[test][3], node[1], [R], s[STARTED]
                     -----node_id[2][V]
                     --------[test][1], node[2], [R], s[STARTED]
                     --------[test][2], node[2], [R], s[STARTED]
                     --------[test][4], node[2], [P], s[STARTED]
                     -----node_id[3][V]
                     --------[test][2], node[3], [P], s[STARTED]
                     --------[test][3], node[3], [P], s[STARTED]
                     ---- unassigned
                  */
                  @Override
                  public boolean allocateUnassigned(RoutingAllocation allocation) {
                    RoutingNodes.UnassignedShards unassigned =
                        allocation.routingNodes().unassigned();
                    boolean changed = !unassigned.isEmpty();
                    for (ShardRouting sr : unassigned) {
                      switch (sr.id()) {
                        case 0:
                          if (sr.primary()) {
                            allocation.routingNodes().initialize(sr, "node1");
                          } else {
                            allocation.routingNodes().initialize(sr, "node0");
                          }
                          break;
                        case 1:
                          if (sr.primary()) {
                            allocation.routingNodes().initialize(sr, "node1");
                          } else {
                            allocation.routingNodes().initialize(sr, "node2");
                          }
                          break;
                        case 2:
                          if (sr.primary()) {
                            allocation.routingNodes().initialize(sr, "node3");
                          } else {
                            allocation.routingNodes().initialize(sr, "node2");
                          }
                          break;
                        case 3:
                          if (sr.primary()) {
                            allocation.routingNodes().initialize(sr, "node3");
                          } else {
                            allocation.routingNodes().initialize(sr, "node1");
                          }
                          break;
                        case 4:
                          if (sr.primary()) {
                            allocation.routingNodes().initialize(sr, "node2");
                          } else {
                            allocation.routingNodes().initialize(sr, "node0");
                          }
                          break;
                      }
                    }
                    unassigned.clear();
                    return changed;
                  }
                }),
            EmptyClusterInfoService.INSTANCE);
    MetaData.Builder metaDataBuilder = MetaData.builder();
    RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
    IndexMetaData.Builder indexMeta =
        IndexMetaData.builder("test")
            .settings(settings(Version.CURRENT))
            .numberOfShards(5)
            .numberOfReplicas(1);
    metaDataBuilder = metaDataBuilder.put(indexMeta);
    MetaData metaData = metaDataBuilder.build();
    for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
      routingTableBuilder.addAsNew(cursor.value);
    }
    RoutingTable routingTable = routingTableBuilder.build();
    DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
    for (int i = 0; i < 4; i++) {
      DiscoveryNode node = newNode("node" + i);
      nodes.put(node);
    }

    ClusterState clusterState =
        ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT)
            .nodes(nodes)
            .metaData(metaData)
            .routingTable(routingTable)
            .build();
    routingTable = strategy.reroute(clusterState).routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
    RoutingNodes routingNodes = clusterState.getRoutingNodes();

    for (RoutingNode routingNode : routingNodes) {
      for (ShardRouting shardRouting : routingNode) {
        assertThat(shardRouting.state(), Matchers.equalTo(ShardRoutingState.INITIALIZING));
      }
    }
    strategy = createAllocationService(settings.build());

    logger.info("use the new allocator and check if it moves shards");
    routingNodes = clusterState.getRoutingNodes();
    routingTable =
        strategy
            .applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING))
            .routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
    routingNodes = clusterState.getRoutingNodes();
    for (RoutingNode routingNode : routingNodes) {
      for (ShardRouting shardRouting : routingNode) {
        assertThat(shardRouting.state(), Matchers.equalTo(ShardRoutingState.STARTED));
      }
    }

    logger.info("start the replica shards");
    routingTable =
        strategy
            .applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING))
            .routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
    routingNodes = clusterState.getRoutingNodes();

    for (RoutingNode routingNode : routingNodes) {
      for (ShardRouting shardRouting : routingNode) {
        assertThat(shardRouting.state(), Matchers.equalTo(ShardRoutingState.STARTED));
      }
    }

    logger.info("rebalancing");
    routingTable = strategy.reroute(clusterState).routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
    routingNodes = clusterState.getRoutingNodes();

    for (RoutingNode routingNode : routingNodes) {
      for (ShardRouting shardRouting : routingNode) {
        assertThat(shardRouting.state(), Matchers.equalTo(ShardRoutingState.STARTED));
      }
    }
  }
  private ClusterState initCluster(AllocationService strategy) {
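    // Creates numberOfIndices test indices across numberOfNodes nodes, starts primaries and
    // replicas, and keeps applying started shards until the routing table stops changing.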
    MetaData.Builder metaDataBuilder = MetaData.builder();
    RoutingTable.Builder routingTableBuilder = RoutingTable.builder();

    for (int i = 0; i < numberOfIndices; i++) {
      IndexMetaData.Builder index =
          IndexMetaData.builder("test" + i)
              .settings(settings(Version.CURRENT))
              .numberOfShards(numberOfShards)
              .numberOfReplicas(numberOfReplicas);
      metaDataBuilder = metaDataBuilder.put(index);
    }

    MetaData metaData = metaDataBuilder.build();

    for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
      routingTableBuilder.addAsNew(cursor.value);
    }

    RoutingTable routingTable = routingTableBuilder.build();

    logger.info("start " + numberOfNodes + " nodes");
    DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
    for (int i = 0; i < numberOfNodes; i++) {
      nodes.put(newNode("node" + i));
    }
    ClusterState clusterState =
        ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT)
            .nodes(nodes)
            .metaData(metaData)
            .routingTable(routingTable)
            .build();
    routingTable = strategy.reroute(clusterState).routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
    RoutingNodes routingNodes = clusterState.getRoutingNodes();

    logger.info("restart all the primary shards, replicas will start initializing");
    routingNodes = clusterState.getRoutingNodes();
    routingTable =
        strategy
            .applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING))
            .routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
    routingNodes = clusterState.getRoutingNodes();

    logger.info("start the replica shards");
    routingNodes = clusterState.getRoutingNodes();
    routingTable =
        strategy
            .applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING))
            .routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
    routingNodes = clusterState.getRoutingNodes();

    logger.info("complete rebalancing");
    RoutingTable prev = routingTable;
    while (true) {
      routingTable =
          strategy
              .applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING))
              .routingTable();
      clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
      routingNodes = clusterState.getRoutingNodes();
      if (routingTable == prev) break;
      prev = routingTable;
    }

    return clusterState;
  }