private static Client startClient(Path tempDir, TransportAddress... transportAddresses) {
    // Client-only settings; system-property overrides are disabled so the test
    // runs with exactly the settings specified here.
    Settings settings =
        Settings.settingsBuilder()
            .put("name", "qa_smoke_client_" + counter.getAndIncrement())
            // prevents any settings to be replaced by system properties.
            .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true)
            .put("client.transport.ignore_cluster_name", true)
            .put(Environment.PATH_HOME_SETTING.getKey(), tempDir)
            .put("node.mode", "network") // we require network here!
            .build();

    TransportClient client =
        TransportClient.builder()
            .settings(settings)
            .build()
            .addTransportAddresses(transportAddresses);

    logger.info("--> Elasticsearch Java TransportClient started");

    // Probe the cluster once; if it is unreachable, skip the test (assume)
    // rather than fail it.
    Exception failure = null;
    try {
      ClusterHealthResponse health = client.admin().cluster().prepareHealth().get();
      logger.info(
          "--> connected to [{}] cluster which is running [{}] node(s).",
          health.getClusterName(),
          health.getNumberOfNodes());
    } catch (Exception e) {
      failure = e;
    }

    assumeNoException("Sounds like your cluster is not running at " + clusterAddresses, failure);

    return client;
  }
  public void testConsistencyAfterIndexCreationFailure() {
    // Start from a clean slate: the index may or may not exist.
    logger.info("--> deleting test index....");
    try {
      client().admin().indices().prepareDelete("test").get();
    } catch (IndexNotFoundException ignored) {
      // Ignore: nothing to delete.
    }

    // An invalid shard count must be rejected with a SettingsException.
    logger.info("--> creating test index with invalid settings ");
    try {
      client().admin().indices().prepareCreate("test")
          .setSettings(settingsBuilder().put("number_of_shards", "bad"))
          .get();
      fail();
    } catch (SettingsException expected) {
      // Expected
    }

    // After the failed attempt, creating with valid settings must still work.
    logger.info("--> creating test index with valid settings ");
    CreateIndexResponse createResponse =
        client().admin().indices().prepareCreate("test")
            .setSettings(settingsBuilder().put("number_of_shards", 1))
            .get();
    assertThat(createResponse.isAcknowledged(), equalTo(true));
  }
Example #3
0
  @Override
  public void run() {
    // Runs the queued tasks one after another; when the queue is exhausted,
    // removes this river's mapping from the "_river" index.
    if (logger.isDebugEnabled()) {
      logger.debug("Create task manager thread.");
    }

    do {
      logger.info("TaskManager: current task index: {}", currentTaskIndex);
      try {
        String output = currentTask.Run();
        // BUG FIX: the output used to be logged twice, once with the malformed
        // "{[]}" placeholder syntax (printed literally). One parameterized
        // statement replaces both lines.
        logger.info("Task [{}] output: {}", currentTask.id(), output);
      } catch (IOException ex) {
        logger.error("TaskManager: IOException", ex); // keep the stack trace
      } catch (InterruptedException ex) {
        logger.error("TaskManager: Interrupted Exception", ex);
        Thread.currentThread().interrupt(); // restore the interrupt status
      }

      currentTask = GetNextTask();
    } while (null != currentTask);

    DeleteMappingRequest req = new DeleteMappingRequest("_river");
    req.type(river.riverName().name());
    DeleteMappingResponse resp = client.admin().indices().deleteMapping(req).actionGet();
    logger.info("TaskManager: delete request: {}", resp);
  }
 @Override
 protected void doClose() throws ElasticsearchException {
   // Cancels outstanding knapsack tasks, then shuts the executor down,
   // escalating to shutdownNow() if it does not terminate in time.
   int size = tasks.size();
   if (size > 0) {
     for (Future<?> f : tasks) {
       if (!f.isDone()) {
         logger.info("aborting knapsack task {}", f);
         boolean b = f.cancel(true);
         if (!b) {
           logger.error("knapsack task {} could not be cancelled", f);
         }
       }
     }
     tasks.clear();
   }
   logger.info("knapsack shutdown...");
   executor.shutdown();
   try {
     // BUG FIX: the original checked executor.isShutdown(), which is true as
     // soon as shutdown() has been called, so the shutdownNow() fallback was
     // unreachable. Use awaitTermination()'s return value instead.
     if (!this.executor.awaitTermination(5, TimeUnit.SECONDS)) {
       logger.info("knapsack shutdown now");
       executor.shutdownNow();
       this.executor.awaitTermination(5, TimeUnit.SECONDS);
     }
   } catch (InterruptedException e) {
     Thread.currentThread().interrupt(); // preserve the interrupt status
     throw new ElasticsearchException(e.getMessage(), e); // keep the cause
   }
   logger.info("knapsack shutdown complete");
 }
 @Override
 public BaseIngestTransportClient newIndex(String index) {
   // Creates the given index using the configured settings and mappings.
   // Returns this for chaining; no-ops (with a warning) when preconditions
   // are not met. Creation failures are logged, not rethrown.
   if (client == null) {
     logger.warn("no client for create index");
     return this;
   }
   if (index == null) {
     logger.warn("no index name given to create index");
     return this;
   }
   CreateIndexRequest request = new CreateIndexRequest(index).listenerThreaded(false);
   if (getSettings() != null) {
     request.settings(getSettings());
   }
   if (getMappings() != null) {
     for (Map.Entry<String, String> me : getMappings().entrySet()) {
       request.mapping(me.getKey(), me.getValue());
     }
   }
   logger.info(
       "creating index {} with settings = {}, mappings = {}",
       index,
       getSettings() != null ? getSettings().getAsMap() : null,
       getMappings());
   try {
     client.admin().indices().create(request).actionGet();
     // BUG FIX: the success message used to be logged unconditionally, even
     // when create() threw; log it only after the call actually succeeded.
     logger.info("index {} created", index);
   } catch (Exception e) {
     logger.error(e.getMessage(), e);
   }
   return this;
 }
  @Override
  public Node stop() {
    // Stops all node services; idempotent via the lifecycle guard.
    // Returns this for chaining.
    if (!lifecycle.moveToStopped()) {
      return this;
    }
    ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
    logger.info("{{}}[{}]: stopping ...", Version.full(), JvmInfo.jvmInfo().pid());

    // HTTP goes down first so no new external requests arrive while the
    // remaining services are being stopped.
    if (settings.getAsBoolean("http.enabled", true)) {
      injector.getInstance(HttpServer.class).stop();
    }
    // NOTE(review): the order below appears deliberate (cluster-facing
    // services before indices, transport last) — do not reorder casually.
    injector.getInstance(RoutingService.class).stop();
    injector.getInstance(ClusterService.class).stop();
    injector.getInstance(DiscoveryService.class).stop();
    injector.getInstance(MonitorService.class).stop();
    injector.getInstance(GatewayService.class).stop();
    injector.getInstance(SearchService.class).stop();
    injector.getInstance(RiversManager.class).stop();
    injector.getInstance(IndicesClusterStateService.class).stop();
    injector.getInstance(IndicesService.class).stop();
    injector.getInstance(RestController.class).stop();
    injector.getInstance(TransportService.class).stop();
    injector.getInstance(JmxService.class).close();

    // Give every plugin-provided lifecycle component a chance to stop too.
    for (Class<? extends LifecycleComponent> plugin : pluginsService.services()) {
      injector.getInstance(plugin).stop();
    }

    logger.info("{{}}[{}]: stopped", Version.full(), JvmInfo.jvmInfo().pid());

    return this;
  }
  /**
   * Starts the harvester for queries and/or URLs.
   *
   * <p>Runs one harvest pass (endpoint queries, then dumps) and returns {@code true} once the
   * harvester is closed. The loop body effectively executes once: {@code closed} is set at the
   * bottom, so the next iteration takes the early-return branch — unless {@code closed} was
   * already set externally before the first pass.
   */
  public boolean runIndexAll() {
    logger.info(
        "Starting RDF harvester: endpoint [{}], queries [{}],"
            + "URIs [{}], index name [{}], typeName [{}]",
        rdfEndpoint,
        rdfQueries,
        rdfUris,
        indexName,
        typeName);

    while (true) {
      // Exit point: reached on the second iteration, or immediately if the
      // harvester was closed before we started.
      if (this.closed) {
        logger.info(
            "Ended harvest for endpoint [{}], queries [{}],"
                + "URIs [{}], index name {}, type name {}",
            rdfEndpoint,
            rdfQueries,
            rdfUris,
            indexName,
            typeName);
        return true;
      }

      // Harvest from a SPARQL endpoint (skipped when no queries are configured).
      if (!rdfQueries.isEmpty()) {
        harvestFromEndpoint();
      }

      // Harvest from RDF dumps.
      harvestFromDumps();

      closed = true;
    }
  }
  /**
   * Flags messages in {@code folderName} as DELETED; they are expunged when the folder is closed.
   *
   * <p>{@code deleteCount == -1} means "all messages"; otherwise at most {@code deleteCount}
   * messages starting at index {@code start} are deleted (JavaMail message numbers are 1-based).
   *
   * @throws MessagingException if connecting, opening, flagging or closing fails
   */
  protected void deleteMailsFromUserMailbox(
      final Properties props,
      final String folderName,
      final int start,
      final int deleteCount,
      final String user,
      final String password)
      throws MessagingException {
    final Store store = Session.getInstance(props).getStore();

    store.connect(user, password);
    checkStoreForTestConnection(store);
    // BUG FIX: the store and folder used to be leaked when any call below
    // threw; try/finally now guarantees both are closed.
    try {
      final Folder f = store.getFolder(folderName);
      f.open(Folder.READ_WRITE);
      try {
        final int msgCount = f.getMessageCount();

        final Message[] m =
            deleteCount == -1
                ? f.getMessages()
                : f.getMessages(start, Math.min(msgCount, deleteCount + start - 1));
        int d = 0;

        for (final Message message : m) {
          message.setFlag(Flag.DELETED, true);
          // BUG FIX: the log text said "sid" but the value passed is the subject.
          logger.info(
              "Delete msgnum: {} with subject {}",
              message.getMessageNumber(),
              message.getSubject());
          d++;
        }
        logger.info("Deleted " + d + " messages");
      } finally {
        f.close(true); // expunge flagged messages even if flagging failed midway
      }
    } finally {
      store.close();
    }
  }
 @Override
 public BulkNodeClient newIndex(String index, Settings settings, Map<String, String> mappings) {
   // Creates an index with optional settings and per-type mappings.
   // Returns this for chaining; throws if this client was already closed.
   if (closed) {
     throw new ElasticsearchIllegalStateException("client is closed");
   }
   if (client == null) {
     logger.warn("no client for create index");
     return this;
   }
   if (index == null) {
     logger.warn("no index name given to create index");
     return this;
   }
   CreateIndexRequestBuilder createIndexRequestBuilder =
       new CreateIndexRequestBuilder(client.admin().indices()).setIndex(index);
   if (settings != null) {
     logger.info("settings = {}", settings.getAsStructuredMap());
     createIndexRequestBuilder.setSettings(settings);
   }
   if (mappings != null) {
     // Iterate entries instead of keySet()+get() to avoid a second map lookup.
     for (Map.Entry<String, String> mapping : mappings.entrySet()) {
       logger.info("found mapping for {}", mapping.getKey());
       createIndexRequestBuilder.addMapping(mapping.getKey(), mapping.getValue());
     }
   }
   createIndexRequestBuilder.execute().actionGet();
   logger.info("index {} created", index);
   return this;
 }
  @Test
  public void testSingleIndexShardFailed() {
    // Verifies that when the only copy of a single-shard, zero-replica index
    // fails on its node, the shard returns to UNASSIGNED rather than being
    // retried on the same node.
    AllocationService strategy =
        createAllocationService(
            settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());

    logger.info("Building initial routing table");

    MetaData metaData =
        MetaData.builder()
            .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(0))
            .build();

    RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();

    ClusterState clusterState =
        ClusterState.builder().metaData(metaData).routingTable(routingTable).build();

    // With no nodes yet, the single shard copy must be unassigned.
    assertThat(routingTable.index("test").shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
    assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());

    logger.info("Adding one node and rerouting");
    clusterState =
        ClusterState.builder(clusterState)
            .nodes(DiscoveryNodes.builder().put(newNode("node1")))
            .build();
    RoutingTable prevRoutingTable = routingTable;
    routingTable = strategy.reroute(clusterState).routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

    // Reroute must have produced a new table with the shard INITIALIZING on node1.
    assertThat(prevRoutingTable != routingTable, equalTo(true));
    assertThat(routingTable.index("test").shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().get(0).unassigned(), equalTo(false));
    assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(INITIALIZING));
    assertThat(
        routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));

    logger.info("Marking the shard as failed");
    RoutingNodes routingNodes = clusterState.routingNodes();
    prevRoutingTable = routingTable;
    routingTable =
        strategy
            .applyFailedShard(
                clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING).get(0))
            .routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

    // After the failure, the shard must be UNASSIGNED again with no node.
    assertThat(prevRoutingTable != routingTable, equalTo(true));
    assertThat(routingTable.index("test").shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
    assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
  }
  @Test
  public void rerouteExplain() {
    // Verifies that a reroute request with setExplain(true) returns one
    // RoutingExplanation matching the issued MoveAllocationCommand, with a
    // YES decision even though allocation is disabled (explain-only path).
    Settings commonSettings = settingsBuilder().build();

    logger.info("--> starting a node");
    String node_1 = cluster().startNode(commonSettings);

    assertThat(cluster().size(), equalTo(1));
    ClusterHealthResponse healthResponse =
        client().admin().cluster().prepareHealth().setWaitForNodes("1").execute().actionGet();
    assertThat(healthResponse.isTimedOut(), equalTo(false));

    logger.info("--> create an index with 1 shard");
    client()
        .admin()
        .indices()
        .prepareCreate("test")
        .setSettings(
            settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
        .execute()
        .actionGet();

    ensureGreen("test");

    // Disable allocation so the explained move is not actually executed.
    logger.info("--> disable allocation");
    Settings newSettings =
        settingsBuilder()
            .put(
                EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE,
                EnableAllocationDecider.Allocation.NONE.name())
            .build();
    client()
        .admin()
        .cluster()
        .prepareUpdateSettings()
        .setTransientSettings(newSettings)
        .execute()
        .actionGet();

    logger.info("--> starting a second node");
    String node_2 = cluster().startNode(commonSettings);
    assertThat(cluster().size(), equalTo(2));
    healthResponse =
        client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet();
    assertThat(healthResponse.isTimedOut(), equalTo(false));

    logger.info("--> try to move the shard from node1 to node2");
    MoveAllocationCommand cmd = new MoveAllocationCommand(new ShardId("test", 0), node_1, node_2);
    ClusterRerouteResponse resp =
        client().admin().cluster().prepareReroute().add(cmd).setExplain(true).execute().actionGet();
    RoutingExplanations e = resp.getExplanations();
    // Exactly one explanation, echoing the command's shard/from/to, decided YES.
    assertThat(e.explanations().size(), equalTo(1));
    RerouteExplanation explanation = e.explanations().get(0);
    assertThat(explanation.command().name(), equalTo(cmd.name()));
    assertThat(((MoveAllocationCommand) explanation.command()).shardId(), equalTo(cmd.shardId()));
    assertThat(((MoveAllocationCommand) explanation.command()).fromNode(), equalTo(cmd.fromNode()));
    assertThat(((MoveAllocationCommand) explanation.command()).toNode(), equalTo(cmd.toNode()));
    assertThat(explanation.decisions().type(), equalTo(Decision.Type.YES));
  }
  // Benchmark driver: measures how long the allocation service takes to fully
  // assign a large cluster (many indices/shards/replicas over tagged nodes),
  // averaged over numberOfRuns. Uses a fixed Random seed for reproducibility.
  public static void main(String[] args) {
    final int numberOfRuns = 1;
    final int numIndices = 5 * 365; // five years
    final int numShards = 6;
    final int numReplicas = 2;
    final int numberOfNodes = 30;
    final int numberOfTags = 2;
    AllocationService strategy =
        ElasticsearchAllocationTestCase.createAllocationService(
            ImmutableSettings.EMPTY, new Random(1));

    // One index per simulated day, each with the same shard/replica layout.
    MetaData.Builder mb = MetaData.builder();
    for (int i = 1; i <= numIndices; i++) {
      mb.put(
          IndexMetaData.builder("test_" + i)
              .numberOfShards(numShards)
              .numberOfReplicas(numReplicas));
    }
    MetaData metaData = mb.build();
    RoutingTable.Builder rb = RoutingTable.builder();
    for (int i = 1; i <= numIndices; i++) {
      rb.addAsNew(metaData.index("test_" + i));
    }
    RoutingTable routingTable = rb.build();
    // Nodes alternate between numberOfTags tag values via i % numberOfTags.
    DiscoveryNodes.Builder nb = DiscoveryNodes.builder();
    for (int i = 1; i <= numberOfNodes; i++) {
      nb.put(newNode("node" + i, ImmutableMap.of("tag", "tag_" + (i % numberOfTags))));
    }
    ClusterState initialClusterState =
        ClusterState.builder().metaData(metaData).routingTable(routingTable).nodes(nb).build();

    long start = System.currentTimeMillis();
    for (int i = 0; i < numberOfRuns; i++) {
      logger.info("[{}] starting... ", i);
      long runStart = System.currentTimeMillis();
      ClusterState clusterState = initialClusterState;
      // Drive allocation to a fixed point: start initializing shards, then
      // reroute, until nothing is left unassigned.
      while (clusterState.readOnlyRoutingNodes().hasUnassignedShards()) {
        logger.info(
            "[{}] remaining unassigned {}",
            i,
            clusterState.readOnlyRoutingNodes().unassigned().size());
        RoutingAllocation.Result result =
            strategy.applyStartedShards(
                clusterState, clusterState.readOnlyRoutingNodes().shardsWithState(INITIALIZING));
        clusterState = ClusterState.builder(clusterState).routingResult(result).build();
        result = strategy.reroute(clusterState);
        clusterState = ClusterState.builder(clusterState).routingResult(result).build();
      }
      logger.info(
          "[{}] took {}", i, TimeValue.timeValueMillis(System.currentTimeMillis() - runStart));
    }
    long took = System.currentTimeMillis() - start;
    logger.info(
        "total took {}, AVG {}",
        TimeValue.timeValueMillis(took),
        TimeValue.timeValueMillis(took / numberOfRuns));
  }
  // Verifies that an index closed while still unallocatable (RED) resumes and
  // completes allocation after its settings are relaxed and it is reopened.
  public void testFastCloseAfterCreateContinuesCreateAfterOpen() {
    logger.info("--> creating test index that cannot be allocated");
    // The include.tag filter references a nonexistent node, so no shard copy
    // can be allocated anywhere.
    client()
        .admin()
        .indices()
        .prepareCreate("test")
        .setSettings(
            Settings.settingsBuilder()
                .put("index.routing.allocation.include.tag", "no_such_node")
                .build())
        .get();

    ClusterHealthResponse health =
        client().admin().cluster().prepareHealth("test").setWaitForNodes(">=2").get();
    assertThat(health.isTimedOut(), equalTo(false));
    assertThat(health.getStatus(), equalTo(ClusterHealthStatus.RED));

    // Close while the index is still RED ("fast close after create").
    client().admin().indices().prepareClose("test").get();

    logger.info("--> updating test index settings to allow allocation");
    // Clearing the tag filter makes every node eligible again.
    client()
        .admin()
        .indices()
        .prepareUpdateSettings("test")
        .setSettings(
            Settings.settingsBuilder().put("index.routing.allocation.include.tag", "").build())
        .get();

    client().admin().indices().prepareOpen("test").get();

    logger.info("--> waiting for green status");
    ensureGreen();

    NumShards numShards = getNumShards("test");

    // The reopened index must be OPEN with all shards STARTED.
    ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
    assertThat(
        stateResponse.getState().metaData().index("test").getState(),
        equalTo(IndexMetaData.State.OPEN));
    assertThat(
        stateResponse.getState().routingTable().index("test").shards().size(),
        equalTo(numShards.numPrimaries));
    assertThat(
        stateResponse
            .getState()
            .routingTable()
            .index("test")
            .shardsWithState(ShardRoutingState.STARTED)
            .size(),
        equalTo(numShards.totalNumShards));

    logger.info("--> indexing a simple document");
    client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
  }
 /** Deletes the named index, then blocks until the cluster reports green. */
 protected void deleteIndex(String name) {
   logger.info("Delete index [{}]", name);
   node.client().admin().indices().delete(deleteIndexRequest(name)).actionGet();
   logger.debug("Running Cluster Health");
   ClusterHealthResponse clusterHealth =
       node.client()
           .admin()
           .cluster()
           .health(clusterHealthRequest().waitForGreenStatus())
           .actionGet();
   // Parameterized logging instead of string concatenation, consistent with
   // the rest of this class.
   logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
 }
 /** Deletes the named river's mapping from "_river", then waits for green. */
 protected void deleteRiver(String name) {
   logger.info("Delete river [{}]", name);
   DeleteMappingRequest deleteMapping = new DeleteMappingRequest("_river").type(name);
   node.client().admin().indices().deleteMapping(deleteMapping).actionGet();
   logger.debug("Running Cluster Health");
   ClusterHealthResponse clusterHealth =
       node.client()
           .admin()
           .cluster()
           .health(clusterHealthRequest().waitForGreenStatus())
           .actionGet();
   // Parameterized logging instead of string concatenation, consistent with
   // the rest of this class.
   logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
 }
 /**
  * Builds a test node named {@code id} with the knapsack plugin installed and registers the node
  * and its client in the instance maps. ES 2.1 renders NodeBuilder useless, hence the MockNode.
  */
 private Node buildNode(String id) throws IOException {
   Settings nodeSettings = settingsBuilder().put(getNodeSettings()).put("name", id).build();
   logger.info("settings={}", nodeSettings.getAsMap());
   Set<Class<? extends Plugin>> pluginClasses = new HashSet<>();
   pluginClasses.add(KnapsackPlugin.class);
   Node node = new MockNode(new Environment(nodeSettings), pluginClasses);
   AbstractClient nodeClient = (AbstractClient) node.client();
   nodes.put(id, node);
   clients.put(id, nodeClient);
   logger.info("clients={}", clients);
   return node;
 }
 /** Closes every registered client, then every node, clearing both registries. */
 private void closeNodes() throws IOException {
   logger.info("closing all clients");
   for (AbstractClient openClient : clients.values()) {
     openClient.close();
   }
   clients.clear();
   logger.info("closing all nodes");
   for (Node openNode : nodes.values()) {
     if (openNode == null) {
       continue; // a node slot may never have been populated
     }
     openNode.close();
   }
   nodes.clear();
   logger.info("all nodes closed");
 }
  @Test
  public void testReplicaLevel() throws IOException {
    // Indexes a batch of documents, then raises the replica level and checks
    // (via log output) the resulting shard count. Skips gracefully when no
    // node is reachable.

    int numberOfShards = 5;
    int replicaLevel = 4;
    int shardsAfterReplica = 0;

    // Start with zero replicas so the replica level can be raised afterwards.
    final AbstractIngestClient es =
        new IngestClient()
            .newClient(ADDRESS)
            .setIndex("replicatest")
            .setType("replicatest")
            .numberOfShards(numberOfShards)
            .numberOfReplicas(0)
            .dateDetection(false)
            .timeStampFieldEnabled(false)
            .newIndex();

    try {
      for (int i = 0; i < 12345; i++) {
        es.indexDocument(
            "replicatest", "replicatest", null, "{ \"name\" : \"" + randomString(32) + "\"}");
      }
      es.flush();
      shardsAfterReplica = es.updateReplicaLevel(replicaLevel);
      logger.info("shardsAfterReplica={}", shardsAfterReplica);
    } catch (NoNodeAvailableException e) {
      logger.warn("skipping, no node available");
    } finally {
      // NOTE(review): the shard-count assertion is disabled; the test
      // currently only verifies that the operations complete without error.
      // assertEquals(shardsAfterReplica, numberOfShards * (replicaLevel + 1));
      es.shutdown();
    }
  }
  /**
   * Queries the {@link #rdfEndpoint(String)} with each of the {@link #rdfQueries} and harvests the
   * results of the query.
   *
   * <p>Unparseable queries are logged and skipped; harvest failures are logged (with stack trace)
   * and the loop continues with the next query. Stops early when {@code closed} is set.
   */
  private void harvestFromEndpoint() {
    for (String rdfQuery : rdfQueries) {
      if (closed) break;

      logger.info(
          "Harvesting with query: [{}] on index [{}] and type [{}]", rdfQuery, indexName, typeName);

      Query query;
      try {
        query = QueryFactory.create(rdfQuery);
      } catch (QueryParseException qpe) {
        logger.error("Could not parse [{}]. Please provide a relevant query. {}", rdfQuery, qpe);
        continue;
      }

      QueryExecution qExec = QueryExecutionFactory.sparqlService(rdfEndpoint, query);

      try {
        harvest(qExec);
      } catch (Exception e) {
        // BUG FIX: pass the exception itself as the trailing argument so the
        // stack trace is logged instead of only getLocalizedMessage().
        logger.error("Exception [{}] occurred while harvesting", e.getLocalizedMessage(), e);
      } finally {
        qExec.close(); // always release the query execution
      }
    }
  }
 @Before
 public void startNodes() {
   // Test fixture: starts a two-node cluster (knapsack needs two nodes) and
   // waits for at least yellow health before any test runs.
   try {
     setClusterName();
     startNode("1");
     startNode("2"); // we need 2 nodes for knapsack
     findNodeAddress();
     try {
       ClusterHealthResponse healthResponse =
           client("1")
               .execute(
                   ClusterHealthAction.INSTANCE,
                   new ClusterHealthRequest()
                       .waitForYellowStatus()
                       .timeout(TimeValue.timeValueSeconds(30)))
               .actionGet();
       if (healthResponse != null && healthResponse.isTimedOut()) {
         throw new IOException(
             "cluster state is "
                 + healthResponse.getStatus().name()
                 + ", from here on, everything will fail!");
       }
     } catch (ElasticsearchTimeoutException e) {
       // BUG FIX: preserve the original timeout exception as the cause instead
       // of dropping it.
       throw new IOException(
           "timeout, cluster does not respond to health request, cowardly refusing to continue with operations",
           e);
     }
     logger.info("ready");
   } catch (Throwable t) {
     // Deliberately broad: setup failures are logged here and surface later
     // when the tests themselves fail against the missing cluster.
     logger.error("startNodes failed", t);
   }
 }
Example #21
0
  // Loads the task list from this river's "_meta" document in the "_river"
  // index, populating taskArr/taskMap, and selects the first task as current.
  // NOTE(review): assumes at least one task is configured — taskArr.get(0)
  // throws if the "_meta" document is missing or has an empty task list.
  @SuppressWarnings("unchecked") // _river metadata arrives as untyped maps
  private void InitTasks() throws Exception {
    QueryBuilder builder = QueryBuilders.boolQuery().must(QueryBuilders.termQuery("_id", "_meta"));

    SearchRequestBuilder search = client.prepareSearch("_river");
    search.setTypes(river.riverName().name());
    search.setQuery(builder);
    SearchResponse resp = search.execute().actionGet();

    int hitCount = 0;
    for (SearchHit hit : resp.hits().getHits()) {
      // Parameterized logging instead of manual string concatenation.
      logger.info("Task Manager: Query response hits[ {}]: {}", hitCount, hit.sourceAsString());
      hitCount++;

      Map<String, Object> sourceMap = hit.sourceAsMap();
      Map<String, Object> my = (Map<String, Object>) sourceMap.get("my");
      ArrayList<?> taskDefs = (ArrayList<?>) my.get("tasks");
      for (Object taskObj : taskDefs) {
        Task newTask = new Task((Map<String, String>) taskObj, client, river.riverName().name());
        taskArr.add(newTask);
        taskMap.put(newTask.id(), newTask);
      }
    }

    currentTaskIndex = 0;
    currentTask = (Task) taskArr.get(currentTaskIndex);
  }
  // Adds one more node (node<numberOfNodes>), reroutes, and then repeatedly
  // promotes INITIALIZING shards to started until the routing table stops
  // changing (fixed point). Returns the resulting cluster state.
  private ClusterState addNode(ClusterState clusterState, AllocationService strategy) {
    logger.info(
        "now, start 1 more node, check that rebalancing will happen because we set it to always");
    clusterState =
        ClusterState.builder(clusterState)
            .nodes(
                DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node" + numberOfNodes)))
            .build();

    RoutingTable routingTable = strategy.reroute(clusterState).routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
    RoutingNodes routingNodes = clusterState.getRoutingNodes();

    // move initializing to started

    // Loop until applyStartedShards returns the identical table instance,
    // meaning no shard changed state in the last pass.
    RoutingTable prev = routingTable;
    while (true) {
      routingTable =
          strategy
              .applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING))
              .routingTable();
      clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
      routingNodes = clusterState.getRoutingNodes();
      if (routingTable == prev) break;
      prev = routingTable;
    }

    return clusterState;
  }
 @Override
 public void receivedRequest(long requestId, String action) {
   // Count down the latch when the shard-exists request arrives, signalling
   // that the relocation being watched has completed.
   if (!action.equals(IndicesStore.ACTION_SHARD_EXISTS)) {
     return;
   }
   receivedShardExistsRequestLatch.countDown();
   logger.info("received: {}, relocation done", action);
 }
  /**
   * Decodes a revision-1 handshake response (int status code, int length, UTF-8 description) from
   * the buffered bytes and reports whether the code is in the 2xx success range.
   */
  private boolean handleRevision1Response(ChannelHandlerContext ctx, int payloadLength)
      throws Exception {
    int code = buffered.readInt();

    int descriptionLength = buffered.readInt();
    byte[] descriptionBytes = new byte[descriptionLength];
    buffered.readBytes(descriptionBytes, 0, descriptionBytes.length);
    String description = new String(descriptionBytes, StandardCharsets.UTF_8);

    logger.debug(
        "Decoded payload with length:[{}], code:[{}], descriptionLength:[{}], description:[{}] on connection [{}]",
        payloadLength,
        code,
        descriptionLength,
        description,
        ctx.getChannel().getLocalAddress());

    boolean success = 200 <= code && code <= 299;
    if (success) {
      logger.info(
          "Connected to Found Elasticsearch: [{}]: [{}] on connection [{}]",
          code,
          description,
          ctx.getChannel().getLocalAddress());
    } else {
      logger.error(
          "Unable to connect to Found Elasticsearch: [{}]: [{}] on connection [{}]",
          code,
          description,
          ctx.getChannel().getLocalAddress());
    }
    return success;
  }
 /** Runs one timing measurement and logs it as fixed-width columns; returns the elapsed time. */
 private long printTime(int minConvert, int maxConvert, int maxKeepAsList) {
   final long elapsed = time(minConvert, maxConvert, maxKeepAsList);
   final String row =
       String.format(
           Locale.ROOT, "%5d  %6d  %6d  %5d", elapsed, minConvert, maxConvert, maxKeepAsList);
   log.info(row);
   return elapsed;
 }
  // Removes the upper half of the nodes, then drives allocation to a fixed
  // point: start primaries, start replicas, reroute (rebalance), and keep
  // starting shards until the routing table stops changing.
  private ClusterState removeNodes(ClusterState clusterState, AllocationService strategy) {
    logger.info("Removing half the nodes (" + (numberOfNodes + 1) / 2 + ")");
    DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());

    for (int i = (numberOfNodes + 1) / 2; i <= numberOfNodes; i++) {
      nodes.remove("node" + i);
    }

    clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
    RoutingNodes routingNodes = clusterState.getRoutingNodes();

    logger.info("start all the primary shards, replicas will start initializing");
    RoutingTable routingTable =
        strategy
            .applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING))
            .routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
    routingNodes = clusterState.getRoutingNodes();

    logger.info("start the replica shards");
    routingTable =
        strategy
            .applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING))
            .routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
    routingNodes = clusterState.getRoutingNodes();

    logger.info("rebalancing");
    routingTable = strategy.reroute(clusterState).routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
    routingNodes = clusterState.getRoutingNodes();

    logger.info("complete rebalancing");
    // Identity comparison against the previous table detects the fixed point:
    // applyStartedShards returns the same instance when nothing changed.
    RoutingTable prev = routingTable;
    while (true) {
      routingTable =
          strategy
              .applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING))
              .routingTable();
      clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
      routingNodes = clusterState.getRoutingNodes();
      if (routingTable == prev) break;
      prev = routingTable;
    }

    return clusterState;
  }
  @Test
  public void simpleFlagTests() {
    // Verifies that primaryAllocatedPostApi stays false through index creation
    // and node addition, and flips to true only once the primary has started.
    AllocationService allocation =
        new AllocationService(
            settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());

    logger.info("creating an index with 1 shard, no replica");
    MetaData metaData =
        newMetaDataBuilder()
            .put(newIndexMetaDataBuilder("test").numberOfShards(1).numberOfReplicas(0))
            .build();
    RoutingTable routingTable = routingTable().addAsNew(metaData.index("test")).build();
    ClusterState clusterState =
        newClusterStateBuilder().metaData(metaData).routingTable(routingTable).build();
    // Freshly created, never allocated: flag must be false.
    assertThat(
        clusterState.routingTable().index("test").shard(0).primaryAllocatedPostApi(),
        equalTo(false));

    logger.info("adding two nodes and performing rerouting");
    clusterState =
        newClusterStateBuilder()
            .state(clusterState)
            .nodes(newNodesBuilder().put(newNode("node1")).put(newNode("node2")))
            .build();
    RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
    clusterState =
        newClusterStateBuilder()
            .state(clusterState)
            .routingTable(rerouteResult.routingTable())
            .build();
    // Rerouted (primary INITIALIZING) but not yet started: still false.
    assertThat(
        clusterState.routingTable().index("test").shard(0).primaryAllocatedPostApi(),
        equalTo(false));

    logger.info("start primary shard");
    rerouteResult =
        allocation.applyStartedShards(
            clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
    clusterState =
        newClusterStateBuilder()
            .state(clusterState)
            .routingTable(rerouteResult.routingTable())
            .build();
    // Primary started: the flag must now be set.
    assertThat(
        clusterState.routingTable().index("test").shard(0).primaryAllocatedPostApi(),
        equalTo(true));
  }
Beispiel #28
0
  @Override
  public Node stop() {
    if (!lifecycle.moveToStopped()) {
      return this;
    }
    ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
    logger.info("stopping ...");

    injector.getInstance(TribeService.class).stop();
    injector.getInstance(ResourceWatcherService.class).stop();
    if (settings.getAsBoolean("http.enabled", true)) {
      injector.getInstance(HttpServer.class).stop();
    }

    injector.getInstance(MappingUpdatedAction.class).stop();
    injector.getInstance(RiversManager.class).stop();

    injector.getInstance(SnapshotsService.class).stop();
    // stop any changes happening as a result of cluster state changes
    injector.getInstance(IndicesClusterStateService.class).stop();
    // we close indices first, so operations won't be allowed on it
    injector.getInstance(IndexingMemoryController.class).stop();
    injector.getInstance(IndicesTTLService.class).stop();
    injector.getInstance(IndicesService.class).stop();
    // sleep a bit to let operations finish with indices service
    //        try {
    //            Thread.sleep(500);
    //        } catch (InterruptedException e) {
    //            // ignore
    //        }
    injector.getInstance(RoutingService.class).stop();
    injector.getInstance(ClusterService.class).stop();
    injector.getInstance(DiscoveryService.class).stop();
    injector.getInstance(MonitorService.class).stop();
    injector.getInstance(GatewayService.class).stop();
    injector.getInstance(SearchService.class).stop();
    injector.getInstance(RestController.class).stop();
    injector.getInstance(TransportService.class).stop();

    for (Class<? extends LifecycleComponent> plugin : pluginsService.services()) {
      injector.getInstance(plugin).stop();
    }

    logger.info("stopped");

    return this;
  }
 @Test
 public void testThreadedRandomDocsNodeClient() throws Exception {
   int max = Runtime.getRuntime().availableProcessors();
   int maxactions = 1000;
   final int maxloop = 12345;
   final NodeClient client =
       new NodeClient()
           .maxActionsPerBulkRequest(maxactions)
           .flushInterval(TimeValue.timeValueSeconds(600)) // disable auto flush for this test
           .newClient(client("1"))
           .newIndex("test")
           .startBulk("test");
   try {
     ThreadPoolExecutor pool =
         EsExecutors.newFixed(max, 30, EsExecutors.daemonThreadFactory("nodeclient-test"));
     final CountDownLatch latch = new CountDownLatch(max);
     for (int i = 0; i < max; i++) {
       pool.execute(
           new Runnable() {
             public void run() {
               for (int i = 0; i < maxloop; i++) {
                 client.index("test", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}");
               }
               latch.countDown();
             }
           });
     }
     logger.info("waiting for max 60 seconds...");
     latch.await(60, TimeUnit.SECONDS);
     logger.info("flush...");
     client.flush();
     logger.info("waiting for pool shutdown...");
     pool.shutdown();
     logger.info("pool is shut down");
   } catch (NoNodeAvailableException e) {
     logger.warn("skipping, no node available");
   } finally {
     client.stopBulk("test").shutdown();
     logger.info("total bulk requests = {}", client.getState().getTotalIngest().count());
     assertEquals(max * maxloop / maxactions + 1, client.getState().getTotalIngest().count());
     if (client.hasThrowable()) {
       logger.error("error", client.getThrowable());
     }
     assertFalse(client.hasThrowable());
   }
 }
Beispiel #30
0
  public Node start() {
    if (!lifecycle.moveToStarted()) {
      return this;
    }

    ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
    logger.info("starting ...");

    // hack around dependency injection problem (for now...)
    injector
        .getInstance(Discovery.class)
        .setAllocationService(injector.getInstance(AllocationService.class));

    for (Class<? extends LifecycleComponent> plugin : pluginsService.services()) {
      injector.getInstance(plugin).start();
    }

    injector.getInstance(MappingUpdatedAction.class).start();
    injector.getInstance(IndicesService.class).start();
    injector.getInstance(IndexingMemoryController.class).start();
    injector.getInstance(IndicesClusterStateService.class).start();
    injector.getInstance(IndicesTTLService.class).start();
    injector.getInstance(RiversManager.class).start();
    injector.getInstance(SnapshotsService.class).start();
    injector.getInstance(TransportService.class).start();
    injector.getInstance(ClusterService.class).start();
    injector.getInstance(RoutingService.class).start();
    injector.getInstance(SearchService.class).start();
    injector.getInstance(MonitorService.class).start();
    injector.getInstance(RestController.class).start();
    DiscoveryService discoService = injector.getInstance(DiscoveryService.class).start();
    discoService.waitForInitialState();

    // gateway should start after disco, so it can try and recovery from gateway on "start"
    injector.getInstance(GatewayService.class).start();

    if (settings.getAsBoolean("http.enabled", true)) {
      injector.getInstance(HttpServer.class).start();
    }
    injector.getInstance(ResourceWatcherService.class).start();
    injector.getInstance(TribeService.class).start();

    logger.info("started");

    return this;
  }