Beispiel #1
0
  /**
   * Sends the UDC ping to the configured endpoint and bumps the success/fail counters.
   *
   * <p>Any response code >= 300 is treated as a failure. When debug logging is enabled the
   * response body is read and logged line by line; otherwise the stream is just closed.
   */
  @Override
  public void run() {
    try {
      URL url = buildPingUrl();
      if (logger.isDebugEnabled()) {
        logger.debug("Sending UDC information to {}...", url);
      }
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      conn.setConnectTimeout((int) HTTP_TIMEOUT.millis());
      conn.setReadTimeout((int) HTTP_TIMEOUT.millis());

      if (conn.getResponseCode() >= 300) {
        throw new Exception(
            String.format("%s Responded with Code %d", url.getHost(), conn.getResponseCode()));
      }
      if (logger.isDebugEnabled()) {
        // BUG FIX: try-with-resources closes the reader (and the underlying stream) even
        // when readLine() throws mid-stream; the previous code leaked it on exception.
        try (BufferedReader reader =
            new BufferedReader(new InputStreamReader(conn.getInputStream()))) {
          String line;
          while ((line = reader.readLine()) != null) {
            logger.debug(line);
          }
        }
      } else {
        conn.getInputStream().close();
      }
      successCounter.incrementAndGet();
    } catch (Exception e) {
      if (logger.isDebugEnabled()) {
        logger.debug("Error sending UDC information", e);
      }
      failCounter.incrementAndGet();
    }
  }
  /**
   * Runs the given bulk request and verifies it fails with a {@link ClusterBlockException}
   * after (not) waiting out the timeout, depending on whether index auto-creation is
   * expected.
   */
  void checkBulkAction(boolean indexShouldBeAutoCreated, BulkRequestBuilder builder) {
    // Bulk operations do not throw MasterNotDiscoveredException and do not wait on blocks,
    // so the only way to detect that auto-create kicked in and failed is via the timeout.
    TimeValue timeout =
        indexShouldBeAutoCreated
            // the bulk will try to reach the master: small timeout, detect that it elapsed
            ? new TimeValue(200)
            // the request should fail very quickly: large timeout, ensure it did NOT elapse
            : new TimeValue(5000);
    builder.setTimeout(timeout);
    long startMillis = System.currentTimeMillis();
    try {
      builder.get();
      fail("Expected ClusterBlockException");
    } catch (ClusterBlockException e) {
      long elapsedMillis = System.currentTimeMillis() - startMillis;
      if (indexShouldBeAutoCreated) {
        // timeout is 200
        assertThat(elapsedMillis, greaterThan(timeout.millis() - 50));
        assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
      } else {
        // timeout is 5000
        assertThat(elapsedMillis, lessThan(timeout.millis() - 50));
      }
    }
  }
  /**
   * Creates a new TranslogConfig instance.
   *
   * @param shardId the shard ID this translog belongs to
   * @param translogPath the path to use for the transaction log files
   * @param indexSettings the index settings used to set internal variables
   * @param durabilty the default durability setting for the translog
   * @param bigArrays a bigArrays instance used for temporarily allocating write operations
   * @param threadPool a {@link ThreadPool} to schedule async sync durability
   */
  public TranslogConfig(
      ShardId shardId,
      Path translogPath,
      Settings indexSettings,
      Translog.Durabilty durabilty,
      BigArrays bigArrays,
      @Nullable ThreadPool threadPool) {
    this.shardId = shardId;
    this.translogPath = translogPath;
    this.indexSettings = indexSettings;
    this.durabilty = durabilty;
    this.bigArrays = bigArrays;
    this.threadPool = threadPool;
    this.type =
        TranslogWriter.Type.fromString(
            indexSettings.get(INDEX_TRANSLOG_FS_TYPE, TranslogWriter.Type.BUFFERED.name()));
    // Not really interesting, updated by IndexingMemoryController...
    this.bufferSize =
        (int)
            indexSettings
                .getAsBytesSize(
                    INDEX_TRANSLOG_BUFFER_SIZE,
                    IndexingMemoryController.INACTIVE_SHARD_TRANSLOG_BUFFER)
                .bytes();

    syncInterval =
        indexSettings.getAsTime(INDEX_TRANSLOG_SYNC_INTERVAL, TimeValue.timeValueSeconds(5));
    // Equivalent to the original branch chain: sync-on-each-operation only when the
    // configured interval is exactly zero. A positive interval is handled by the scheduled
    // async sync (when a thread pool is available); a negative interval disables both.
    syncOnEachOperation = syncInterval.millis() == 0;
  }
  /**
   * Starts all searcher and indexer threads, waits for the run to complete, then closes
   * clients and nodes with the configured pre-close and pre-exit sleeps.
   *
   * @throws Exception if barrier/latch waiting or sleeping is interrupted
   */
  public void start() throws Exception {
    for (Thread t : searcherThreads) {
      t.start();
    }
    for (Thread t : indexThreads) {
      t.start();
    }
    barrier1.await();

    StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    barrier2.await();

    latch.await();
    stopWatch.stop();

    System.out.println("Done, took [" + stopWatch.totalTime() + "]");
    System.out.println("Sleeping before close: " + sleepBeforeClose);
    Thread.sleep(sleepBeforeClose.millis());

    for (Client client : clients) {
      client.close();
    }
    for (Node node : nodes) {
      node.close();
    }

    // BUG FIX: the message previously printed sleepBeforeClose although the sleep below
    // uses sleepAfterDone.
    System.out.println("Sleeping before exit: " + sleepAfterDone);
    Thread.sleep(sleepAfterDone.millis());
  }
 /**
  * Sleeps for the configured poll interval before the next run; a non-positive interval
  * means no delay.
  */
 private void delay() {
   if (poll.millis() > 0L) {
     logger.info("next run waiting for {}", poll);
     try {
       Thread.sleep(poll.millis());
     } catch (InterruptedException e) {
       // BUG FIX: restore the interrupt flag so callers up the stack can observe the
       // interruption; also dropped the meaningless trailing "(Object) null" argument.
       Thread.currentThread().interrupt();
       logger.error("Error during waiting.", e);
     }
   }
 }
  /**
   * Runs the read/write benchmark: starts searcher and writer threads, schedules periodic
   * refresh and flush tasks, waits for completion, prints a summary, and finally verifies
   * the indexed doc count with a match-all search.
   *
   * @throws Exception if thread coordination or the final search fails
   */
  public void run() throws Exception {
    for (Thread t : searcherThreads) {
      t.start();
    }
    for (Thread t : writerThreads) {
      t.start();
    }
    barrier1.await();

    Refresher refresher = new Refresher();
    scheduledExecutorService.scheduleWithFixedDelay(
        refresher, refreshSchedule.millis(), refreshSchedule.millis(), TimeUnit.MILLISECONDS);
    Flusher flusher = new Flusher();
    scheduledExecutorService.scheduleWithFixedDelay(
        flusher, flushSchedule.millis(), flushSchedule.millis(), TimeUnit.MILLISECONDS);

    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    barrier2.await();

    latch.await();
    stopWatch.stop();

    System.out.println("Summary");
    System.out.println(
        "   -- Readers ["
            + searcherThreads.length
            + "] with ["
            + searcherIterations
            + "] iterations");
    System.out.println(
        "   -- Writers [" + writerThreads.length + "] with [" + writerIterations + "] iterations");
    System.out.println("   -- Took: " + stopWatch.totalTime());
    System.out.println(
        "   -- Refresh [" + refresher.id + "] took: " + refresher.stopWatch.totalTime());
    System.out.println("   -- Flush [" + flusher.id + "] took: " + flusher.stopWatch.totalTime());
    System.out.println("   -- Store size " + store.estimateSize());

    scheduledExecutorService.shutdown();

    engine.refresh(new Engine.Refresh(true));
    stopWatch = new StopWatch();
    stopWatch.start();
    Engine.Searcher searcher = engine.searcher();
    try {
      TopDocs topDocs =
          searcher.searcher().search(new MatchAllDocsQuery(), idGenerator.get() + 1);
      stopWatch.stop();
      System.out.println(
          "   -- Indexed ["
              + idGenerator.get()
              + "] docs, found ["
              + topDocs.totalHits
              + "] hits, took "
              + stopWatch.totalTime());
    } finally {
      // BUG FIX: release the searcher even when the search throws, so the underlying
      // reader reference is not leaked.
      searcher.release();
    }
  }
  /**
   * Verifies the default recover-after time: unset by default, implied when any of the
   * expected-node settings is present, and overridable by an explicit setting.
   */
  @Test
  public void testDefaultRecoverAfterTime() throws IOException {

    // check that the default is not set
    GatewayService service = createService(Settings.builder());
    assertNull(service.recoverAfterTime());

    // ensure the implicit default kicks in for each of the expected-node settings
    // (decomposed from three copy-pasted assertion blocks)
    assertDefaultRecoverAfterTimeImplied("gateway.expected_nodes");
    assertDefaultRecoverAfterTimeImplied("gateway.expected_data_nodes");
    assertDefaultRecoverAfterTimeImplied("gateway.expected_master_nodes");

    // ensure settings override default
    TimeValue timeValue = TimeValue.timeValueHours(3);
    service =
        createService(
            Settings.builder()
                .put("gateway.expected_nodes", 1)
                .put("gateway.recover_after_time", timeValue.toString()));
    assertThat(service.recoverAfterTime().millis(), Matchers.equalTo(timeValue.millis()));
  }

  /** Asserts that setting {@code setting} to 1 implies the default recover-after time. */
  private void assertDefaultRecoverAfterTimeImplied(String setting) throws IOException {
    GatewayService service = createService(Settings.builder().put(setting, 1));
    assertThat(
        service.recoverAfterTime(),
        Matchers.equalTo(GatewayService.DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET));
  }
 /**
  * Schedules the periodic snapshot when required; otherwise returns without doing
  * anything. Only started primary shards are snapshotted, and an already-scheduled
  * snapshot is never scheduled twice.
  */
 private synchronized void scheduleSnapshotIfNeeded() {
   // Short-circuit && preserves the original guard order and early-exit semantics.
   boolean shouldSchedule =
       shardGateway.requiresSnapshot()
           && shardGateway.requiresSnapshotScheduling()
           // we only do snapshotting on the primary shard
           && indexShard.routingEntry().primary()
           // we only schedule when the cluster assumes we have started
           && indexShard.routingEntry().started()
           // a non-null future means we are already scheduling this one, ignore
           && snapshotScheduleFuture == null;
   if (!shouldSchedule) {
     return;
   }
   // an interval of -1 disables scheduled snapshots
   if (snapshotInterval.millis() != -1) {
     if (logger.isDebugEnabled()) {
       logger.debug("scheduling snapshot every [{}]", snapshotInterval);
     }
     snapshotScheduleFuture =
         threadPool.schedule(snapshotInterval, ThreadPool.Names.SNAPSHOT, snapshotRunnable);
   }
 }
 /**
  * Builds a ClusterHealthResponse from randomized inputs, optionally round-trips it
  * through serialization, and verifies the getters reflect the inputs.
  */
 public void testClusterHealth() throws IOException {
   ClusterState state =
       ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build();
   int expectedPendingTasks = randomIntBetween(0, 200);
   int expectedInFlight = randomIntBetween(0, 200);
   int expectedDelayedUnassigned = randomIntBetween(0, 200);
   TimeValue expectedMaxWaitTime = TimeValue.timeValueMillis(randomIntBetween(1000, 100000));
   ClusterHealthResponse response =
       new ClusterHealthResponse(
           "bla",
           new String[] {MetaData.ALL},
           state,
           expectedPendingTasks,
           expectedInFlight,
           expectedDelayedUnassigned,
           expectedMaxWaitTime);
   // randomly exercise the wire round-trip before asserting
   response = maybeSerialize(response);
   assertClusterHealth(response);
   assertThat(response.getNumberOfPendingTasks(), Matchers.equalTo(expectedPendingTasks));
   assertThat(response.getNumberOfInFlightFetch(), Matchers.equalTo(expectedInFlight));
   assertThat(response.getDelayedUnassignedShards(), Matchers.equalTo(expectedDelayedUnassigned));
   assertThat(response.getTaskMaxWaitingTime().millis(), is(expectedMaxWaitTime.millis()));
   assertThat(
       response.getActiveShardsPercent(),
       is(allOf(greaterThanOrEqualTo(0.0), lessThanOrEqualTo(100.0))));
 }
 /**
  * Creates the default search context for a single shard-level search request.
  *
  * <p>NOTE(review): assignment order matters — {@code ContextIndexSearcher} receives
  * {@code this} while construction is still in progress, so keep that call late in the
  * constructor; confirm which fields it reads before reordering.
  *
  * @param id unique id of this context, also used for the dfs/query/fetch result objects
  * @param request the shard-level search request
  * @param shardTarget the shard this search executes against
  * @param engineSearcher the engine searcher this context wraps
  * @param indexService the index service of the target index
  * @param indexShard the target shard
  * @param scriptService service used for script execution
  * @param pageCacheRecycler recycler for pages
  * @param bigArrays big-arrays factory; wrapped with circuit breaking below
  * @param timeEstimateCounter counter used for time estimation
  * @param parseFieldMatcher matcher forwarded to the superclass
  * @param timeout search timeout, stored as milliseconds
  */
 public DefaultSearchContext(
     long id,
     ShardSearchRequest request,
     SearchShardTarget shardTarget,
     Engine.Searcher engineSearcher,
     IndexService indexService,
     IndexShard indexShard,
     ScriptService scriptService,
     PageCacheRecycler pageCacheRecycler,
     BigArrays bigArrays,
     Counter timeEstimateCounter,
     ParseFieldMatcher parseFieldMatcher,
     TimeValue timeout) {
   super(parseFieldMatcher, request);
   this.id = id;
   this.request = request;
   // search type is derived from the request, not passed separately
   this.searchType = request.searchType();
   this.shardTarget = shardTarget;
   this.engineSearcher = engineSearcher;
   this.scriptService = scriptService;
   this.pageCacheRecycler = pageCacheRecycler;
   // SearchContexts use a BigArrays that can circuit break
   this.bigArrays = bigArrays.withCircuitBreaking();
   // per-phase result holders share the context id and shard target
   this.dfsResult = new DfsSearchResult(id, shardTarget);
   this.queryResult = new QuerySearchResult(id, shardTarget);
   this.fetchResult = new FetchSearchResult(id, shardTarget);
   this.indexShard = indexShard;
   this.indexService = indexService;
   this.searcher = new ContextIndexSearcher(this, engineSearcher);
   this.timeEstimateCounter = timeEstimateCounter;
   this.timeoutInMillis = timeout.millis();
 }
Beispiel #11
0
 /**
  * Renders a cell value as a string, honouring the request's "bytes", "size" and "time"
  * resolution parameters for ByteSizeValue, SizeValue and TimeValue respectively.
  * Returns null for a null value; unknown resolutions and other value types fall back to
  * {@code toString()}.
  */
 private static String renderValue(RestRequest request, Object value) {
   if (value == null) {
     return null;
   }
   if (value instanceof ByteSizeValue) {
     ByteSizeValue v = (ByteSizeValue) value;
     String resolution = request.param("bytes");
     // a missing parameter falls back to the default rendering (switch would NPE on null)
     if (resolution == null) {
       return v.toString();
     }
     switch (resolution) {
       case "b":
         return Long.toString(v.bytes());
       case "k":
         return Long.toString(v.kb());
       case "m":
         return Long.toString(v.mb());
       case "g":
         return Long.toString(v.gb());
       case "t":
         return Long.toString(v.tb());
       case "p":
         return Long.toString(v.pb());
       default:
         return v.toString();
     }
   }
   if (value instanceof SizeValue) {
     SizeValue v = (SizeValue) value;
     String resolution = request.param("size");
     if (resolution == null) {
       return v.toString();
     }
     switch (resolution) {
       case "b":
         return Long.toString(v.singles());
       case "k":
         return Long.toString(v.kilo());
       case "m":
         return Long.toString(v.mega());
       case "g":
         return Long.toString(v.giga());
       case "t":
         return Long.toString(v.tera());
       case "p":
         return Long.toString(v.peta());
       default:
         return v.toString();
     }
   }
   if (value instanceof TimeValue) {
     TimeValue v = (TimeValue) value;
     String resolution = request.param("time");
     if (resolution == null) {
       return v.toString();
     }
     switch (resolution) {
       case "ms":
         return Long.toString(v.millis());
       case "s":
         return Long.toString(v.seconds());
       case "m":
         return Long.toString(v.minutes());
       case "h":
         return Long.toString(v.hours());
       default:
         return v.toString();
     }
   }
   // Add additional built in data points we can render based on request parameters?
   return value.toString();
 }
 /**
  * Parses the {@code _ttl} mapping definition, consuming the recognized "enabled" and
  * "default" entries from {@code node}; unrecognized entries are left in the map.
  */
 @Override
 public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext)
     throws MapperParsingException {
   TTLFieldMapper.Builder builder = ttl();
   Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator();
   while (iterator.hasNext()) {
     Map.Entry<String, Object> entry = iterator.next();
     String fieldName = Strings.toUnderscoreCase(entry.getKey());
     Object fieldNode = entry.getValue();
     if ("enabled".equals(fieldName)) {
       builder.enabled(
           nodeBooleanValue(fieldNode)
               ? EnabledAttributeMapper.ENABLED
               : EnabledAttributeMapper.DISABLED);
       // consumed entries are removed via the iterator to avoid ConcurrentModification
       iterator.remove();
     } else if ("default".equals(fieldName)) {
       TimeValue defaultTtl = nodeTimeValue(fieldNode, null);
       if (defaultTtl != null) {
         builder.defaultTTL(defaultTtl.millis());
       }
       iterator.remove();
     }
   }
   return builder;
 }
 /**
  * Writes a time-valued field to this builder.
  *
  * <p>When {@code humanReadable} is set, a human-readable rendering is written first under
  * {@code readableFieldName}; the raw millisecond value is always written under
  * {@code rawFieldName}.
  *
  * @param rawFieldName name of the raw (milliseconds) field
  * @param readableFieldName name of the human-readable companion field
  * @param timeValue the value to write
  * @return this builder, for chaining
  * @throws IOException propagated from {@code field(...)}
  */
 public XContentBuilder timeValueField(
     String rawFieldName, String readableFieldName, TimeValue timeValue) throws IOException {
   if (humanReadable) {
     field(readableFieldName, timeValue.toString());
   }
   field(rawFieldName, timeValue.millis());
   return this;
 }
  /**
   * Buffers incoming bytes until the remote handshake response has been fully received,
   * then installs the keep-alive handler (if configured), replays any leftover bytes
   * upstream, and removes this handler from the pipeline. Once the handshake is complete,
   * messages pass straight through to the superclass.
   */
  @Override
  public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
    if (handshakeComplete) {
      super.messageReceived(ctx, e);
    } else {
      if (e.getMessage() instanceof ChannelBuffer) {
        ChannelBuffer newBuffer = (ChannelBuffer) e.getMessage();
        // accumulate fragments until the full handshake response is available
        buffered = ChannelBuffers.copiedBuffer(buffered, newBuffer);

        // wait for at least the 8-byte header: payload length (int) + revision (int)
        if (buffered.readableBytes() < 8) {
          return;
        }
        int payloadLength = buffered.getInt(0);
        int revision = buffered.getInt(4);

        boolean handshakeSuccessful = false;

        if (revision == 1 || revision == -1) {
          // NOTE(review): this completeness check uses payloadLength + 4 although 8 header
          // bytes are skipped below — confirm whether payloadLength includes the revision
          // field.
          if (buffered.readableBytes() < payloadLength + 4) {
            return;
          }
          buffered.skipBytes(8);

          if (revision == 1) {
            handshakeSuccessful = handleRevision1Response(ctx, payloadLength);
          } else {
            handshakeSuccessful = handleGenericResponse(ctx, payloadLength);
          }
        } else {
          handshakeSuccessful = handleUnknownRevisionResponse(ctx);
        }

        if (!handshakeSuccessful) {
          // NOTE(review): the channel is closed here, yet the method still proceeds to
          // install the keep-alive handler and mark the handshake complete — confirm this
          // fall-through is intentional.
          ctx.getChannel().close();
        }

        if (keepAliveInterval.millis() > 0) {
          ctx.getPipeline()
              .addBefore(
                  ctx.getName(),
                  "found-connection-keep-alive",
                  new ConnectionKeepAliveHandler(scheduler, keepAliveInterval));
        }

        handshakeComplete = true;

        // forward any bytes received beyond the handshake payload to downstream handlers
        ChannelBuffer remaining = buffered.slice();
        if (remaining.readableBytes() > 0) {
          ctx.sendUpstream(
              new UpstreamMessageEvent(
                  ctx.getChannel(), remaining, ctx.getChannel().getRemoteAddress()));
        }

        ctx.getPipeline().remove(this);
      }
    }
  }
  /**
   * Builds the process service: primes the stats cache with an initial probe sample and
   * records the configured refresh interval on the process info.
   */
  public ProcessService(Settings settings) {
    super(settings);
    this.probe = ProcessProbe.getInstance();

    final TimeValue interval = REFRESH_INTERVAL_SETTING.get(settings);
    processStatsCache = new ProcessStatsCache(interval, probe.processStats());
    this.info = probe.processInfo();
    this.info.refreshInterval = interval.millis();
    logger.debug("using refresh_interval [{}]", interval);
  }
 /**
  * Blocks the calling thread until signalled or until the configured timeout elapses.
  * An interrupt is swallowed deliberately because the caller is about to shut down anyway.
  */
 public void await() {
   lock.lock();
   try {
     // the return value (signalled vs. timed out) is deliberately ignored
     condition.await(timeout.millis(), TimeUnit.MILLISECONDS);
   } catch (InterruptedException ignored) {
     // we intentionally do not want to restore the interruption flag, we're about to
     // shutdown anyway
   } finally {
     lock.unlock();
   }
 }
 /**
  * Sends a leave request for {@code node} to the master and blocks until it is
  * acknowledged or the timeout elapses.
  */
 public void sendLeaveRequestBlocking(
     DiscoveryNode masterNode, DiscoveryNode node, TimeValue timeout)
     throws ElasticSearchException {
   LeaveRequest leaveRequest = new LeaveRequest(node);
   transportService
       .submitRequest(
           masterNode,
           LeaveRequestRequestHandler.ACTION,
           leaveRequest,
           VoidTransportResponseHandler.INSTANCE_NOSPAWN)
       .txGet(timeout.millis(), TimeUnit.MILLISECONDS);
 }
  /**
   * (Re)builds the query cache from the configured size, concurrency level and optional
   * expiration.
   */
  private void buildCache() {
    long maxWeightBytes =
        MemorySizeValue.parseBytesSizeValueOrHeapRatio(size, INDICES_CACHE_QUERY_SIZE).bytes();

    // concurrencyLevel joins the fluent chain; CacheBuilder setters return the builder
    CacheBuilder<Key, Value> builder =
        CacheBuilder.newBuilder()
            .maximumWeight(maxWeightBytes)
            .weigher(new QueryCacheWeigher())
            .removalListener(this)
            .concurrencyLevel(concurrencyLevel);

    // a null expire means entries never expire by access time
    if (expire != null) {
      builder.expireAfterAccess(expire.millis(), TimeUnit.MILLISECONDS);
    }

    cache = builder.build();
  }
 /**
  * Sends a join request for {@code node} to the master and blocks until the resulting
  * cluster state arrives or the timeout elapses.
  */
 public ClusterState sendJoinRequestBlocking(
     DiscoveryNode masterNode, DiscoveryNode node, TimeValue timeout)
     throws ElasticSearchException {
   FutureTransportResponseHandler<JoinResponse> responseHandler =
       new FutureTransportResponseHandler<JoinResponse>() {
         @Override
         public JoinResponse newInstance() {
           return new JoinResponse();
         }
       };
   return transportService
       .submitRequest(
           masterNode, JoinRequestRequestHandler.ACTION, new JoinRequest(node, true), responseHandler)
       .txGet(timeout.millis(), TimeUnit.MILLISECONDS)
       .clusterState;
 }
 /**
  * Executes {@code builder}, expecting it to fail because there is no master: with index
  * auto-creation the failure is a MasterNotDiscoveredException, otherwise a
  * ClusterBlockException. Also verifies the request waited (roughly) the given timeout
  * before giving up.
  */
 void checkWriteAction(
     boolean autoCreateIndex, TimeValue timeout, ActionRequestBuilder<?, ?, ?, ?> builder) {
   // we clean the metadata when loosing a master, therefore all operations on indices will
   // auto create it, if allowed
   long startMillis = System.currentTimeMillis();
   try {
     builder.get();
     fail("expected ClusterBlockException or MasterNotDiscoveredException");
   } catch (ClusterBlockException | MasterNotDiscoveredException e) {
     // the exception type must match the auto-create expectation
     if (e instanceof MasterNotDiscoveredException) {
       assertTrue(autoCreateIndex);
     } else {
       assertFalse(autoCreateIndex);
     }
     // verify we waited before giving up...
     assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
     assertThat(System.currentTimeMillis() - startMillis, greaterThan(timeout.millis() - 50));
   }
 }
 /**
  * Builds a scaling executor holder (executor plus its descriptive info) from the given
  * settings.
  */
 ThreadPool.ExecutorHolder build(
     final ScalingExecutorSettings settings, final ThreadContext threadContext) {
   final TimeValue keepAlive = settings.keepAlive;
   final int coreSize = settings.core;
   final int maxSize = settings.max;
   final ThreadPool.Info info =
       new ThreadPool.Info(
           name(), ThreadPool.ThreadPoolType.SCALING, coreSize, maxSize, keepAlive, null);
   final ThreadFactory factory =
       EsExecutors.daemonThreadFactory(EsExecutors.threadName(settings.nodeName, name()));
   final Executor executor =
       EsExecutors.newScaling(
           name(),
           coreSize,
           maxSize,
           keepAlive.millis(),
           TimeUnit.MILLISECONDS,
           factory,
           threadContext);
   return new ThreadPool.ExecutorHolder(executor, info);
 }
  /**
   * Rolling-restart stress test: brings up a cluster, indexes an initial set of docs,
   * starts background indexers, then repeatedly restarts nodes one at a time for the
   * configured period. Afterwards it verifies doc counts across shard copies and that all
   * copies of each document carry the same version.
   *
   * @throws Exception if node startup, sleeping, or a fatal client call fails
   */
  public void run() throws Exception {
    Node[] nodes = new Node[numberOfNodes];
    for (int i = 0; i < nodes.length; i++) {
      nodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
    }
    // a dedicated client-only node used for all requests below
    client = NodeBuilder.nodeBuilder().settings(settings).client(true).node();

    client
        .client()
        .admin()
        .indices()
        .prepareCreate("test")
        .setSettings(
            settingsBuilder()
                .put("index.number_of_shards", numberOfShards)
                .put("index.number_of_replicas", numberOfReplicas))
        .execute()
        .actionGet();

    logger.info("********** [START] INDEXING INITIAL DOCS");
    for (long i = 0; i < initialNumberOfDocs; i++) {
      indexDoc();
    }
    logger.info("********** [DONE ] INDEXING INITIAL DOCS");

    Indexer[] indexerThreads = new Indexer[indexers];
    for (int i = 0; i < indexerThreads.length; i++) {
      indexerThreads[i] = new Indexer();
    }
    for (int i = 0; i < indexerThreads.length; i++) {
      indexerThreads[i].start();
    }

    long testStart = System.currentTimeMillis();

    // start doing the rolling restart
    int nodeIndex = 0;
    while (true) {
      // capture the node's data locations before closing it so they can be wiped below
      File[] nodeData =
          ((InternalNode) nodes[nodeIndex])
              .injector()
              .getInstance(NodeEnvironment.class)
              .nodeDataLocations();
      nodes[nodeIndex].close();
      if (clearNodeData) {
        FileSystemUtils.deleteRecursively(nodeData);
      }

      try {
        // presumably: with one node down, the expected total is (numberOfNodes - 1)
        // cluster nodes plus the client node, hence "+ 0" — TODO confirm
        ClusterHealthResponse clusterHealth =
            client
                .client()
                .admin()
                .cluster()
                .prepareHealth()
                .setWaitForGreenStatus()
                .setWaitForNodes(Integer.toString(numberOfNodes + 0 /* client node*/))
                .setWaitForRelocatingShards(0)
                .setTimeout("10m")
                .execute()
                .actionGet();
        if (clusterHealth.timedOut()) {
          logger.warn("timed out waiting for green status....");
        }
      } catch (Exception e) {
        // NOTE(review): the caught exception is dropped — consider logging it
        logger.warn("failed to execute cluster health....");
      }

      // restart the node that was just closed
      nodes[nodeIndex] = NodeBuilder.nodeBuilder().settings(settings).node();

      Thread.sleep(1000);

      try {
        // the restarted node is back, so expect all nodes plus the client node
        ClusterHealthResponse clusterHealth =
            client
                .client()
                .admin()
                .cluster()
                .prepareHealth()
                .setWaitForGreenStatus()
                .setWaitForNodes(Integer.toString(numberOfNodes + 1 /* client node*/))
                .setWaitForRelocatingShards(0)
                .setTimeout("10m")
                .execute()
                .actionGet();
        if (clusterHealth.timedOut()) {
          logger.warn("timed out waiting for green status....");
        }
      } catch (Exception e) {
        logger.warn("failed to execute cluster health....");
      }

      // cycle round-robin through the nodes
      if (++nodeIndex == nodes.length) {
        nodeIndex = 0;
      }

      if ((System.currentTimeMillis() - testStart) > period.millis()) {
        logger.info("test finished");
        break;
      }
    }

    // signal the indexer threads to stop
    for (int i = 0; i < indexerThreads.length; i++) {
      indexerThreads[i].close = true;
    }

    // give the (throttled) indexers time to observe the close flag
    Thread.sleep(indexerThrottle.millis() + 10000);

    for (int i = 0; i < indexerThreads.length; i++) {
      if (!indexerThreads[i].closed) {
        logger.warn("thread not closed!");
      }
    }

    client.client().admin().indices().prepareRefresh().execute().actionGet();

    // check the status
    IndicesStatusResponse status =
        client.client().admin().indices().prepareStatus("test").execute().actionGet();
    for (IndexShardStatus shardStatus : status.index("test")) {
      ShardStatus shard = shardStatus.shards()[0];
      logger.info("shard [{}], docs [{}]", shard.shardId(), shard.getDocs().numDocs());
      // every copy of a shard should report the same doc count as the first copy
      for (ShardStatus shardStatu : shardStatus) {
        if (shard.docs().numDocs() != shardStatu.docs().numDocs()) {
          logger.warn(
              "shard doc number does not match!, got {} and {}",
              shard.docs().numDocs(),
              shardStatu.docs().numDocs());
        }
      }
    }

    // check the count
    for (int i = 0; i < (nodes.length * 5); i++) {
      CountResponse count =
          client.client().prepareCount().setQuery(matchAllQuery()).execute().actionGet();
      logger.info(
          "indexed [{}], count [{}], [{}]",
          count.count(),
          indexCounter.get(),
          count.count() == indexCounter.get() ? "OK" : "FAIL");
      if (count.count() != indexCounter.get()) {
        logger.warn("count does not match!");
      }
    }

    // scan all the docs, verify all have the same version based on the number of replicas
    SearchResponse searchResponse =
        client
            .client()
            .prepareSearch()
            .setSearchType(SearchType.SCAN)
            .setQuery(matchAllQuery())
            .setSize(50)
            .setScroll(TimeValue.timeValueMinutes(2))
            .execute()
            .actionGet();
    logger.info("Verifying versions for {} hits...", searchResponse.hits().totalHits());

    while (true) {
      searchResponse =
          client
              .client()
              .prepareSearchScroll(searchResponse.scrollId())
              .setScroll(TimeValue.timeValueMinutes(2))
              .execute()
              .actionGet();
      if (searchResponse.failedShards() > 0) {
        logger.warn("Search Failures " + Arrays.toString(searchResponse.shardFailures()));
      }
      for (SearchHit hit : searchResponse.hits()) {
        long version = -1;
        // fetch each doc multiple times (once per copy) and compare versions
        for (int i = 0; i < (numberOfReplicas + 1); i++) {
          GetResponse getResponse =
              client.client().prepareGet(hit.index(), hit.type(), hit.id()).execute().actionGet();
          if (version == -1) {
            version = getResponse.version();
          } else {
            if (version != getResponse.version()) {
              logger.warn(
                  "Doc {} has different version numbers {} and {}",
                  hit.id(),
                  version,
                  getResponse.version());
            }
          }
        }
      }
      // an empty scroll page means the scan is exhausted
      if (searchResponse.hits().hits().length == 0) {
        break;
      }
    }
    logger.info("Done verifying versions");

    client.close();
    for (Node node : nodes) {
      node.close();
    }
  }
Beispiel #23
0
  private ExecutorHolder rebuild(
      String name,
      ExecutorHolder previousExecutorHolder,
      @Nullable Settings settings,
      Settings defaultSettings) {
    if (Names.SAME.equals(name)) {
      // Don't allow to change the "same" thread executor
      return previousExecutorHolder;
    }
    if (settings == null) {
      settings = ImmutableSettings.Builder.EMPTY_SETTINGS;
    }
    Info previousInfo = previousExecutorHolder != null ? previousExecutorHolder.info : null;
    String type =
        settings.get(
            "type", previousInfo != null ? previousInfo.getType() : defaultSettings.get("type"));
    ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(this.settings, name);
    if ("same".equals(type)) {
      if (previousExecutorHolder != null) {
        logger.debug("updating thread_pool [{}], type [{}]", name, type);
      } else {
        logger.debug("creating thread_pool [{}], type [{}]", name, type);
      }
      return new ExecutorHolder(MoreExecutors.sameThreadExecutor(), new Info(name, type));
    } else if ("cached".equals(type)) {
      TimeValue defaultKeepAlive = defaultSettings.getAsTime("keep_alive", timeValueMinutes(5));
      if (previousExecutorHolder != null) {
        if ("cached".equals(previousInfo.getType())) {
          TimeValue updatedKeepAlive =
              settings.getAsTime("keep_alive", previousInfo.getKeepAlive());
          if (!previousInfo.getKeepAlive().equals(updatedKeepAlive)) {
            logger.debug(
                "updating thread_pool [{}], type [{}], keep_alive [{}]",
                name,
                type,
                updatedKeepAlive);
            ((EsThreadPoolExecutor) previousExecutorHolder.executor)
                .setKeepAliveTime(updatedKeepAlive.millis(), TimeUnit.MILLISECONDS);
            return new ExecutorHolder(
                previousExecutorHolder.executor,
                new Info(name, type, -1, -1, updatedKeepAlive, null));
          }
          return previousExecutorHolder;
        }
        if (previousInfo.getKeepAlive() != null) {
          defaultKeepAlive = previousInfo.getKeepAlive();
        }
      }
      TimeValue keepAlive = settings.getAsTime("keep_alive", defaultKeepAlive);
      if (previousExecutorHolder != null) {
        logger.debug(
            "updating thread_pool [{}], type [{}], keep_alive [{}]", name, type, keepAlive);
      } else {
        logger.debug(
            "creating thread_pool [{}], type [{}], keep_alive [{}]", name, type, keepAlive);
      }
      Executor executor =
          EsExecutors.newCached(keepAlive.millis(), TimeUnit.MILLISECONDS, threadFactory);
      return new ExecutorHolder(executor, new Info(name, type, -1, -1, keepAlive, null));
    } else if ("fixed".equals(type)) {
      int defaultSize =
          defaultSettings.getAsInt("size", EsExecutors.boundedNumberOfProcessors(settings));
      SizeValue defaultQueueSize =
          defaultSettings.getAsSize("queue", defaultSettings.getAsSize("queue_size", null));

      if (previousExecutorHolder != null) {
        if ("fixed".equals(previousInfo.getType())) {
          SizeValue updatedQueueSize =
              settings.getAsSize(
                  "capacity",
                  settings.getAsSize(
                      "queue", settings.getAsSize("queue_size", previousInfo.getQueueSize())));
          if (Objects.equal(previousInfo.getQueueSize(), updatedQueueSize)) {
            int updatedSize = settings.getAsInt("size", previousInfo.getMax());
            if (previousInfo.getMax() != updatedSize) {
              logger.debug(
                  "updating thread_pool [{}], type [{}], size [{}], queue_size [{}]",
                  name,
                  type,
                  updatedSize,
                  updatedQueueSize);
              ((EsThreadPoolExecutor) previousExecutorHolder.executor).setCorePoolSize(updatedSize);
              ((EsThreadPoolExecutor) previousExecutorHolder.executor)
                  .setMaximumPoolSize(updatedSize);
              return new ExecutorHolder(
                  previousExecutorHolder.executor,
                  new Info(name, type, updatedSize, updatedSize, null, updatedQueueSize));
            }
            return previousExecutorHolder;
          }
        }
        if (previousInfo.getMax() >= 0) {
          defaultSize = previousInfo.getMax();
        }
        defaultQueueSize = previousInfo.getQueueSize();
      }

      int size = settings.getAsInt("size", defaultSize);
      SizeValue queueSize =
          settings.getAsSize(
              "capacity",
              settings.getAsSize("queue", settings.getAsSize("queue_size", defaultQueueSize)));
      logger.debug(
          "creating thread_pool [{}], type [{}], size [{}], queue_size [{}]",
          name,
          type,
          size,
          queueSize);
      Executor executor =
          EsExecutors.newFixed(
              size, queueSize == null ? -1 : (int) queueSize.singles(), threadFactory);
      return new ExecutorHolder(executor, new Info(name, type, size, size, null, queueSize));
    } else if ("scaling".equals(type)) {
      TimeValue defaultKeepAlive = defaultSettings.getAsTime("keep_alive", timeValueMinutes(5));
      int defaultMin = defaultSettings.getAsInt("min", 1);
      int defaultSize =
          defaultSettings.getAsInt("size", EsExecutors.boundedNumberOfProcessors(settings));
      if (previousExecutorHolder != null) {
        if ("scaling".equals(previousInfo.getType())) {
          TimeValue updatedKeepAlive =
              settings.getAsTime("keep_alive", previousInfo.getKeepAlive());
          int updatedMin = settings.getAsInt("min", previousInfo.getMin());
          int updatedSize =
              settings.getAsInt("max", settings.getAsInt("size", previousInfo.getMax()));
          if (!previousInfo.getKeepAlive().equals(updatedKeepAlive)
              || previousInfo.getMin() != updatedMin
              || previousInfo.getMax() != updatedSize) {
            logger.debug(
                "updating thread_pool [{}], type [{}], keep_alive [{}]",
                name,
                type,
                updatedKeepAlive);
            if (!previousInfo.getKeepAlive().equals(updatedKeepAlive)) {
              ((EsThreadPoolExecutor) previousExecutorHolder.executor)
                  .setKeepAliveTime(updatedKeepAlive.millis(), TimeUnit.MILLISECONDS);
            }
            if (previousInfo.getMin() != updatedMin) {
              ((EsThreadPoolExecutor) previousExecutorHolder.executor).setCorePoolSize(updatedMin);
            }
            if (previousInfo.getMax() != updatedSize) {
              ((EsThreadPoolExecutor) previousExecutorHolder.executor)
                  .setMaximumPoolSize(updatedSize);
            }
            return new ExecutorHolder(
                previousExecutorHolder.executor,
                new Info(name, type, updatedMin, updatedSize, updatedKeepAlive, null));
          }
          return previousExecutorHolder;
        }
        if (previousInfo.getKeepAlive() != null) {
          defaultKeepAlive = previousInfo.getKeepAlive();
        }
        if (previousInfo.getMin() >= 0) {
          defaultMin = previousInfo.getMin();
        }
        if (previousInfo.getMax() >= 0) {
          defaultSize = previousInfo.getMax();
        }
      }
      TimeValue keepAlive = settings.getAsTime("keep_alive", defaultKeepAlive);
      int min = settings.getAsInt("min", defaultMin);
      int size = settings.getAsInt("max", settings.getAsInt("size", defaultSize));
      if (previousExecutorHolder != null) {
        logger.debug(
            "updating thread_pool [{}], type [{}], min [{}], size [{}], keep_alive [{}]",
            name,
            type,
            min,
            size,
            keepAlive);
      } else {
        logger.debug(
            "creating thread_pool [{}], type [{}], min [{}], size [{}], keep_alive [{}]",
            name,
            type,
            min,
            size,
            keepAlive);
      }
      Executor executor =
          EsExecutors.newScaling(
              min, size, keepAlive.millis(), TimeUnit.MILLISECONDS, threadFactory);
      return new ExecutorHolder(executor, new Info(name, type, min, size, keepAlive, null));
    }
    throw new ElasticSearchIllegalArgumentException(
        "No type found [" + type + "], for [" + name + "]");
  }
 /**
  * Whether this periodic task should be scheduled again: both the index service
  * and this instance must still be open, and a positive interval must be set.
  */
 boolean mustReschedule() {
   boolean stillOpen = !indexService.closed.get() && !closed.get();
   return stillOpen && interval.millis() > 0;
 }
 /**
  * Creates a builder from an absolute interval: the value is captured as raw
  * milliseconds and no calendar unit is associated with it.
  */
 public Builder(TimeValue interval) {
   this.interval = interval.millis();
   this.unit = null;
 }
 /**
  * Waits for all non-master nodes to reply to the publish event, up to the given timeout.
  *
  * @param timeout the maximum time to wait
  * @return {@code true} if every pending node replied before the timeout elapsed,
  *     {@code false} if the wait timed out first
  * @throws InterruptedException if the waiting thread is interrupted
  */
 public boolean awaitAllNodes(TimeValue timeout) throws InterruptedException {
   boolean success = latch.await(timeout.millis(), TimeUnit.MILLISECONDS);
   assert !success || pendingNodes.isEmpty()
       : "response count reached 0 but still waiting for some nodes";
   return success;
 }
 /**
  * An optional timeout to control how long search is allowed to take.
  * Fluent setter: stores the timeout as raw milliseconds and returns this builder.
  */
 public SearchSourceBuilder timeout(TimeValue timeout) {
   long millis = timeout.millis();
   this.timeoutInMillis = millis;
   return this;
 }
Example #28
0
 /**
  * Schedules {@code command} to run repeatedly with a fixed delay between runs,
  * wrapping it in a {@code LoggingRunnable} before handing it to the scheduler.
  */
 public ScheduledFuture<?> scheduleWithFixedDelay(Runnable command, TimeValue interval) {
   long delayMillis = interval.millis();
   Runnable wrapped = new LoggingRunnable(command);
   return scheduler.scheduleWithFixedDelay(wrapped, delayMillis, delayMillis, TimeUnit.MILLISECONDS);
 }
Example #29
0
 /**
  * Schedules a one-shot task after the given delay. For any pool name other than
  * {@code Names.SAME} the task is handed off to that named executor; for SAME it
  * runs directly on the scheduler thread.
  */
 public ScheduledFuture<?> schedule(TimeValue delay, String name, Runnable command) {
   Runnable toRun =
       Names.SAME.equals(name) ? command : new ThreadedRunnable(command, executor(name));
   return scheduler.schedule(toRun, delay.millis(), TimeUnit.MILLISECONDS);
 }
Example #30
0
  /**
   * Builds all named executors from built-in defaults merged with any
   * {@code threadpool.*} group settings, registers a dynamic-settings listener
   * (when a {@code NodeSettingsService} is supplied), and starts the shared
   * scheduler plus the cached estimated-time thread.
   *
   * @param settings node settings; {@code threadpool.*} groups override the defaults
   * @param nodeSettingsService optional service used to apply runtime settings updates
   */
  @Inject
  public ThreadPool(Settings settings, @Nullable NodeSettingsService nodeSettingsService) {
    super(settings);

    Map<String, Settings> groupSettings = settings.getGroups(THREADPOOL_GROUP);

    int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);
    int halfProcMaxAt5 = Math.min(((availableProcessors + 1) / 2), 5);
    int halfProcMaxAt10 = Math.min(((availableProcessors + 1) / 2), 10);
    defaultExecutorTypeSettings =
        buildDefaultExecutorSettings(availableProcessors, halfProcMaxAt5, halfProcMaxAt10);

    Map<String, ExecutorHolder> executors = Maps.newHashMap();
    for (Map.Entry<String, Settings> executor : defaultExecutorTypeSettings.entrySet()) {
      // User-supplied group settings (may be null) are merged over the defaults by build().
      executors.put(
          executor.getKey(),
          build(executor.getKey(), groupSettings.get(executor.getKey()), executor.getValue()));
    }
    // SAME is special-cased: it executes work inline on the calling thread.
    executors.put(
        Names.SAME,
        new ExecutorHolder(MoreExecutors.sameThreadExecutor(), new Info(Names.SAME, "same")));
    this.executors = ImmutableMap.copyOf(executors);
    // Single daemon-threaded scheduler for all timed tasks; on shutdown, pending
    // delayed and periodic tasks are dropped rather than executed.
    this.scheduler =
        new ScheduledThreadPoolExecutor(
            1, EsExecutors.daemonThreadFactory(settings, "scheduler"), new EsAbortPolicy());
    this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
    this.scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
    if (nodeSettingsService != null) {
      nodeSettingsService.addListener(new ApplySettings());
    }

    TimeValue estimatedTimeInterval =
        componentSettings.getAsTime("estimated_time_interval", TimeValue.timeValueMillis(200));
    this.estimatedTimeThread =
        new EstimatedTimeThread(
            EsExecutors.threadName(settings, "[timer]"), estimatedTimeInterval.millis());
    this.estimatedTimeThread.start();
  }

  /**
   * Default type/size/queue settings for every built-in pool, keyed by pool name.
   * Extracted from the constructor so the wiring logic above stays readable.
   */
  private static ImmutableMap<String, Settings> buildDefaultExecutorSettings(
      int availableProcessors, int halfProcMaxAt5, int halfProcMaxAt10) {
    return ImmutableMap.<String, Settings>builder()
        .put(
            Names.GENERIC,
            settingsBuilder().put("type", "cached").put("keep_alive", "30s").build())
        .put(
            Names.INDEX,
            settingsBuilder().put("type", "fixed").put("size", availableProcessors).build())
        .put(
            Names.BULK,
            settingsBuilder().put("type", "fixed").put("size", availableProcessors).build())
        .put(
            Names.GET,
            settingsBuilder().put("type", "fixed").put("size", availableProcessors).build())
        .put(
            Names.SEARCH,
            settingsBuilder()
                .put("type", "fixed")
                .put("size", availableProcessors * 3)
                .put("queue_size", 1000)
                .build())
        .put(
            Names.SUGGEST,
            settingsBuilder()
                .put("type", "fixed")
                .put("size", availableProcessors)
                .put("queue_size", 1000)
                .build())
        .put(
            Names.PERCOLATE,
            settingsBuilder()
                .put("type", "fixed")
                .put("size", availableProcessors)
                .put("queue_size", 1000)
                .build())
        .put(
            Names.MANAGEMENT,
            settingsBuilder()
                .put("type", "scaling")
                .put("keep_alive", "5m")
                .put("size", 5)
                .build())
        .put(
            Names.FLUSH,
            settingsBuilder()
                .put("type", "scaling")
                .put("keep_alive", "5m")
                .put("size", halfProcMaxAt5)
                .build())
        .put(
            Names.MERGE,
            settingsBuilder()
                .put("type", "scaling")
                .put("keep_alive", "5m")
                .put("size", halfProcMaxAt5)
                .build())
        .put(
            Names.REFRESH,
            settingsBuilder()
                .put("type", "scaling")
                .put("keep_alive", "5m")
                .put("size", halfProcMaxAt10)
                .build())
        .put(
            Names.WARMER,
            settingsBuilder()
                .put("type", "scaling")
                .put("keep_alive", "5m")
                .put("size", halfProcMaxAt5)
                .build())
        .put(
            Names.SNAPSHOT,
            settingsBuilder()
                .put("type", "scaling")
                .put("keep_alive", "5m")
                .put("size", halfProcMaxAt5)
                .build())
        .put(Names.OPTIMIZE, settingsBuilder().put("type", "fixed").put("size", 1).build())
        .build();
  }