Example #1
  @Override
  public void run() {
    try {
      URL url = buildPingUrl();
      if (logger.isDebugEnabled()) {
        logger.debug("Sending UDC information to {}...", url);
      }
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      conn.setConnectTimeout((int) HTTP_TIMEOUT.millis());
      conn.setReadTimeout((int) HTTP_TIMEOUT.millis());

      if (conn.getResponseCode() >= 300) {
        throw new Exception(
            String.format("%s Responded with Code %d", url.getHost(), conn.getResponseCode()));
      }
      if (logger.isDebugEnabled()) {
        BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
        String line = reader.readLine();
        while (line != null) {
          logger.debug(line);
          line = reader.readLine();
        }
        reader.close();
      } else {
        conn.getInputStream().close();
      }
      successCounter.incrementAndGet();
    } catch (Exception e) {
      if (logger.isDebugEnabled()) {
        logger.debug("Error sending UDC information", e);
      }
      failCounter.incrementAndGet();
    }
  }
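One detail worth flagging in the snippet above: HttpURLConnection timeouts are plain ints while TimeValue.millis() returns a long, so the (int) casts silently truncate very large values. A minimal hedged alternative using an overflow-checked conversion:

  int timeoutMillis = Math.toIntExact(HTTP_TIMEOUT.millis()); // throws ArithmeticException on overflow instead of truncating
  conn.setConnectTimeout(timeoutMillis);
  conn.setReadTimeout(timeoutMillis);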
Example #2
 public static TimeValue nodeTimeValue(Object node) {
   if (node instanceof Number) {
     return TimeValue.timeValueMillis(((Number) node).longValue());
   }
   return TimeValue.parseTimeValue(
       node.toString(), null, XContentMapValues.class.getSimpleName() + ".nodeTimeValue");
 }
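A quick sketch of the two branches (illustrative values only; assumes this helper lives on XContentMapValues as shown above):

  // a Number is taken as milliseconds directly
  TimeValue fromNumber = XContentMapValues.nodeTimeValue(5000);  // 5000 ms
  // anything else is stringified and parsed, so unit suffixes work
  TimeValue fromString = XContentMapValues.nodeTimeValue("30s"); // 30 seconds
  assert fromNumber.millis() == 5000L;
  assert fromString.seconds() == 30L;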
Example #3
  public void testInitialSearchParamsMisc() {
    SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder());
    Version remoteVersion = Version.fromId(between(0, Version.CURRENT.id));

    TimeValue scroll = null;
    if (randomBoolean()) {
      scroll = TimeValue.parseTimeValue(randomPositiveTimeValue(), "test");
      searchRequest.scroll(scroll);
    }
    int size = between(0, Integer.MAX_VALUE);
    searchRequest.source().size(size);
    Boolean fetchVersion = null;
    if (randomBoolean()) {
      fetchVersion = randomBoolean();
      searchRequest.source().version(fetchVersion);
    }

    Map<String, String> params = initialSearchParams(searchRequest, remoteVersion);

    assertThat(
        params, scroll == null ? not(hasKey("scroll")) : hasEntry("scroll", scroll.toString()));
    assertThat(params, hasEntry("size", Integer.toString(size)));
    assertThat(
        params,
        fetchVersion == null || fetchVersion == true
            ? hasEntry("version", null)
            : not(hasEntry("version", null)));
    assertThat(params, hasEntry("_source", "true"));
  }
Example #4
  public void addDeletion(SearchRequestBuilder searchRequest) {
    searchRequest
        .addSort("_doc", SortOrder.ASC)
        .setScroll(TimeValue.timeValueMinutes(5))
        .setSize(100)
        // load only doc ids, not _source fields
        .setFetchSource(false);

    // this search is synchronous. An optimization would be to make it non-blocking,
    // but that requires tracking pending requests in close().
    // The same semaphore can't be reused because of a potential deadlock (it would
    // require acquiring two locks).
    SearchResponse searchResponse = searchRequest.get();

    while (true) {
      SearchHit[] hits = searchResponse.getHits().getHits();
      for (SearchHit hit : hits) {
        DeleteRequestBuilder deleteRequestBuilder =
            client.prepareDelete(hit.index(), hit.type(), hit.getId());
        SearchHitField routing = hit.field("_routing");
        if (routing != null) {
          deleteRequestBuilder.setRouting(routing.getValue());
        }
        add(deleteRequestBuilder.request());
      }

      String scrollId = searchResponse.getScrollId();
      searchResponse =
          client.prepareSearchScroll(scrollId).setScroll(TimeValue.timeValueMinutes(5)).get();
      if (hits.length == 0) {
        client.nativeClient().prepareClearScroll().addScrollId(scrollId).get();
        break;
      }
    }
  }
Example #5
 private TestResult(
     String name, long avgRefreshTime, long avgQueryTime, ByteSizeValue fieldDataSizeInMemory) {
   this.name = name;
   this.avgRefreshTime = TimeValue.timeValueMillis(avgRefreshTime);
   this.avgQueryTime = TimeValue.timeValueMillis(avgQueryTime);
   this.fieldDataSizeInMemory = fieldDataSizeInMemory;
 }
Example #6
  public void testUTCIntervalRounding() {
    Rounding tzRounding = Rounding.builder(TimeValue.timeValueHours(12)).build();
    DateTimeZone tz = DateTimeZone.UTC;
    assertThat(
        tzRounding.round(time("2009-02-03T01:01:01")),
        isDate(time("2009-02-03T00:00:00.000Z"), tz));
    assertThat(
        tzRounding.nextRoundingValue(time("2009-02-03T00:00:00.000Z")),
        isDate(time("2009-02-03T12:00:00.000Z"), tz));
    assertThat(
        tzRounding.round(time("2009-02-03T13:01:01")),
        isDate(time("2009-02-03T12:00:00.000Z"), tz));
    assertThat(
        tzRounding.nextRoundingValue(time("2009-02-03T12:00:00.000Z")),
        isDate(time("2009-02-04T00:00:00.000Z"), tz));

    tzRounding = Rounding.builder(TimeValue.timeValueHours(48)).build();
    assertThat(
        tzRounding.round(time("2009-02-03T01:01:01")),
        isDate(time("2009-02-03T00:00:00.000Z"), tz));
    assertThat(
        tzRounding.nextRoundingValue(time("2009-02-03T00:00:00.000Z")),
        isDate(time("2009-02-05T00:00:00.000Z"), tz));
    assertThat(
        tzRounding.round(time("2009-02-05T13:01:01")),
        isDate(time("2009-02-05T00:00:00.000Z"), tz));
    assertThat(
        tzRounding.nextRoundingValue(time("2009-02-05T00:00:00.000Z")),
        isDate(time("2009-02-07T00:00:00.000Z"), tz));
  }
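The expected values in this test follow from plain modulo arithmetic on epoch millis. Roughly, and ignoring the time-zone handling the real Rounding adds, UTC interval rounding amounts to:

  long interval = TimeValue.timeValueHours(12).millis(); // 43,200,000 ms
  long t = time("2009-02-03T01:01:01");                  // the test's own helper
  long rounded = t - (t % interval);                     // floor to the interval boundary
  long next = rounded + interval;                        // what nextRoundingValue returns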
Example #7
  /**
   * Creates a new TranslogConfig instance
   *
   * @param shardId the shard ID this translog belongs to
   * @param translogPath the path to use for the transaction log files
   * @param indexSettings the index settings used to set internal variables
   * @param durabilty the default durability setting for the translog
   * @param bigArrays a bigArrays instance used for temporarily allocating write operations
   * @param threadPool a {@link ThreadPool} to schedule async sync durability
   */
  public TranslogConfig(
      ShardId shardId,
      Path translogPath,
      Settings indexSettings,
      Translog.Durabilty durabilty,
      BigArrays bigArrays,
      @Nullable ThreadPool threadPool) {
    this.indexSettings = indexSettings;
    this.shardId = shardId;
    this.translogPath = translogPath;
    this.durabilty = durabilty;
    this.threadPool = threadPool;
    this.bigArrays = bigArrays;
    this.type =
        TranslogWriter.Type.fromString(
            indexSettings.get(INDEX_TRANSLOG_FS_TYPE, TranslogWriter.Type.BUFFERED.name()));
    this.bufferSize =
        (int)
            indexSettings
                .getAsBytesSize(
                    INDEX_TRANSLOG_BUFFER_SIZE,
                    IndexingMemoryController.INACTIVE_SHARD_TRANSLOG_BUFFER)
                .bytes(); // Not really interesting, updated by IndexingMemoryController...

    syncInterval =
        indexSettings.getAsTime(INDEX_TRANSLOG_SYNC_INTERVAL, TimeValue.timeValueSeconds(5));
    if (syncInterval.millis() > 0 && threadPool != null) {
      syncOnEachOperation = false;
    } else if (syncInterval.millis() == 0) {
      syncOnEachOperation = true;
    } else {
      syncOnEachOperation = false;
    }
  }
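A small sketch of how the sync-interval defaulting above plays out (hand-rolled illustration; it assumes the INDEX_TRANSLOG_SYNC_INTERVAL constant maps to the "index.translog.sync_interval" key):

  Settings indexSettings = Settings.builder()
      .put("index.translog.sync_interval", "0s") // 0 means fsync on every operation
      .build();
  TimeValue syncInterval =
      indexSettings.getAsTime("index.translog.sync_interval", TimeValue.timeValueSeconds(5));
  boolean syncOnEachOperation = syncInterval.millis() == 0; // true here; a positive interval
                                                            // leaves syncing to the ThreadPool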
Example #8
  public static void main(String[] args) throws Exception {
    System.setProperty("es.logger.prefix", "");

    Settings settings =
        settingsBuilder()
            .put("index.shard.check_index", true)
            .put("gateway.type", "none")
            .put("path.data", "data/data1,data/data2")
            .build();

    RollingRestartStressTest test =
        new RollingRestartStressTest()
            .settings(settings)
            .numberOfNodes(4)
            .numberOfShards(5)
            .numberOfReplicas(1)
            .initialNumberOfDocs(1000)
            .textTokens(150)
            .numberOfFields(10)
            .cleanNodeData(false)
            .indexers(5)
            .indexerThrottle(TimeValue.timeValueMillis(50))
            .period(TimeValue.timeValueMinutes(3));

    test.run();
  }
Example #9
 public void testClusterHealth() throws IOException {
   ClusterState clusterState =
       ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build();
   int pendingTasks = randomIntBetween(0, 200);
   int inFlight = randomIntBetween(0, 200);
   int delayedUnassigned = randomIntBetween(0, 200);
   TimeValue pendingTaskInQueueTime = TimeValue.timeValueMillis(randomIntBetween(1000, 100000));
   ClusterHealthResponse clusterHealth =
       new ClusterHealthResponse(
           "bla",
           new String[] {MetaData.ALL},
           clusterState,
           pendingTasks,
           inFlight,
           delayedUnassigned,
           pendingTaskInQueueTime);
   clusterHealth = maybeSerialize(clusterHealth);
   assertClusterHealth(clusterHealth);
   assertThat(clusterHealth.getNumberOfPendingTasks(), Matchers.equalTo(pendingTasks));
   assertThat(clusterHealth.getNumberOfInFlightFetch(), Matchers.equalTo(inFlight));
   assertThat(clusterHealth.getDelayedUnassignedShards(), Matchers.equalTo(delayedUnassigned));
   assertThat(clusterHealth.getTaskMaxWaitingTime().millis(), is(pendingTaskInQueueTime.millis()));
   assertThat(
       clusterHealth.getActiveShardsPercent(),
       is(allOf(greaterThanOrEqualTo(0.0), lessThanOrEqualTo(100.0))));
 }
Example #10
 @Override
 public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext)
     throws MapperParsingException {
   TTLFieldMapper.Builder builder = ttl();
   for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator();
       iterator.hasNext(); ) {
     Map.Entry<String, Object> entry = iterator.next();
     String fieldName = Strings.toUnderscoreCase(entry.getKey());
     Object fieldNode = entry.getValue();
     if (fieldName.equals("enabled")) {
       EnabledAttributeMapper enabledState =
           nodeBooleanValue(fieldNode)
               ? EnabledAttributeMapper.ENABLED
               : EnabledAttributeMapper.DISABLED;
       builder.enabled(enabledState);
       iterator.remove();
     } else if (fieldName.equals("default")) {
       TimeValue ttlTimeValue = nodeTimeValue(fieldNode, null);
       if (ttlTimeValue != null) {
         builder.defaultTTL(ttlTimeValue.millis());
       }
       iterator.remove();
     }
   }
   return builder;
 }
Example #11
  void checkBulkAction(boolean indexShouldBeAutoCreated, BulkRequestBuilder builder) {
    // bulk operations do not throw MasterNotDiscoveredException. The only way to test that
    // auto-create kicked in and failed is via the timeout, as bulk operations do not wait
    // on blocks.
    TimeValue timeout;
    if (indexShouldBeAutoCreated) {
      // we expect the bulk to fail because it will try to go to the master. Use a small timeout
      // and detect that it has passed
      timeout = new TimeValue(200);
    } else {
      // the request should fail very quickly - use a large timeout and make sure it didn't pass...
      timeout = new TimeValue(5000);
    }
    builder.setTimeout(timeout);
    long now = System.currentTimeMillis();
    try {
      builder.get();
      fail("Expected ClusterBlockException");
    } catch (ClusterBlockException e) {

      if (indexShouldBeAutoCreated) {
        // timeout is 200
        assertThat(System.currentTimeMillis() - now, greaterThan(timeout.millis() - 50));
        assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
      } else {
        // timeout is 5000
        assertThat(System.currentTimeMillis() - now, lessThan(timeout.millis() - 50));
      }
    }
  }
Example #12
  @Test
  public void testRoundingWithTimeZone() {
    MutableDateTime time = new MutableDateTime(DateTimeZone.UTC);
    time.setZone(DateTimeZone.forOffsetHours(-2));
    time.setRounding(time.getChronology().dayOfMonth(), MutableDateTime.ROUND_FLOOR);

    MutableDateTime utcTime = new MutableDateTime(DateTimeZone.UTC);
    utcTime.setRounding(utcTime.getChronology().dayOfMonth(), MutableDateTime.ROUND_FLOOR);

    time.setMillis(utcTimeInMillis("2009-02-03T01:01:01"));
    utcTime.setMillis(utcTimeInMillis("2009-02-03T01:01:01"));

    assertThat(time.toString(), equalTo("2009-02-02T00:00:00.000-02:00"));
    assertThat(utcTime.toString(), equalTo("2009-02-03T00:00:00.000Z"));
    // the time is on the 2nd, and utcTime is on the 3rd, but, because time already encapsulates
    // time zone, the millis diff is not 24, but 22 hours
    assertThat(
        time.getMillis(), equalTo(utcTime.getMillis() - TimeValue.timeValueHours(22).millis()));

    time.setMillis(utcTimeInMillis("2009-02-04T01:01:01"));
    utcTime.setMillis(utcTimeInMillis("2009-02-04T01:01:01"));
    assertThat(time.toString(), equalTo("2009-02-03T00:00:00.000-02:00"));
    assertThat(utcTime.toString(), equalTo("2009-02-04T00:00:00.000Z"));
    assertThat(
        time.getMillis(), equalTo(utcTime.getMillis() - TimeValue.timeValueHours(22).millis()));
  }
Example #13
 @Override
 protected void doExecute(
     final KnapsackPullRequest request, ActionListener<KnapsackPullResponse> listener) {
   final KnapsackState state =
       new KnapsackState().setMode("pull").setNodeName(nodeService.nodeName());
   final KnapsackPullResponse response = new KnapsackPullResponse().setState(state);
   try {
     final BulkTransportClient transportClient =
         ClientBuilder.builder()
             .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, request.getMaxActionsPerBulkRequest())
             .put(ClientBuilder.MAX_CONCURRENT_REQUESTS, request.getMaxBulkConcurrency())
             .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(5))
             .put(clientSettings(client, request))
             .toBulkTransportClient();
     final BulkNodeClient nodeClient =
         ClientBuilder.builder()
             .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, request.getMaxActionsPerBulkRequest())
             .put(ClientBuilder.MAX_CONCURRENT_REQUESTS, request.getMaxBulkConcurrency())
             .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(5))
             .toBulkNodeClient(client);
     state.setTimestamp(new DateTime());
     response.setRunning(true);
     knapsack.submit(
         new Thread() {
           public void run() {
             performPull(request, state, transportClient, nodeClient);
           }
         });
     listener.onResponse(response);
   } catch (Throwable e) {
     logger.error(e.getMessage(), e);
     listener.onFailure(e);
   }
 }
Example #14
 private void waitForOutstandingRequests(
     TimeValue timeOut, Semaphore requestsOutstanding, int maxRequests, String name) {
   long start = System.currentTimeMillis();
   do {
     long msRemaining = timeOut.getMillis() - (System.currentTimeMillis() - start);
      logger.info(
          "[{}] going to try and acquire [{}] in [{}]ms, [{}] available to acquire right now",
          name,
          maxRequests,
          msRemaining,
          requestsOutstanding.availablePermits());
      try {
        // tryAcquire returns false on timeout; only return once the permits were acquired
        if (requestsOutstanding.tryAcquire(maxRequests, msRemaining, TimeUnit.MILLISECONDS)) {
          return;
        }
      } catch (InterruptedException ie) {
        // Just keep swimming
      }
   } while ((System.currentTimeMillis() - start) < timeOut.getMillis());
   throw new ElasticsearchTimeoutException(
       "Requests were still outstanding after the timeout ["
           + timeOut
           + "] for type ["
           + name
           + "]");
 }
Example #15
  @Test
  public void testDefaultRecoverAfterTime() throws IOException {

    // check that the default is not set
    GatewayService service = createService(Settings.builder());
    assertNull(service.recoverAfterTime());

    // ensure default is set when setting expected_nodes
    service = createService(Settings.builder().put("gateway.expected_nodes", 1));
    assertThat(
        service.recoverAfterTime(),
        Matchers.equalTo(GatewayService.DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET));

    // ensure default is set when setting expected_data_nodes
    service = createService(Settings.builder().put("gateway.expected_data_nodes", 1));
    assertThat(
        service.recoverAfterTime(),
        Matchers.equalTo(GatewayService.DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET));

    // ensure default is set when setting expected_master_nodes
    service = createService(Settings.builder().put("gateway.expected_master_nodes", 1));
    assertThat(
        service.recoverAfterTime(),
        Matchers.equalTo(GatewayService.DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET));

    // ensure settings override default
    TimeValue timeValue = TimeValue.timeValueHours(3);
    // set expected_nodes together with an explicit recover_after_time
    service =
        createService(
            Settings.builder()
                .put("gateway.expected_nodes", 1)
                .put("gateway.recover_after_time", timeValue.toString()));
    assertThat(service.recoverAfterTime().millis(), Matchers.equalTo(timeValue.millis()));
  }
Example #16
  public void start() throws Exception {
    for (Thread t : searcherThreads) {
      t.start();
    }
    for (Thread t : indexThreads) {
      t.start();
    }
    barrier1.await();

    StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    barrier2.await();

    latch.await();
    stopWatch.stop();

    System.out.println("Done, took [" + stopWatch.totalTime() + "]");
    System.out.println("Sleeping before close: " + sleepBeforeClose);
    Thread.sleep(sleepBeforeClose.millis());

    for (Client client : clients) {
      client.close();
    }
    for (Node node : nodes) {
      node.close();
    }

    System.out.println("Sleeping before exit: " + sleepAfterDone);
    Thread.sleep(sleepAfterDone.millis());
  }
Example #17
  @Inject
  public LocalGatewayMetaState(
      Settings settings,
      ThreadPool threadPool,
      NodeEnvironment nodeEnv,
      TransportNodesListGatewayMetaState nodesListGatewayMetaState,
      LocalAllocateDangledIndices allocateDangledIndices,
      NodeIndexDeletedAction nodeIndexDeletedAction)
      throws Exception {
    super(settings);
    this.nodeEnv = nodeEnv;
    this.threadPool = threadPool;
    this.format = XContentType.fromRestContentType(settings.get("format", "smile"));
    this.allocateDangledIndices = allocateDangledIndices;
    this.nodeIndexDeletedAction = nodeIndexDeletedAction;
    nodesListGatewayMetaState.init(this);

    if (this.format == XContentType.SMILE) {
      Map<String, String> params = Maps.newHashMap();
      params.put("binary", "true");
      formatParams = new ToXContent.MapParams(params);
      Map<String, String> globalOnlyParams = Maps.newHashMap();
      globalOnlyParams.put("binary", "true");
      globalOnlyParams.put(MetaData.PERSISTENT_ONLY_PARAM, "true");
      globalOnlyParams.put(MetaData.GLOBAL_ONLY_PARAM, "true");
      globalOnlyFormatParams = new ToXContent.MapParams(globalOnlyParams);
    } else {
      formatParams = ToXContent.EMPTY_PARAMS;
      Map<String, String> globalOnlyParams = Maps.newHashMap();
      globalOnlyParams.put(MetaData.PERSISTENT_ONLY_PARAM, "true");
      globalOnlyParams.put(MetaData.GLOBAL_ONLY_PARAM, "true");
      globalOnlyFormatParams = new ToXContent.MapParams(globalOnlyParams);
    }

    this.autoImportDangled =
        AutoImportDangledState.fromString(
            settings.get(
                "gateway.local.auto_import_dangled", AutoImportDangledState.YES.toString()));
    this.danglingTimeout =
        settings.getAsTime("gateway.local.dangling_timeout", TimeValue.timeValueHours(2));

    logger.debug(
        "using gateway.local.auto_import_dangled [{}], with gateway.local.dangling_timeout [{}]",
        this.autoImportDangled,
        this.danglingTimeout);

    if (DiscoveryNode.masterNode(settings)) {
      try {
        pre019Upgrade();
        long start = System.currentTimeMillis();
        loadState();
        logger.debug(
            "took {} to load state", TimeValue.timeValueMillis(System.currentTimeMillis() - start));
      } catch (Exception e) {
        logger.error("failed to read local state, exiting...", e);
        throw e;
      }
    }
  }
Example #18
 public XContentBuilder timeValueField(
     String rawFieldName, String readableFieldName, TimeValue timeValue) throws IOException {
   if (humanReadable) {
     field(readableFieldName, timeValue.toString());
   }
   field(rawFieldName, timeValue.millis());
   return this;
 }
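As a rough illustration (hypothetical field names), the humanReadable flag controls whether the textual form is emitted alongside the raw millis:

  // humanReadable == true  -> "took": "3s", "took_in_millis": 3000
  // humanReadable == false -> "took_in_millis": 3000 only
  builder.timeValueField("took_in_millis", "took", TimeValue.timeValueSeconds(3));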
Example #19
  public static void main(String[] args) {
    final int numberOfRuns = 1;
    final int numIndices = 5 * 365; // five years of daily indices
    final int numShards = 6;
    final int numReplicas = 2;
    final int numberOfNodes = 30;
    final int numberOfTags = 2;
    AllocationService strategy =
        ElasticsearchAllocationTestCase.createAllocationService(
            ImmutableSettings.EMPTY, new Random(1));

    MetaData.Builder mb = MetaData.builder();
    for (int i = 1; i <= numIndices; i++) {
      mb.put(
          IndexMetaData.builder("test_" + i)
              .numberOfShards(numShards)
              .numberOfReplicas(numReplicas));
    }
    MetaData metaData = mb.build();
    RoutingTable.Builder rb = RoutingTable.builder();
    for (int i = 1; i <= numIndices; i++) {
      rb.addAsNew(metaData.index("test_" + i));
    }
    RoutingTable routingTable = rb.build();
    DiscoveryNodes.Builder nb = DiscoveryNodes.builder();
    for (int i = 1; i <= numberOfNodes; i++) {
      nb.put(newNode("node" + i, ImmutableMap.of("tag", "tag_" + (i % numberOfTags))));
    }
    ClusterState initialClusterState =
        ClusterState.builder().metaData(metaData).routingTable(routingTable).nodes(nb).build();

    long start = System.currentTimeMillis();
    for (int i = 0; i < numberOfRuns; i++) {
      logger.info("[{}] starting... ", i);
      long runStart = System.currentTimeMillis();
      ClusterState clusterState = initialClusterState;
      while (clusterState.readOnlyRoutingNodes().hasUnassignedShards()) {
        logger.info(
            "[{}] remaining unassigned {}",
            i,
            clusterState.readOnlyRoutingNodes().unassigned().size());
        RoutingAllocation.Result result =
            strategy.applyStartedShards(
                clusterState, clusterState.readOnlyRoutingNodes().shardsWithState(INITIALIZING));
        clusterState = ClusterState.builder(clusterState).routingResult(result).build();
        result = strategy.reroute(clusterState);
        clusterState = ClusterState.builder(clusterState).routingResult(result).build();
      }
      logger.info(
          "[{}] took {}", i, TimeValue.timeValueMillis(System.currentTimeMillis() - runStart));
    }
    long took = System.currentTimeMillis() - start;
    logger.info(
        "total took {}, AVG {}",
        TimeValue.timeValueMillis(took),
        TimeValue.timeValueMillis(took / numberOfRuns));
  }
Example #20
 private void warnAboutSlowTaskIfNeeded(TimeValue executionTime, String source) {
   if (executionTime.getMillis() > slowTaskLoggingThreshold.getMillis()) {
     logger.warn(
         "cluster state update task [{}] took [{}] above the warn threshold of {}",
         source,
         executionTime,
         slowTaskLoggingThreshold);
   }
 }
Example #21
  public ProcessService(Settings settings) {
    super(settings);
    this.probe = ProcessProbe.getInstance();

    final TimeValue refreshInterval = REFRESH_INTERVAL_SETTING.get(settings);
    processStatsCache = new ProcessStatsCache(refreshInterval, probe.processStats());
    this.info = probe.processInfo();
    this.info.refreshInterval = refreshInterval.millis();
    logger.debug("using refresh_interval [{}]", refreshInterval);
  }
Example #22
 private void delay() {
   if (poll.millis() > 0L) {
     logger.info("next run waiting for {}", poll);
     try {
       Thread.sleep(poll.millis());
    } catch (InterruptedException e) {
      logger.error("Error while waiting.", e, (Object) null);
      Thread.currentThread().interrupt(); // restore the interrupt flag for callers
    }
   }
 }
Example #23
  public void run() throws Exception {
    for (Thread t : searcherThreads) {
      t.start();
    }
    for (Thread t : writerThreads) {
      t.start();
    }
    barrier1.await();

    Refresher refresher = new Refresher();
    scheduledExecutorService.scheduleWithFixedDelay(
        refresher, refreshSchedule.millis(), refreshSchedule.millis(), TimeUnit.MILLISECONDS);
    Flusher flusher = new Flusher();
    scheduledExecutorService.scheduleWithFixedDelay(
        flusher, flushSchedule.millis(), flushSchedule.millis(), TimeUnit.MILLISECONDS);

    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    barrier2.await();

    latch.await();
    stopWatch.stop();

    System.out.println("Summary");
    System.out.println(
        "   -- Readers ["
            + searcherThreads.length
            + "] with ["
            + searcherIterations
            + "] iterations");
    System.out.println(
        "   -- Writers [" + writerThreads.length + "] with [" + writerIterations + "] iterations");
    System.out.println("   -- Took: " + stopWatch.totalTime());
    System.out.println(
        "   -- Refresh [" + refresher.id + "] took: " + refresher.stopWatch.totalTime());
    System.out.println("   -- Flush [" + flusher.id + "] took: " + flusher.stopWatch.totalTime());
    System.out.println("   -- Store size " + store.estimateSize());

    scheduledExecutorService.shutdown();

    engine.refresh(new Engine.Refresh(true));
    stopWatch = new StopWatch();
    stopWatch.start();
    Engine.Searcher searcher = engine.searcher();
    TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), idGenerator.get() + 1);
    stopWatch.stop();
    System.out.println(
        "   -- Indexed ["
            + idGenerator.get()
            + "] docs, found ["
            + topDocs.totalHits
            + "] hits, took "
            + stopWatch.totalTime());
    searcher.release();
  }
Example #24
  public static void main(String[] args) throws Exception {
    NodesStressTest test =
        new NodesStressTest()
            .numberOfNodes(2)
            .indexThreads(5)
            .indexIterations(10 * 1000)
            .searcherThreads(5)
            .searchIterations(10 * 1000)
            .sleepBeforeClose(TimeValue.timeValueMinutes(10))
            .sleepAfterDone(TimeValue.timeValueMinutes(10))
            .build(EMPTY_SETTINGS);

    test.start();
  }
Example #25
      @Override
      public void run() {
        try {
          startLatch.await();
          for (int j = 0; j < numberOfIds; j++) {
            for (int k = 0; k < numberOfUpdatesPerId; ++k) {
              updateRequestsOutstanding.acquire();
              UpdateRequest ur =
                  client()
                      .prepareUpdate("test", "type1", Integer.toString(j))
                      .setScript("ctx._source.field += 1", ScriptService.ScriptType.INLINE)
                      .setRetryOnConflict(retryOnConflict)
                      .setUpsert(jsonBuilder().startObject().field("field", 1).endObject())
                      .setListenerThreaded(false)
                      .request();
              client().update(ur, new UpdateListener(j));

              deleteRequestsOutstanding.acquire();
              DeleteRequest dr =
                  client()
                      .prepareDelete("test", "type1", Integer.toString(j))
                      .setListenerThreaded(false)
                      .setOperationThreaded(false)
                      .request();
              client().delete(dr, new DeleteListener(j));
            }
          }
        } catch (Throwable e) {
          logger.error("Something went wrong", e);
          failures.add(e);
        } finally {
          try {
            waitForOutstandingRequests(
                TimeValue.timeValueSeconds(60),
                updateRequestsOutstanding,
                maxUpdateRequests,
                "Update");
            waitForOutstandingRequests(
                TimeValue.timeValueSeconds(60),
                deleteRequestsOutstanding,
                maxDeleteRequests,
                "Delete");
          } catch (ElasticsearchTimeoutException ete) {
            failures.add(ete);
          }
          latch.countDown();
        }
      }
Example #26
  @Test
  public void testRandomDocsNodeClient() throws Exception {
    final NodeClient es =
        new NodeClient()
            .maxActionsPerBulkRequest(1000)
            .flushInterval(TimeValue.timeValueSeconds(10))
            .newClient(client("1"))
            .newIndex("test");

    try {
      for (int i = 0; i < 12345; i++) {
        es.index("test", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}");
      }
      es.flush();
    } catch (NoNodeAvailableException e) {
      logger.warn("skipping, no node available");
    } finally {
      es.shutdown();
      assertEquals(13, es.getState().getTotalIngest().count());
      if (es.hasThrowable()) {
        logger.error("error", es.getThrowable());
      }
      assertFalse(es.hasThrowable());
    }
  }
Example #27
  @Inject
  public DiskThresholdDecider(
      Settings settings,
      NodeSettingsService nodeSettingsService,
      ClusterInfoService infoService,
      Client client) {
    super(settings);
    String lowWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "85%");
    String highWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "90%");

    if (!validWatermarkSetting(lowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK)) {
      throw new ElasticsearchParseException("unable to parse low watermark [{}]", lowWatermark);
    }
    if (!validWatermarkSetting(highWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK)) {
      throw new ElasticsearchParseException("unable to parse high watermark [{}]", highWatermark);
    }
    // Watermark is expressed in terms of used data, but we need "free" data watermark
    this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(lowWatermark);
    this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(highWatermark);

    this.freeBytesThresholdLow =
        thresholdBytesFromWatermark(lowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK);
    this.freeBytesThresholdHigh =
        thresholdBytesFromWatermark(highWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK);
    this.includeRelocations =
        settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, true);
    this.rerouteInterval =
        settings.getAsTime(
            CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, TimeValue.timeValueSeconds(60));

    this.enabled = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true);
    nodeSettingsService.addListener(new ApplySettings());
    infoService.addListener(new DiskListener(client));
  }
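The watermark inversion above is simple arithmetic: the settings express used space, while the decider works in terms of free space. With the defaults, a worked example:

  // low watermark  "85%" used -> allocation requires at least 15% free
  double freeDiskThresholdLow = 100.0 - 85.0;  // 15.0
  // high watermark "90%" used -> shards start moving off the node below 10% free
  double freeDiskThresholdHigh = 100.0 - 90.0; // 10.0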
Example #28
  public void testSerializeRequest() throws IOException {
    ClusterRerouteRequest req = new ClusterRerouteRequest();
    req.setRetryFailed(randomBoolean());
    req.dryRun(randomBoolean());
    req.explain(randomBoolean());
    req.add(new AllocateEmptyPrimaryAllocationCommand("foo", 1, "bar", randomBoolean()));
    req.timeout(TimeValue.timeValueMillis(randomIntBetween(0, 100)));
    BytesStreamOutput out = new BytesStreamOutput();
    req.writeTo(out);
    BytesReference bytes = out.bytes();
    NetworkModule networkModule = new NetworkModule(null, Settings.EMPTY, true);
    NamedWriteableRegistry namedWriteableRegistry =
        new NamedWriteableRegistry(networkModule.getNamedWriteables());
    StreamInput wrap =
        new NamedWriteableAwareStreamInput(bytes.streamInput(), namedWriteableRegistry);
    ClusterRerouteRequest deserializedReq = new ClusterRerouteRequest();
    deserializedReq.readFrom(wrap);

    assertEquals(req.isRetryFailed(), deserializedReq.isRetryFailed());
    assertEquals(req.dryRun(), deserializedReq.dryRun());
    assertEquals(req.explain(), deserializedReq.explain());
    assertEquals(req.timeout(), deserializedReq.timeout());
    assertEquals(
        1,
        deserializedReq
            .getCommands()
            .commands()
            .size()); // allocation commands have their own tests
    assertEquals(
        req.getCommands().commands().size(), deserializedReq.getCommands().commands().size());
  }
Example #29
  @SuppressWarnings({"unchecked"})
  @Inject
  public SimpleRiver(
      RiverName riverName, RiverSettings settings, Client client, ThreadPool threadPool) {
    super(riverName, settings);
    this.client = client;

    if (settings.settings().containsKey("simple")) {
      Map<String, Object> simpleSettings = (Map<String, Object>) settings.settings().get("simple");
      simpleNumber = XContentMapValues.nodeIntegerValue(simpleSettings.get("number"), 100);
      fieldName = XContentMapValues.nodeStringValue(simpleSettings.get("field"), "test");
      poll =
          XContentMapValues.nodeTimeValue(
              simpleSettings.get("poll"), TimeValue.timeValueMinutes(60));
    }

    logger.info(
        "creating simple stream river for [{} numbers] with field [{}]", simpleNumber, fieldName);

    if (settings.settings().containsKey("index")) {
      Map<String, Object> indexSettings = (Map<String, Object>) settings.settings().get("index");
      indexName = XContentMapValues.nodeStringValue(indexSettings.get("index"), riverName.name());
      typeName = XContentMapValues.nodeStringValue(indexSettings.get("type"), "simple_type");
      bulkSize = XContentMapValues.nodeIntegerValue(indexSettings.get("bulk_size"), 100);
      bulkThreshold = XContentMapValues.nodeIntegerValue(indexSettings.get("bulk_threshold"), 10);
    } else {
      indexName = riverName.name();
      typeName = "simple_type";
      bulkSize = 100;
      bulkThreshold = 10;
    }
  }
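The poll parsing above uses the two-argument overload of nodeTimeValue, which falls back to the default when the key is missing; a minimal sketch (hypothetical map contents):

  Map<String, Object> simpleSettings = new HashMap<>();
  TimeValue poll = XContentMapValues.nodeTimeValue(
      simpleSettings.get("poll"), TimeValue.timeValueMinutes(60)); // absent -> 60m default
  simpleSettings.put("poll", "5m");
  poll = XContentMapValues.nodeTimeValue(
      simpleSettings.get("poll"), TimeValue.timeValueMinutes(60)); // "5m" -> five minutes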
Example #30
  public ScrollChunk nextChunk() {

    final SearchResponse search;
    // make sure to return the initial hits, see
    // https://github.com/Graylog2/graylog2-server/issues/2126
    if (firstResponse == null) {
      search =
          client
              .prepareSearchScroll(scrollId)
              .setScroll(TimeValue.timeValueMinutes(1))
              .execute()
              .actionGet();
    } else {
      search = firstResponse;
      firstResponse = null;
    }

    final SearchHits hits = search.getHits();
    if (hits.getHits().length == 0) {
      // scroll exhausted
      LOG.debug("[{}] Reached end of scroll results for query {}.", queryHash, getOriginalQuery());
      return null;
    }
    LOG.debug(
        "[{}][{}] New scroll id {}, number of hits in chunk: {}",
        queryHash,
        chunkId,
        search.getScrollId(),
        hits.getHits().length);
    scrollId = search.getScrollId(); // save the id for the next request.

    return new ScrollChunk(hits, fields, chunkId++);
  }