/**
   * Times multi-threaded access to a cache: the map is pre-populated, then 30 threads
   * repeatedly read from it while the stopwatch runs.
   *
   * @return the stopwatch holding the timing result
   */
  private <V> StopWatch timeMultiThreaded(
      String id, final Map<Integer, V> map, ValueFactory<V> factory) throws InterruptedException {

    StopWatch stopWatch = new StopWatch(id);
    for (int i = 0; i < 500; i++) {
      map.put(i, factory.newValue(i));
    }
    Thread[] threads = new Thread[30];
    stopWatch.start("Running threads");
    for (int threadIndex = 0; threadIndex < threads.length; threadIndex++) {
      threads[threadIndex] =
          new Thread("Cache access thread " + threadIndex) {
            @Override
            public void run() {
              for (int j = 0; j < 1000; j++) {
                for (int i = 0; i < 1000; i++) {
                  map.get(i);
                }
              }
            }
          };
    }
    for (Thread thread : threads) {
      thread.start();
    }

    for (Thread thread : threads) {
      if (thread.isAlive()) {
        thread.join(2000);
      }
    }
    stopWatch.stop();
    return stopWatch;
  }
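
A minimal usage sketch for the helper above (hedged: assumes Spring's StopWatch and that
ValueFactory is a functional interface with a single V newValue(int key) method):

  // import java.util.concurrent.ConcurrentHashMap;  (assumed)
  StopWatch watch =
      timeMultiThreaded(
          "ConcurrentHashMap", new ConcurrentHashMap<Integer, String>(), key -> "value-" + key);
  System.out.println(watch.prettyPrint()); // per-task timing summary
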
Example #2
 @Test
 public void shouldExportFile() throws Exception {
   final File file = File.createTempFile("export", ".csv");
   file.delete();
   Thread thread =
       new Thread() {
         @Override
         public void run() {
           log.info("process download dialog");
           app.getAutoItHelper()
               .winWaitAndActivate("Windows Internet Explorer", "", 5000)
               .click("Button3")
               .winWaitAndActivate("—охранить как", "", 5000)
               .send("Edit1", file.getAbsolutePath())
               .click("Button1");
         }
       };
   thread.start();
   log.info("click export");
   app.getNavigationHelper().clickExport();
   Thread.sleep(2000);
   thread.join();
   // wait for the file to be saved
   // verify the file contents
 }
 @Test
 public void we_can_record_the_lifetime_of_a_test_step() throws InterruptedException {
   TestStep step = new TestStep("a narrative description");
   Thread.sleep(150);
   step.recordDuration();
   assertThat(step.getDuration(), is(greaterThanOrEqualTo(100L)));
 }
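
A plausible sketch of how such a step could record its lifetime (hypothetical; the real
TestStep implementation may differ):

  public class TestStepSketch {
    private final long startTime = System.currentTimeMillis();
    private long duration;

    public void recordDuration() {
      // Wall-clock time elapsed since the step was created.
      duration = System.currentTimeMillis() - startTime;
    }

    public long getDuration() {
      return duration;
    }

    public double getDurationInSeconds() {
      return duration / 1000.0;
    }
  }
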
  @Test
  @Slow
  public void testIndexActions() throws Exception {
    startNode("server1");

    logger.info("Running Cluster Health (waiting for node to startup properly)");
    ClusterHealthResponse clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForGreenStatus())
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));

    client("server1").admin().indices().create(createIndexRequest("test")).actionGet();

    closeNode("server1");

    startNode("server1");
    Thread.sleep(500);
    try {
      client("server1").admin().indices().create(createIndexRequest("test")).actionGet();
      assert false : "index should already exist";
    } catch (IndexAlreadyExistsException e) {
      // all is well
    }
  }
 @Test
 public void we_can_display_the_lifetime_of_a_test_step_in_seconds() throws InterruptedException {
   TestStep step = new TestStep("a narrative description");
   Thread.sleep(150);
   step.recordDuration();
   double expectedDuration = step.getDuration() / 1000.0;
   assertThat(step.getDurationInSeconds(), closeTo(expectedDuration, 0.01));
 }
 @AfterClass
 public static void clean() throws Exception {
   if (sc != null) {
     sc.stop();
     // wait for jetty & spark to properly shutdown
     Thread.sleep(TimeUnit.SECONDS.toMillis(2));
   }
 }
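
A hedged alternative to the fixed two-second sleep: poll until the server port is actually
released (a sketch in plain JDK code; localPort and the timeout are illustrative):

  static void awaitPortReleased(int localPort, long timeoutMillis) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (System.currentTimeMillis() < deadline) {
      try (java.net.ServerSocket ignored = new java.net.ServerSocket(localPort)) {
        return; // bind succeeded, so the old server has released the port
      } catch (java.io.IOException stillBound) {
        Thread.sleep(50); // port still bound, back off briefly
      }
    }
    throw new AssertionError("port " + localPort + " not released within " + timeoutMillis + " ms");
  }
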
  @Test
  public void testTimeoutUpdateTask() throws Exception {
    Settings settings = settingsBuilder().put("discovery.type", "local").build();
    internalCluster().startNode(settings);
    ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class);
    final CountDownLatch block = new CountDownLatch(1);
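    // block keeps the single-threaded cluster state executor busy inside "test1",
    // so the timeout task submitted below expires while it is still queued.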
    clusterService1.submitStateUpdateTask(
        "test1",
        new ClusterStateUpdateTask() {
          @Override
          public ClusterState execute(ClusterState currentState) {
            try {
              block.await();
            } catch (InterruptedException e) {
              fail();
            }
            return currentState;
          }

          @Override
          public void onFailure(String source, Throwable t) {
            fail();
          }
        });

    final CountDownLatch timedOut = new CountDownLatch(1);
    final AtomicBoolean executeCalled = new AtomicBoolean();
    clusterService1.submitStateUpdateTask(
        "test2",
        new TimeoutClusterStateUpdateTask() {
          @Override
          public TimeValue timeout() {
            return TimeValue.timeValueMillis(2);
          }

          @Override
          public void onFailure(String source, Throwable t) {
            timedOut.countDown();
          }

          @Override
          public ClusterState execute(ClusterState currentState) {
            executeCalled.set(true);
            return currentState;
          }

          @Override
          public void clusterStateProcessed(
              String source, ClusterState oldState, ClusterState newState) {}
        });

    assertThat(timedOut.await(500, TimeUnit.MILLISECONDS), equalTo(true));
    block.countDown();
    // Sleep a bit to double-check that execute() is never called on the timed-out task.
    Thread.sleep(100);
    assertThat(executeCalled.get(), equalTo(false));
  }
  @Ignore
  @Test
  public void testGetNullWorkflowInstances()
      throws RepositoryException, XmlRpcException, IOException, InterruptedException {

    Thread.sleep(3000);
    WorkflowInstance instance = fmc.getWorkflowInstanceById("1234");

    assertThat(instance, is(nullValue()));
  }
Example #9
  @After
  public void after() {
    // want to wait a few milliseconds to verify calls because everything is asynchronous

    try {
      Thread.sleep(10);
    } catch (InterruptedException e) {
      e.printStackTrace();
    }
  }
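
Fixed sleeps like the one above are timing-sensitive; a small polling helper (a sketch in
plain JDK code, all names illustrative) waits only as long as the condition needs:

  static void awaitCondition(java.util.function.BooleanSupplier condition, long timeoutMillis)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (!condition.getAsBoolean()) {
      if (System.currentTimeMillis() > deadline) {
        throw new AssertionError("condition not met within " + timeoutMillis + " ms");
      }
      Thread.sleep(5); // back off briefly between checks
    }
  }
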
  @Test
  public void testUpdateReplicationControllerToZero() throws Exception {
    getClient().createReplicationController(contr);
    getClient().updateReplicationController(contr.getId(), 0);

    Thread.sleep(10000);

    PodList podList = getClient().getSelectedPods(contr.getLabels());
    assertNotNull(podList);
    assertNotNull(podList.getItems());
    assertEquals(0, podList.getItems().size());
  }
  private static void startXmlRpcWorkflowManager() {
    System.setProperty(
        "java.util.logging.config.file",
        new File("./src/main/resources/logging.properties").getAbsolutePath());

    try {
      System.getProperties().load(new FileInputStream("./src/main/resources/workflow.properties"));
    } catch (Exception e) {
      fail(e.getMessage());
    }
    try {
      luceneCatLoc = Files.createTempDirectory("repo").toString();
      LOG.log(Level.INFO, "Lucene instance repository: [" + luceneCatLoc + "]");
    } catch (Exception e) {
      fail(e.getMessage());
    }

    if (new File(luceneCatLoc).exists()) {
      // blow away lucene cat
      LOG.log(Level.INFO, "Removing workflow instance repository: [" + luceneCatLoc + "]");
      try {
        FileUtils.deleteDirectory(new File(luceneCatLoc));
      } catch (IOException e) {
        fail(e.getMessage());
      }
    }

    System.setProperty(
        "workflow.engine.instanceRep.factory",
        "org.apache.oodt.cas.workflow.instrepo.LuceneWorkflowInstanceRepositoryFactory");
    System.setProperty("org.apache.oodt.cas.workflow.instanceRep.lucene.idxPath", luceneCatLoc);

    try {
      System.setProperty(
          "org.apache.oodt.cas.workflow.repo.dirs",
          "file://" + new File("./src/main/resources/examples").getCanonicalPath());
      System.setProperty(
          "org.apache.oodt.cas.workflow.lifecycle.filePath",
          new File("./src/main/resources/examples/workflow-lifecycle.xml").getCanonicalPath());
    } catch (Exception e) {
      fail(e.getMessage());
    }

    try {
      wmgr = new XmlRpcWorkflowManager(WM_PORT);
      Thread.sleep(MILLIS);
    } catch (Exception e) {
      LOG.log(Level.SEVERE, e.getMessage());
      fail(e.getMessage());
    }
  }
  private static void stopXmlRpcWorkflowManager() {
    System.setProperty(
        "java.util.logging.config.file",
        new File("./src/main/resources/logging.properties").getAbsolutePath());

    try {
      System.getProperties().load(new FileInputStream("./src/main/resources/workflow.properties"));
    } catch (Exception e) {
      fail(e.getMessage());
    }
    System.setProperty(
        "workflow.engine.instanceRep.factory",
        "org.apache.oodt.cas.workflow.instrepo.LuceneWorkflowInstanceRepositoryFactory");
    System.setProperty("org.apache.oodt.cas.workflow.instanceRep.lucene.idxPath", luceneCatLoc);

    try {
      System.setProperty(
          "org.apache.oodt.cas.workflow.repo.dirs",
          "file://" + new File("./src/main/resources/examples").getCanonicalPath());
      System.setProperty(
          "org.apache.oodt.cas.workflow.lifecycle.filePath",
          new File("./src/main/resources/examples/workflow-lifecycle.xml").getCanonicalPath());
    } catch (Exception e) {
      fail(e.getMessage());
    }

    try {
      wmgr.shutdown();
    } catch (Exception e) {
      LOG.log(Level.SEVERE, e.getMessage());
      fail(e.getMessage());
    }

    // Sleep before removing to prevent file-not-found issues.
    try {
      Thread.sleep(MILLIS);
    } catch (InterruptedException e) {
      e.printStackTrace();
    }

    if (new File(luceneCatLoc).exists()) {
      // blow away lucene cat
      LOG.log(Level.INFO, "Removing workflow instance repository: [" + luceneCatLoc + "]");
      try {
        FileUtils.deleteDirectory(new File(luceneCatLoc));
      } catch (IOException e) {
        fail(e.getMessage());
      }
    }
  }
  @Test
  public void testDoesNotUseTCCL() {
    Thread.currentThread()
        .setContextClassLoader(
            new ClassLoader() {
              @Override
              public Enumeration<URL> getResources(String name) throws IOException {
                throw new AssertionError();
              }
            });

    ServiceLocator serviceLocator = new ServiceLocator();
    serviceLocator.getService(TestService.class);
  }
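
Note that the test above swaps the context class loader without restoring it, which can leak
into later tests; a save-and-restore pattern avoids that (a sketch; throwingLoader stands in
for the anonymous ClassLoader above):

  ClassLoader original = Thread.currentThread().getContextClassLoader();
  try {
    Thread.currentThread().setContextClassLoader(throwingLoader);
    // ... exercise code that must not consult the TCCL ...
  } finally {
    Thread.currentThread().setContextClassLoader(original);
  }
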
  @Test
  public void succeedsWhenJdkProxyAndScheduledMethodIsPresentOnInterface()
      throws InterruptedException {
    AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
    ctx.register(Config.class, JdkProxyTxConfig.class, RepoConfigB.class);
    ctx.refresh();

    Thread.sleep(50); // allow @Scheduled method to be called several times

    MyRepositoryWithScheduledMethod repository = ctx.getBean(MyRepositoryWithScheduledMethod.class);
    CallCountingTransactionManager txManager = ctx.getBean(CallCountingTransactionManager.class);
    assertThat("repository is not a proxy", AopUtils.isAopProxy(repository), is(true));
    assertThat("@Scheduled method never called", repository.getInvocationCount(), greaterThan(0));
    assertThat("no transactions were committed", txManager.commits, greaterThan(0));
  }
Example #15
 public static StackTraceElement getMyStackTraceElement() {
   int i = 0;
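   // Skip the first two frames (Thread.getStackTrace and this method itself) plus
   // known infrastructure frames, and return the first genuine caller frame.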
   for (final StackTraceElement ste : Thread.currentThread().getStackTrace()) {
     if ((i++ < 2)
         || ste.getClassName().matches(".*EntityWrapper.*")
         || ste.getClassName().matches(".*TxHandle.*")
         || ste.getMethodName().equals("getEntityWrapper")) {
       continue;
     } else {
       return ste;
     }
   }
   throw new RuntimeException(
       "BUG: Reached bottom of stack trace without finding any relevent frames.");
 }
 @Override
 public synchronized void add(Id id, Record record) throws IOException, InterruptedException {
   switch (record.context.getPhase()) {
     case SETUP:
       assertThat(flowId, is(nullValue()));
       flowId = record.context.getFlowId();
       break;
     case CLEANUP:
       assertThat(flowId, is(record.context.getFlowId()));
       flowId = null;
       break;
     default:
       Thread.sleep(100);
       assertThat(flowId, is(record.context.getFlowId()));
       break;
   }
   super.add(id, record);
 }
  @Test
  public void testGetPages() throws Exception {

    Thread.sleep(3000);
    WorkflowInstancePage page = fmc.getFirstPage();

    assertNotNull(page);

    WorkflowInstancePage lastpage = fmc.getLastPage();

    assertNotNull(lastpage);

    WorkflowInstancePage nextpage = fmc.getNextPage(page);

    assertNotNull(nextpage);

    WorkflowInstancePage prevpage = fmc.getPrevPage(nextpage);

    assertNotNull(prevpage);
  }
  @Test
  public void testFlowDescriptionCache() throws Exception {
    int maxCacheTime = 200;
    JobFlowDescriptionCache cache = new JobFlowDescriptionCache(maxCacheTime);
    DescribeJobFlowsRequest request1 = new DescribeJobFlowsRequest();
    request1.withJobFlowIds("jf1");
    DescribeJobFlowsRequest request2 = new DescribeJobFlowsRequest();
    request1.withJobFlowIds("jf2");

    assertNull(cache.getResponse(request1));
    assertNull(cache.getResponse(request2));

    cache.addResponse(request1, new DescribeJobFlowsResult());
    assertNotNull(cache.getResponse(request1));
    assertNull(cache.getResponse(request2));

    Thread.sleep(maxCacheTime * 2);
    assertNull(cache.getResponse(request1));
    assertNull(cache.getResponse(request2));
  }
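
Conceptually, each cached response only needs to carry its creation time for the expiry
check above to work (a minimal sketch, hypothetical rather than the real cache internals):

  class TimedEntry<V> {
    final V value;
    final long createdAt = System.currentTimeMillis();

    TimedEntry(V value) {
      this.value = value;
    }

    boolean isExpired(long maxCacheTimeMillis) {
      // Entries older than the configured maximum age are treated as absent.
      return System.currentTimeMillis() - createdAt > maxCacheTimeMillis;
    }
  }
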
  public synchronized List<String> process(byte[] content, int... writes) throws Exception {
    StringBuilder request = new StringBuilder(512);
    request.append("GET /ctx/path/info");
    char s = '?';
    for (int w : writes) {
      request.append(s).append("w=").append(w);
      s = '&';
    }

    request.append(" HTTP/1.1\r\n").append("Host: localhost\r\n").append("Connection: close\r\n");

    if (content != null)
      request
          .append("Content-Length: ")
          .append(content.length)
          .append("\r\n")
          .append("Content-Type: text/plain\r\n");

    request.append("\r\n");

    int port = _port;
    List<String> list = new ArrayList<>();
    try (Socket socket = new Socket("localhost", port)) {
      socket.setSoTimeout(1000000);
      OutputStream out = socket.getOutputStream();
      out.write(request.toString().getBytes(StandardCharsets.ISO_8859_1));

      if (content != null && content.length > 0) {
        Thread.sleep(100);
        out.write(content[0]);
        Thread.sleep(100);
        int half = (content.length - 1) / 2;
        out.write(content, 1, half);
        Thread.sleep(100);
        out.write(content, 1 + half, content.length - half - 1);
      }

      BufferedReader in =
          new BufferedReader(new InputStreamReader(socket.getInputStream()), 102400);

      // response line
      String line = in.readLine();
      LOG.debug("response-line: " + line);
      Assert.assertThat(line, startsWith("HTTP/1.1 200 OK"));

      // Skip headers
      while (line != null) {
        line = in.readLine();
        LOG.debug("header-line:  " + line);
        if (line == null || line.length() == 0) break;
      }

      // Get body slowly
      while (true) {
        line = in.readLine();
        if (line == null) break;
        LOG.debug("body:  " + brief(line));
        list.add(line);
        Thread.sleep(50);
      }
    }

    // check lines
    int w = 0;
    for (String line : list) {
      LOG.debug("line:  " + brief(line));
      if ("-".equals(line)) continue;
      assertEquals("Line Length", writes[w], line.length());
      assertEquals("Line Contents", line.charAt(0), '0' + (w % 10));

      w++;
      if (w < writes.length && writes[w] <= 0) w++;
    }

    if (content != null) Assert.assertEquals("Content Length", content.length, _read.get());

    return list;
  }
  @Test
  public void updateMappingConcurrently() throws Throwable {
    createIndex("test1", "test2");

    // This is important. The test assumes all nodes are aware of all indices. Due to
    // initializing shard throttling, not all shards are allocated with the initial
    // create index. Wait for it...
    ensureYellow();

    final Throwable[] threadException = new Throwable[1];
    final AtomicBoolean stop = new AtomicBoolean(false);
    Thread[] threads = new Thread[3];
    final CyclicBarrier barrier = new CyclicBarrier(threads.length);
    final ArrayList<Client> clientArray = new ArrayList<>();
    for (Client c : clients()) {
      clientArray.add(c);
    }

    for (int j = 0; j < threads.length; j++) {
      threads[j] =
          new Thread(
              new Runnable() {
                @SuppressWarnings("unchecked")
                @Override
                public void run() {
                  try {
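                    // Line up all threads on the barrier so the mapping updates
                    // start concurrently rather than one after another.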
                    barrier.await();

                    for (int i = 0; i < 100; i++) {
                      if (stop.get()) {
                        return;
                      }

                      Client client1 = clientArray.get(i % clientArray.size());
                      Client client2 = clientArray.get((i + 1) % clientArray.size());
                      String indexName = i % 2 == 0 ? "test2" : "test1";
                      String typeName = "type" + (i % 10);
                      String fieldName = Thread.currentThread().getName() + "_" + i;

                      PutMappingResponse response =
                          client1
                              .admin()
                              .indices()
                              .preparePutMapping(indexName)
                              .setType(typeName)
                              .setSource(
                                  JsonXContent.contentBuilder()
                                      .startObject()
                                      .startObject(typeName)
                                      .startObject("properties")
                                      .startObject(fieldName)
                                      .field("type", "string")
                                      .endObject()
                                      .endObject()
                                      .endObject()
                                      .endObject())
                              .get();

                      assertThat(response.isAcknowledged(), equalTo(true));
                      GetMappingsResponse getMappingResponse =
                          client2.admin().indices().prepareGetMappings(indexName).get();
                      ImmutableOpenMap<String, MappingMetaData> mappings =
                          getMappingResponse.getMappings().get(indexName);
                      assertThat(mappings.containsKey(typeName), equalTo(true));
                      assertThat(
                          ((Map<String, Object>)
                                  mappings.get(typeName).getSourceAsMap().get("properties"))
                              .keySet(),
                          Matchers.hasItem(fieldName));
                    }
                  } catch (Throwable t) {
                    threadException[0] = t;
                    stop.set(true);
                  }
                }
              });

      threads[j].setName("t_" + j);
      threads[j].start();
    }

    for (Thread t : threads) t.join();

    if (threadException[0] != null) {
      throw threadException[0];
    }
  }
  @Test
  public void testPendingUpdateTask() throws Exception {
    Settings settings = settingsBuilder().put("discovery.type", "local").build();
    String node_0 = internalCluster().startNode(settings);
    internalCluster().startNodeClient(settings);

    final ClusterService clusterService =
        internalCluster().getInstance(ClusterService.class, node_0);
    final CountDownLatch block1 = new CountDownLatch(1);
    final CountDownLatch invoked1 = new CountDownLatch(1);
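    // invoked1 signals that the first task has started executing; block1 then keeps the
    // single update thread busy so every task submitted afterwards stays pending.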
    clusterService.submitStateUpdateTask(
        "1",
        new ClusterStateUpdateTask() {
          @Override
          public ClusterState execute(ClusterState currentState) {
            invoked1.countDown();
            try {
              block1.await();
            } catch (InterruptedException e) {
              fail();
            }
            return currentState;
          }

          @Override
          public void onFailure(String source, Throwable t) {
            invoked1.countDown();
            fail();
          }
        });
    invoked1.await();
    final CountDownLatch invoked2 = new CountDownLatch(9);
    for (int i = 2; i <= 10; i++) {
      clusterService.submitStateUpdateTask(
          Integer.toString(i),
          new ProcessedClusterStateUpdateTask() {
            @Override
            public ClusterState execute(ClusterState currentState) {
              return currentState;
            }

            @Override
            public void onFailure(String source, Throwable t) {
              fail();
            }

            @Override
            public void clusterStateProcessed(
                String source, ClusterState oldState, ClusterState newState) {
              invoked2.countDown();
            }
          });
    }

    // There might be other tasks on this node; make sure to take only the ones we added
    // in this test into account.

    // The tasks can be re-ordered, so we need to check out-of-order
    Set<String> controlSources =
        new HashSet<>(Arrays.asList("1", "2", "3", "4", "5", "6", "7", "8", "9", "10"));
    List<PendingClusterTask> pendingClusterTasks = clusterService.pendingTasks();
    assertThat(pendingClusterTasks.size(), greaterThanOrEqualTo(10));
    assertThat(pendingClusterTasks.get(0).getSource().string(), equalTo("1"));
    assertThat(pendingClusterTasks.get(0).isExecuting(), equalTo(true));
    for (PendingClusterTask task : pendingClusterTasks) {
      controlSources.remove(task.getSource().string());
    }
    assertTrue(controlSources.isEmpty());

    controlSources =
        new HashSet<>(Arrays.asList("1", "2", "3", "4", "5", "6", "7", "8", "9", "10"));
    PendingClusterTasksResponse response =
        internalCluster()
            .clientNodeClient()
            .admin()
            .cluster()
            .preparePendingClusterTasks()
            .execute()
            .actionGet();
    assertThat(response.pendingTasks().size(), greaterThanOrEqualTo(10));
    assertThat(response.pendingTasks().get(0).getSource().string(), equalTo("1"));
    assertThat(response.pendingTasks().get(0).isExecuting(), equalTo(true));
    for (PendingClusterTask task : response) {
      controlSources.remove(task.getSource().string());
    }
    assertTrue(controlSources.isEmpty());
    block1.countDown();
    invoked2.await();

    // whenever we test for no tasks, we need to awaitBusy since this is a live node
    assertTrue(
        awaitBusy(
            new Predicate<Object>() {
              @Override
              public boolean apply(Object input) {
                return clusterService.pendingTasks().isEmpty();
              }
            }));
    waitNoPendingTasksOnAll();

    final CountDownLatch block2 = new CountDownLatch(1);
    final CountDownLatch invoked3 = new CountDownLatch(1);
    clusterService.submitStateUpdateTask(
        "1",
        new ClusterStateUpdateTask() {
          @Override
          public ClusterState execute(ClusterState currentState) {
            invoked3.countDown();
            try {
              block2.await();
            } catch (InterruptedException e) {
              fail();
            }
            return currentState;
          }

          @Override
          public void onFailure(String source, Throwable t) {
            invoked3.countDown();
            fail();
          }
        });
    invoked3.await();

    for (int i = 2; i <= 5; i++) {
      clusterService.submitStateUpdateTask(
          Integer.toString(i),
          new ClusterStateUpdateTask() {
            @Override
            public ClusterState execute(ClusterState currentState) {
              return currentState;
            }

            @Override
            public void onFailure(String source, Throwable t) {
              fail();
            }
          });
    }
    Thread.sleep(100);

    pendingClusterTasks = clusterService.pendingTasks();
    assertThat(pendingClusterTasks.size(), greaterThanOrEqualTo(5));
    controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5"));
    for (PendingClusterTask task : pendingClusterTasks) {
      controlSources.remove(task.getSource().string());
    }
    assertTrue(controlSources.isEmpty());

    response =
        internalCluster().clientNodeClient().admin().cluster().preparePendingClusterTasks().get();
    assertThat(response.pendingTasks().size(), greaterThanOrEqualTo(5));
    controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5"));
    for (PendingClusterTask task : response) {
      if (controlSources.remove(task.getSource().string())) {
        assertThat(task.getTimeInQueueInMillis(), greaterThan(0L));
      }
    }
    assertTrue(controlSources.isEmpty());
    block2.countDown();
  }
Example #22
  @Test
  @TestLogging("_root:TRACE")
  public void testTimeoutSendExceptionWithDelayedResponse() throws Exception {
    serviceA.registerHandler(
        "sayHelloTimeoutDelayedResponse",
        new BaseTransportRequestHandler<StringMessageRequest>() {
          @Override
          public StringMessageRequest newInstance() {
            return new StringMessageRequest();
          }

          @Override
          public String executor() {
            return ThreadPool.Names.GENERIC;
          }

          @Override
          public void messageReceived(StringMessageRequest request, TransportChannel channel) {
            TimeValue sleep = TimeValue.parseTimeValue(request.message, null);
            try {
              Thread.sleep(sleep.millis());
            } catch (InterruptedException e) {
              // ignore
            }
            try {
              channel.sendResponse(new StringMessageResponse("hello " + request.message));
            } catch (IOException e) {
              e.printStackTrace();
              assertThat(e.getMessage(), false, equalTo(true));
            }
          }
        });

    TransportFuture<StringMessageResponse> res =
        serviceB.submitRequest(
            nodeA,
            "sayHelloTimeoutDelayedResponse",
            new StringMessageRequest("300ms"),
            options().withTimeout(100),
            new BaseTransportResponseHandler<StringMessageResponse>() {
              @Override
              public StringMessageResponse newInstance() {
                return new StringMessageResponse();
              }

              @Override
              public String executor() {
                return ThreadPool.Names.GENERIC;
              }

              @Override
              public void handleResponse(StringMessageResponse response) {
                assertThat("got response instead of exception", false, equalTo(true));
              }

              @Override
              public void handleException(TransportException exp) {
                assertThat(exp, instanceOf(ReceiveTimeoutTransportException.class));
              }
            });

    try {
      StringMessageResponse message = res.txGet();
      assertThat("exception should be thrown", false, equalTo(true));
    } catch (Exception e) {
      assertThat(e, instanceOf(ReceiveTimeoutTransportException.class));
    }

    // sleep for 400 millis to make sure we get back the response
    Thread.sleep(400);

    for (int i = 0; i < 10; i++) {
      final int counter = i;
      // now, try and send another request, this time with a short timeout
      res =
          serviceB.submitRequest(
              nodeA,
              "sayHelloTimeoutDelayedResponse",
              new StringMessageRequest(counter + "ms"),
              options().withTimeout(100),
              new BaseTransportResponseHandler<StringMessageResponse>() {
                @Override
                public StringMessageResponse newInstance() {
                  return new StringMessageResponse();
                }

                @Override
                public String executor() {
                  return ThreadPool.Names.GENERIC;
                }

                @Override
                public void handleResponse(StringMessageResponse response) {
                  assertThat("hello " + counter + "ms", equalTo(response.message));
                }

                @Override
                public void handleException(TransportException exp) {
                  exp.printStackTrace();
                  assertThat(
                      "got exception instead of a response for "
                          + counter
                          + ": "
                          + exp.getDetailedMessage(),
                      false,
                      equalTo(true));
                }
              });

      StringMessageResponse message = res.txGet();
      assertThat(message.message, equalTo("hello " + counter + "ms"));
    }

    serviceA.removeHandler("sayHelloTimeoutDelayedResponse");
  }
  @Test
  public void testClearAllCaches() throws Exception {
    client().admin().indices().prepareDelete().execute().actionGet();
    client()
        .admin()
        .indices()
        .prepareCreate("test")
        .setSettings(
            ImmutableSettings.settingsBuilder()
                .put("index.number_of_replicas", 0)
                .put("index.number_of_shards", 1))
        .execute()
        .actionGet();
    client().admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
    client().prepareIndex("test", "type", "1").setSource("field", "value1").execute().actionGet();
    client().prepareIndex("test", "type", "2").setSource("field", "value2").execute().actionGet();
    client().admin().indices().prepareRefresh().execute().actionGet();

    NodesStatsResponse nodesStats =
        client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet();
    assertThat(
        nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0L));
    assertThat(
        nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0L));

    IndicesStatsResponse indicesStats =
        client()
            .admin()
            .indices()
            .prepareStats("test")
            .clear()
            .setFieldData(true)
            .setFilterCache(true)
            .execute()
            .actionGet();
    assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0L));
    assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0L));

    // sort to load it to field data and filter to load filter cache
    client()
        .prepareSearch()
        .setFilter(FilterBuilders.termFilter("field", "value1"))
        .addSort("field", SortOrder.ASC)
        .execute()
        .actionGet();
    client()
        .prepareSearch()
        .setFilter(FilterBuilders.termFilter("field", "value2"))
        .addSort("field", SortOrder.ASC)
        .execute()
        .actionGet();

    nodesStats =
        client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet();
    assertThat(
        nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes(),
        greaterThan(0L));
    assertThat(
        nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes(),
        greaterThan(0L));

    indicesStats =
        client()
            .admin()
            .indices()
            .prepareStats("test")
            .clear()
            .setFieldData(true)
            .setFilterCache(true)
            .execute()
            .actionGet();
    assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0L));
    assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), greaterThan(0L));

    client().admin().indices().prepareClearCache().execute().actionGet();
    Thread.sleep(100); // Make sure the filter cache entries have been removed...
    nodesStats =
        client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet();
    assertThat(
        nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0L));
    assertThat(
        nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0L));

    indicesStats =
        client()
            .admin()
            .indices()
            .prepareStats("test")
            .clear()
            .setFieldData(true)
            .setFilterCache(true)
            .execute()
            .actionGet();
    assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0L));
    assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0L));
  }
Example #24
  @Test
  public void testSimpleTTL() throws Exception {
    assertAcked(
        prepareCreate("test")
            .addMapping(
                "type1",
                XContentFactory.jsonBuilder()
                    .startObject()
                    .startObject("type1")
                    .startObject("_timestamp")
                    .field("enabled", true)
                    .field("store", "yes")
                    .endObject()
                    .startObject("_ttl")
                    .field("enabled", true)
                    .endObject()
                    .endObject()
                    .endObject())
            .addMapping(
                "type2",
                XContentFactory.jsonBuilder()
                    .startObject()
                    .startObject("type2")
                    .startObject("_timestamp")
                    .field("enabled", true)
                    .field("store", "yes")
                    .endObject()
                    .startObject("_ttl")
                    .field("enabled", true)
                    .field("default", "1d")
                    .endObject()
                    .endObject()
                    .endObject()));
    ensureYellow("test");

    final NumShards test = getNumShards("test");

    long providedTTLValue = 3000;
    logger.info("--> checking ttl");
    // Index one doc without routing, one doc with routing, one doc with no TTL and no default,
    // and one doc with the default TTL
    long now = System.currentTimeMillis();
    IndexResponse indexResponse =
        client()
            .prepareIndex("test", "type1", "1")
            .setSource("field1", "value1")
            .setTimestamp(String.valueOf(now))
            .setTTL(providedTTLValue)
            .setRefresh(true)
            .get();
    assertThat(indexResponse.isCreated(), is(true));
    indexResponse =
        client()
            .prepareIndex("test", "type1", "with_routing")
            .setSource("field1", "value1")
            .setTimestamp(String.valueOf(now))
            .setTTL(providedTTLValue)
            .setRouting("routing")
            .setRefresh(true)
            .get();
    assertThat(indexResponse.isCreated(), is(true));
    indexResponse =
        client().prepareIndex("test", "type1", "no_ttl").setSource("field1", "value1").get();
    assertThat(indexResponse.isCreated(), is(true));
    indexResponse =
        client().prepareIndex("test", "type2", "default_ttl").setSource("field1", "value1").get();
    assertThat(indexResponse.isCreated(), is(true));

    // realtime get check
    long currentTime = System.currentTimeMillis();
    GetResponse getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").get();
    long ttl0;
    if (getResponse.isExists()) {
      ttl0 = ((Number) getResponse.getField("_ttl").getValue()).longValue();
      assertThat(ttl0, lessThanOrEqualTo(providedTTLValue - (currentTime - now)));
    } else {
      assertThat(providedTTLValue - (currentTime - now), lessThanOrEqualTo(0L));
    }
    // verify the ttl is still decreasing when going to the replica
    currentTime = System.currentTimeMillis();
    getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").get();
    if (getResponse.isExists()) {
      ttl0 = ((Number) getResponse.getField("_ttl").getValue()).longValue();
      assertThat(ttl0, lessThanOrEqualTo(providedTTLValue - (currentTime - now)));
    } else {
      assertThat(providedTTLValue - (currentTime - now), lessThanOrEqualTo(0L));
    }
    // non realtime get (stored)
    currentTime = System.currentTimeMillis();
    getResponse =
        client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(false).get();
    if (getResponse.isExists()) {
      ttl0 = ((Number) getResponse.getField("_ttl").getValue()).longValue();
      assertThat(ttl0, lessThanOrEqualTo(providedTTLValue - (currentTime - now)));
    } else {
      assertThat(providedTTLValue - (currentTime - now), lessThanOrEqualTo(0L));
    }
    // non realtime get going to the replica
    currentTime = System.currentTimeMillis();
    getResponse =
        client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(false).get();
    if (getResponse.isExists()) {
      ttl0 = ((Number) getResponse.getField("_ttl").getValue()).longValue();
      assertThat(ttl0, lessThanOrEqualTo(providedTTLValue - (currentTime - now)));
    } else {
      assertThat(providedTTLValue - (currentTime - now), lessThanOrEqualTo(0L));
    }

    // no TTL provided so no TTL fetched
    getResponse =
        client()
            .prepareGet("test", "type1", "no_ttl")
            .setFields("_ttl")
            .setRealtime(true)
            .execute()
            .actionGet();
    assertThat(getResponse.getField("_ttl"), nullValue());
    // no TTL provided make sure it has default TTL
    getResponse =
        client()
            .prepareGet("test", "type2", "default_ttl")
            .setFields("_ttl")
            .setRealtime(true)
            .execute()
            .actionGet();
    ttl0 = ((Number) getResponse.getField("_ttl").getValue()).longValue();
    assertThat(ttl0, greaterThan(0L));

    IndicesStatsResponse response =
        client().admin().indices().prepareStats("test").clear().setIndexing(true).get();
    assertThat(
        response.getIndices().get("test").getTotal().getIndexing().getTotal().getDeleteCount(),
        equalTo(0L));

    // make sure the purger has done its job for all indexed docs that are expired
    long shouldBeExpiredDate = now + providedTTLValue + PURGE_INTERVAL + 2000;
    currentTime = System.currentTimeMillis();
    if (shouldBeExpiredDate - currentTime > 0) {
      Thread.sleep(shouldBeExpiredDate - currentTime);
    }

    // We can't assume that after waiting for ttl + purgeInterval (waitTime) the documents
    // have actually been deleted. The ttl purging happens in the background in a different
    // thread, and might not have completed after waiting for waitTime. But we can use the
    // index statistics' delete count to be sure that the deletes have been executed; it is
    // incremented before ttl purging finishes.
    logger.info("--> checking purger");
    assertThat(
        awaitBusy(
            new Predicate<Object>() {
              @Override
              public boolean apply(Object input) {
                if (rarely()) {
                  client().admin().indices().prepareFlush("test").get();
                } else if (rarely()) {
                  client().admin().indices().prepareOptimize("test").setMaxNumSegments(1).get();
                }
                IndicesStatsResponse response =
                    client().admin().indices().prepareStats("test").clear().setIndexing(true).get();
                // TTL deletes two docs, but each delete is counted once per shard
                // copy (primary and replica).
                return response
                        .getIndices()
                        .get("test")
                        .getTotal()
                        .getIndexing()
                        .getTotal()
                        .getDeleteCount()
                    == 2L * test.dataCopies;
              }
            },
            5,
            TimeUnit.SECONDS),
        equalTo(true));

    // realtime get check
    getResponse =
        client()
            .prepareGet("test", "type1", "1")
            .setFields("_ttl")
            .setRealtime(true)
            .execute()
            .actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    getResponse =
        client()
            .prepareGet("test", "type1", "with_routing")
            .setRouting("routing")
            .setFields("_ttl")
            .setRealtime(true)
            .execute()
            .actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    // replica realtime get check
    getResponse =
        client()
            .prepareGet("test", "type1", "1")
            .setFields("_ttl")
            .setRealtime(true)
            .execute()
            .actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    getResponse =
        client()
            .prepareGet("test", "type1", "with_routing")
            .setRouting("routing")
            .setFields("_ttl")
            .setRealtime(true)
            .execute()
            .actionGet();
    assertThat(getResponse.isExists(), equalTo(false));

    // Need to run a refresh, in order for the non realtime get to work.
    client().admin().indices().prepareRefresh("test").execute().actionGet();

    // non realtime get (stored) check
    getResponse =
        client()
            .prepareGet("test", "type1", "1")
            .setFields("_ttl")
            .setRealtime(false)
            .execute()
            .actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    getResponse =
        client()
            .prepareGet("test", "type1", "with_routing")
            .setRouting("routing")
            .setFields("_ttl")
            .setRealtime(false)
            .execute()
            .actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    // non realtime get going to the replica check
    getResponse =
        client()
            .prepareGet("test", "type1", "1")
            .setFields("_ttl")
            .setRealtime(false)
            .execute()
            .actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    getResponse =
        client()
            .prepareGet("test", "type1", "with_routing")
            .setRouting("routing")
            .setFields("_ttl")
            .setRealtime(false)
            .execute()
            .actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
  }
  public void testDelayedMappingPropagationOnReplica() throws Exception {
    // This is essentially the same thing as testDelayedMappingPropagationOnPrimary
    // but for replicas
    // Here we want to test that everything goes well if the mappings that
    // are needed for a document are not available on the replica at the
    // time of indexing it
    final List<String> nodeNames = internalCluster().startNodesAsync(2).get();
    assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());

    final String master = internalCluster().getMasterName();
    assertThat(nodeNames, hasItem(master));
    String otherNode = null;
    for (String node : nodeNames) {
      if (node.equals(master) == false) {
        otherNode = node;
        break;
      }
    }
    assertNotNull(otherNode);

    // Force allocation of the primary on the master node by first only allocating on the master
    // and then allowing all nodes so that the replica gets allocated on the other node
    assertAcked(
        prepareCreate("index")
            .setSettings(
                Settings.builder()
                    .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                    .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
                    .put("index.routing.allocation.include._name", master))
            .get());
    assertAcked(
        client()
            .admin()
            .indices()
            .prepareUpdateSettings("index")
            .setSettings(Settings.builder().put("index.routing.allocation.include._name", ""))
            .get());
    ensureGreen();

    // Check routing tables
    ClusterState state = client().admin().cluster().prepareState().get().getState();
    assertEquals(master, state.nodes().masterNode().name());
    List<ShardRouting> shards = state.routingTable().allShards("index");
    assertThat(shards, hasSize(2));
    for (ShardRouting shard : shards) {
      if (shard.primary()) {
        // primary must be on the master
        assertEquals(state.nodes().masterNodeId(), shard.currentNodeId());
      } else {
        assertTrue(shard.active());
      }
    }

    // Block cluster state processing on the replica
    BlockClusterStateProcessing disruption =
        new BlockClusterStateProcessing(otherNode, getRandom());
    internalCluster().setDisruptionScheme(disruption);
    disruption.startDisrupting();
    final AtomicReference<Object> putMappingResponse = new AtomicReference<>();
    client()
        .admin()
        .indices()
        .preparePutMapping("index")
        .setType("type")
        .setSource("field", "type=long")
        .execute(
            new ActionListener<PutMappingResponse>() {
              @Override
              public void onResponse(PutMappingResponse response) {
                putMappingResponse.set(response);
              }

              @Override
              public void onFailure(Throwable e) {
                putMappingResponse.set(e);
              }
            });
    // Wait for mappings to be available on master
    assertBusy(
        new Runnable() {
          @Override
          public void run() {
            final IndicesService indicesService =
                internalCluster().getInstance(IndicesService.class, master);
            final IndexService indexService = indicesService.indexServiceSafe("index");
            assertNotNull(indexService);
            final MapperService mapperService = indexService.mapperService();
            DocumentMapper mapper = mapperService.documentMapper("type");
            assertNotNull(mapper);
            assertNotNull(mapper.mappers().getMapper("field"));
          }
        });

    final AtomicReference<Object> docIndexResponse = new AtomicReference<>();
    client()
        .prepareIndex("index", "type", "1")
        .setSource("field", 42)
        .execute(
            new ActionListener<IndexResponse>() {
              @Override
              public void onResponse(IndexResponse response) {
                docIndexResponse.set(response);
              }

              @Override
              public void onFailure(Throwable e) {
                docIndexResponse.set(e);
              }
            });

    // Wait for document to be indexed on primary
    assertBusy(
        new Runnable() {
          @Override
          public void run() {
            assertTrue(
                client()
                    .prepareGet("index", "type", "1")
                    .setPreference("_primary")
                    .get()
                    .isExists());
          }
        });

    // The mappings have not been propagated to the replica yet, so the document cannot
    // be indexed there. We wait on purpose to make sure that the document is not indexed
    // because the shard operation is stalled, and not just because it takes time to
    // replicate the indexing request to the replica
    Thread.sleep(100);
    assertThat(putMappingResponse.get(), equalTo(null));
    assertThat(docIndexResponse.get(), equalTo(null));

    // Now make sure the indexing request finishes successfully
    disruption.stopDisrupting();
    assertBusy(
        new Runnable() {
          @Override
          public void run() {
            assertThat(putMappingResponse.get(), instanceOf(PutMappingResponse.class));
            PutMappingResponse resp = (PutMappingResponse) putMappingResponse.get();
            assertTrue(resp.isAcknowledged());
            assertThat(docIndexResponse.get(), instanceOf(IndexResponse.class));
            IndexResponse docResp = (IndexResponse) docIndexResponse.get();
            assertEquals(
                Arrays.toString(docResp.getShardInfo().getFailures()),
                2,
                docResp.getShardInfo().getTotal()); // both shards should have succeeded
          }
        });
  }
  public void testDelayedMappingPropagationOnPrimary() throws Exception {
    // Here we want to test that things go well if there is a first request
    // that adds mappings but before mappings are propagated to all nodes
    // another index request introduces the same mapping. The master node
    // will reply immediately since it did not change the cluster state
    // but the change might not be on the node that performed the indexing
    // operation yet

    Settings settings = Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "0ms").build();
    final List<String> nodeNames = internalCluster().startNodesAsync(2, settings).get();
    assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());

    final String master = internalCluster().getMasterName();
    assertThat(nodeNames, hasItem(master));
    String otherNode = null;
    for (String node : nodeNames) {
      if (node.equals(master) == false) {
        otherNode = node;
        break;
      }
    }
    assertNotNull(otherNode);

    // Don't allocate the shard on the master node
    assertAcked(
        prepareCreate("index")
            .setSettings(
                Settings.builder()
                    .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                    .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
                    .put("index.routing.allocation.exclude._name", master))
            .get());
    ensureGreen();

    // Check routing tables
    ClusterState state = client().admin().cluster().prepareState().get().getState();
    assertEquals(master, state.nodes().masterNode().name());
    List<ShardRouting> shards = state.routingTable().allShards("index");
    assertThat(shards, hasSize(1));
    for (ShardRouting shard : shards) {
      if (shard.primary()) {
        // primary must not be on the master node
        assertFalse(state.nodes().masterNodeId().equals(shard.currentNodeId()));
      } else {
        fail(); // only primaries
      }
    }

    // Block cluster state processing where our shard is
    BlockClusterStateProcessing disruption =
        new BlockClusterStateProcessing(otherNode, getRandom());
    internalCluster().setDisruptionScheme(disruption);
    disruption.startDisrupting();

    // Add a new mapping...
    final AtomicReference<Object> putMappingResponse = new AtomicReference<>();
    client()
        .admin()
        .indices()
        .preparePutMapping("index")
        .setType("type")
        .setSource("field", "type=long")
        .execute(
            new ActionListener<PutMappingResponse>() {
              @Override
              public void onResponse(PutMappingResponse response) {
                putMappingResponse.set(response);
              }

              @Override
              public void onFailure(Throwable e) {
                putMappingResponse.set(e);
              }
            });
    // ...and wait for mappings to be available on master
    assertBusy(
        new Runnable() {
          @Override
          public void run() {
            ImmutableOpenMap<String, MappingMetaData> indexMappings =
                client()
                    .admin()
                    .indices()
                    .prepareGetMappings("index")
                    .get()
                    .getMappings()
                    .get("index");
            assertNotNull(indexMappings);
            MappingMetaData typeMappings = indexMappings.get("type");
            assertNotNull(typeMappings);
            Object properties;
            try {
              properties = typeMappings.getSourceAsMap().get("properties");
            } catch (IOException e) {
              throw new AssertionError(e);
            }
            assertNotNull(properties);
            Object fieldMapping = ((Map<String, Object>) properties).get("field");
            assertNotNull(fieldMapping);
          }
        });

    final AtomicReference<Object> docIndexResponse = new AtomicReference<>();
    client()
        .prepareIndex("index", "type", "1")
        .setSource("field", 42)
        .execute(
            new ActionListener<IndexResponse>() {
              @Override
              public void onResponse(IndexResponse response) {
                docIndexResponse.set(response);
              }

              @Override
              public void onFailure(Throwable e) {
                docIndexResponse.set(e);
              }
            });

    // Wait a bit to make sure that the reason why we did not get a response
    // is that cluster state processing is blocked and not just that it takes
    // time to process the indexing request
    Thread.sleep(100);
    assertThat(putMappingResponse.get(), equalTo(null));
    assertThat(docIndexResponse.get(), equalTo(null));

    // Now make sure the indexing request finishes successfully
    disruption.stopDisrupting();
    assertBusy(
        new Runnable() {
          @Override
          public void run() {
            assertThat(putMappingResponse.get(), instanceOf(PutMappingResponse.class));
            PutMappingResponse resp = (PutMappingResponse) putMappingResponse.get();
            assertTrue(resp.isAcknowledged());
            assertThat(docIndexResponse.get(), instanceOf(IndexResponse.class));
            IndexResponse docResp = (IndexResponse) docIndexResponse.get();
            assertEquals(
                Arrays.toString(docResp.getShardInfo().getFailures()),
                1,
                docResp.getShardInfo().getTotal());
          }
        });
  }
  @Test(timeout = 20000)
  public void testUpdatingThreadPoolSettings() throws Exception {
    internalCluster().startNodesAsync(2).get();
    ThreadPool threadPool = internalCluster().getDataNodeInstance(ThreadPool.class);
    // Check that settings are changed
    assertThat(
        ((ThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES),
        equalTo(5L));
    client()
        .admin()
        .cluster()
        .prepareUpdateSettings()
        .setTransientSettings(settingsBuilder().put("threadpool.search.keep_alive", "10m").build())
        .execute()
        .actionGet();
    assertThat(
        ((ThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES),
        equalTo(10L));

    // Make sure that threads continue executing when executor is replaced
    final CyclicBarrier barrier = new CyclicBarrier(2);
    Executor oldExecutor = threadPool.executor(Names.SEARCH);
    threadPool
        .executor(Names.SEARCH)
        .execute(
            new Runnable() {
              @Override
              public void run() {
                try {
                  barrier.await();
                } catch (InterruptedException ex) {
                  Thread.currentThread().interrupt();
                } catch (BrokenBarrierException ex) {
                  //
                }
              }
            });
    client()
        .admin()
        .cluster()
        .prepareUpdateSettings()
        .setTransientSettings(settingsBuilder().put("threadpool.search.type", "fixed").build())
        .execute()
        .actionGet();
    assertThat(threadPool.executor(Names.SEARCH), not(sameInstance(oldExecutor)));
    assertThat(((ThreadPoolExecutor) oldExecutor).isShutdown(), equalTo(true));
    assertThat(((ThreadPoolExecutor) oldExecutor).isTerminating(), equalTo(true));
    assertThat(((ThreadPoolExecutor) oldExecutor).isTerminated(), equalTo(false));
    barrier.await();

    // Make sure that new thread executor is functional
    threadPool
        .executor(Names.SEARCH)
        .execute(
            new Runnable() {
              @Override
              public void run() {
                try {
                  barrier.await();
                } catch (InterruptedException ex) {
                  Thread.currentThread().interrupt();
                } catch (BrokenBarrierException ex) {
                  //
                }
              }
            });
    client()
        .admin()
        .cluster()
        .prepareUpdateSettings()
        .setTransientSettings(settingsBuilder().put("threadpool.search.type", "fixed").build())
        .execute()
        .actionGet();
    barrier.await();
    Thread.sleep(200); // give the (no-op) second update a moment to settle before checking node info

    // Check that node info is correct
    NodesInfoResponse nodesInfoResponse =
        client().admin().cluster().prepareNodesInfo().all().execute().actionGet();
    for (int i = 0; i < 2; i++) {
      NodeInfo nodeInfo = nodesInfoResponse.getNodes()[i];
      boolean found = false;
      for (ThreadPool.Info info : nodeInfo.getThreadPool()) {
        if (info.getName().equals(Names.SEARCH)) {
          assertThat(info.getType(), equalTo("fixed"));
          found = true;
          break;
        }
      }
      assertThat(found, equalTo(true));

      // also fetch the pool settings via a JSON round-trip (serialization smoke test)
      Map<String, Object> poolMap =
          getPoolSettingsThroughJson(nodeInfo.getThreadPool(), Names.SEARCH);
    }
  }
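
  /*
   * Illustrative sketch (an assumption, not part of the suite): a
   * ThreadPoolExecutor that is shut down while a task is still running reports
   * isTerminating() == true and isTerminated() == false until that task
   * returns -- the same state the barrier trick above pins the old search
   * executor in.
   */
  static void executorShutdownSketch() throws Exception {
    final java.util.concurrent.CyclicBarrier barrier = new java.util.concurrent.CyclicBarrier(2);
    java.util.concurrent.ThreadPoolExecutor pool =
        (java.util.concurrent.ThreadPoolExecutor)
            java.util.concurrent.Executors.newFixedThreadPool(1);
    pool.execute(
        new Runnable() {
          @Override
          public void run() {
            try {
              barrier.await(); // block until released below
            } catch (InterruptedException ex) {
              Thread.currentThread().interrupt();
            } catch (java.util.concurrent.BrokenBarrierException ex) {
              //
            }
          }
        });
    pool.shutdown(); // accept no new tasks; the in-flight task keeps running
    assert pool.isTerminating(); // shut down, but a task is still alive
    assert !pool.isTerminated();
    barrier.await(); // release the task
    pool.awaitTermination(5, java.util.concurrent.TimeUnit.SECONDS);
    assert pool.isTerminated();
  }
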
Beispiel #28
0
  @Test
  public void testRecoveryDiff() throws IOException, InterruptedException {
    int numDocs = 2 + random().nextInt(100);
    List<Document> docs = new ArrayList<>();
    for (int i = 0; i < numDocs; i++) {
      Document doc = new Document();
      doc.add(
          new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
      doc.add(
          new TextField(
              "body",
              TestUtil.randomRealisticUnicodeString(random()),
              random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
      doc.add(
          new SortedDocValuesField(
              "dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
      docs.add(doc);
    }
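    // Index the same docs twice with identically seeded writer configs: the two
    // stores should end up with the same content, differing only in timestamps
    // and per-file ids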
    long seed = random().nextLong();
    Store.MetadataSnapshot first;
    {
      Random random = new Random(seed);
      IndexWriterConfig iwc =
          new IndexWriterConfig(new MockAnalyzer(random)).setCodec(actualDefaultCodec());
      iwc.setMergePolicy(NoMergePolicy.INSTANCE);
      iwc.setUseCompoundFile(random.nextBoolean());
      iwc.setMaxThreadStates(1);
      final ShardId shardId = new ShardId(new Index("index"), 1);
      DirectoryService directoryService = new LuceneManagedDirectoryService(random);
      Store store =
          new Store(
              shardId,
              ImmutableSettings.EMPTY,
              directoryService,
              randomDistributor(random, directoryService),
              new DummyShardLock(shardId));
      IndexWriter writer = new IndexWriter(store.directory(), iwc);
      final boolean lotsOfSegments = rarely(random);
      for (Document d : docs) {
        writer.addDocument(d);
        if (lotsOfSegments && random.nextBoolean()) {
          writer.commit();
        } else if (rarely(random)) {
          writer.commit();
        }
      }
      writer.commit();
      writer.close();
      first = store.getMetadata();
      assertDeleteContent(store, directoryService);
      store.close();
    }
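    // make sure the wall clock ticks over so the second commit gets a newer timestamp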
    long time = new Date().getTime();
    while (time == new Date().getTime()) {
      Thread.sleep(10); // bump the time
    }
    Store.MetadataSnapshot second;
    Store store;
    {
      Random random = new Random(seed);
      IndexWriterConfig iwc =
          new IndexWriterConfig(new MockAnalyzer(random)).setCodec(actualDefaultCodec());
      iwc.setMergePolicy(NoMergePolicy.INSTANCE);
      iwc.setUseCompoundFile(random.nextBoolean());
      iwc.setMaxThreadStates(1);
      final ShardId shardId = new ShardId(new Index("index"), 1);
      DirectoryService directoryService = new LuceneManagedDirectoryService(random);
      store =
          new Store(
              shardId,
              ImmutableSettings.EMPTY,
              directoryService,
              randomDistributor(random, directoryService),
              new DummyShardLock(shardId));
      IndexWriter writer = new IndexWriter(store.directory(), iwc);
      final boolean lotsOfSegments = rarely(random);
      for (Document d : docs) {
        writer.addDocument(d);
        if (lotsOfSegments && random.nextBoolean()) {
          writer.commit();
        } else if (rarely(random)) {
          writer.commit();
        }
      }
      writer.commit();
      writer.close();
      second = store.getMetadata();
    }
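    // same logical content in both snapshots: every file has a counterpart, but
    // none should compare as identical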
    Store.RecoveryDiff diff = first.recoveryDiff(second);
    assertThat(first.size(), equalTo(second.size()));
    for (StoreFileMetaData md : first) {
      assertThat(second.get(md.name()), notNullValue());
      // si files are different - containing timestamps etc
      assertThat(second.get(md.name()).isSame(md), equalTo(false));
    }
    assertThat(diff.different.size(), equalTo(first.size()));
    assertThat(
        diff.identical.size(),
        equalTo(0)); // in lucene 5 nothing is identical - we use random ids in file headers
    assertThat(diff.missing, empty());

    // check the self diff
    Store.RecoveryDiff selfDiff = first.recoveryDiff(first);
    assertThat(selfDiff.identical.size(), equalTo(first.size()));
    assertThat(selfDiff.different, empty());
    assertThat(selfDiff.missing, empty());

    // let's add some deletes
    Random random = new Random(seed);
    IndexWriterConfig iwc =
        new IndexWriterConfig(new MockAnalyzer(random)).setCodec(actualDefaultCodec());
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    iwc.setUseCompoundFile(random.nextBoolean());
    iwc.setMaxThreadStates(1);
    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    IndexWriter writer = new IndexWriter(store.directory(), iwc);
    writer.deleteDocuments(new Term("id", Integer.toString(random().nextInt(numDocs))));
    writer.commit();
    writer.close();
    Store.MetadataSnapshot metadata = store.getMetadata();
    StoreFileMetaData delFile = null;
    for (StoreFileMetaData md : metadata) {
      if (md.name().endsWith(".liv")) {
        delFile = md;
        break;
      }
    }
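    // depending on the random commit points, the delete either wrote a live-docs
    // (.liv) file or dropped a single-doc segment entirely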
    Store.RecoveryDiff afterDeleteDiff = metadata.recoveryDiff(second);
    if (delFile != null) {
      // segments_N + del file
      assertThat(afterDeleteDiff.identical.size(), equalTo(metadata.size() - 2));
      assertThat(afterDeleteDiff.different.size(), equalTo(0));
      assertThat(afterDeleteDiff.missing.size(), equalTo(2));
    } else {
      // an entire segment must be missing (single doc segment got dropped)
      assertThat(afterDeleteDiff.identical.size(), greaterThan(0));
      assertThat(afterDeleteDiff.different.size(), equalTo(0));
      assertThat(afterDeleteDiff.missing.size(), equalTo(1)); // the commit file is different
    }

    // check the self diff
    selfDiff = metadata.recoveryDiff(metadata);
    assertThat(selfDiff.identical.size(), equalTo(metadata.size()));
    assertThat(selfDiff.different, empty());
    assertThat(selfDiff.missing, empty());

    // add a new commit
    iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(actualDefaultCodec());
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    iwc.setUseCompoundFile(
        true); // force CFS - easier to test here since we know it will add 3 files
    iwc.setMaxThreadStates(1);
    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    writer = new IndexWriter(store.directory(), iwc);
    writer.addDocument(docs.get(0));
    writer.close();

    Store.MetadataSnapshot newCommitMetaData = store.getMetadata();
    Store.RecoveryDiff newCommitDiff = newCommitMetaData.recoveryDiff(metadata);
    if (delFile != null) {
      // segments_N, del file, cfs, cfe, si for the new segment
      assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetaData.size() - 5));
      // the del file must be different
      assertThat(newCommitDiff.different.size(), equalTo(1));
      assertThat(newCommitDiff.different.get(0).name(), endsWith(".liv"));
      // segments_N, cfs, cfe, si for the new segment
      assertThat(newCommitDiff.missing.size(), equalTo(4));
    } else {
      // segments_N, cfs, cfe, si for the new segment
      assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetaData.size() - 4));
      assertThat(newCommitDiff.different.size(), equalTo(0));
      // an entire segment is missing (the single-doc segment got dropped) plus the commit differs
      assertThat(newCommitDiff.missing.size(), equalTo(4));
    }

    store.deleteContent();
    IOUtils.close(store);
  }
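
/*
 * Simplified sketch of the classification recoveryDiff() performs (an
 * assumption for illustration, not Store's actual implementation): a file
 * present in both snapshots with a matching checksum is "identical", present
 * with a differing checksum is "different", and absent from the target is
 * "missing". The real code additionally groups files per segment, so a
 * segment with any mismatch is recovered as a whole.
 */
class RecoveryDiffSketch {
  final java.util.List<String> identical = new java.util.ArrayList<>();
  final java.util.List<String> different = new java.util.ArrayList<>();
  final java.util.List<String> missing = new java.util.ArrayList<>();

  // 'source' and 'target' map file name -> checksum (hypothetical representation)
  RecoveryDiffSketch(java.util.Map<String, String> source, java.util.Map<String, String> target) {
    for (java.util.Map.Entry<String, String> file : source.entrySet()) {
      String targetChecksum = target.get(file.getKey());
      if (targetChecksum == null) {
        missing.add(file.getKey()); // target lacks this file entirely
      } else if (targetChecksum.equals(file.getValue())) {
        identical.add(file.getKey()); // byte-identical, safe to skip during recovery
      } else {
        different.add(file.getKey()); // same name, different content: re-copy
      }
    }
  }
}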