private static Client startClient(Path tempDir, TransportAddress... transportAddresses) {
    Settings clientSettings =
        Settings.settingsBuilder()
            .put("name", "qa_smoke_client_" + counter.getAndIncrement())
            .put(
                InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING,
                true) // prevents any setting from being overridden by system properties
            .put("client.transport.ignore_cluster_name", true)
            .put(Environment.PATH_HOME_SETTING.getKey(), tempDir)
            .put("node.mode", "network")
            .build(); // we require network here!

    TransportClient.Builder transportClientBuilder =
        TransportClient.builder().settings(clientSettings);
    TransportClient client =
        transportClientBuilder.build().addTransportAddresses(transportAddresses);

    logger.info("--> Elasticsearch Java TransportClient started");

    Exception clientException = null;
    try {
      ClusterHealthResponse health = client.admin().cluster().prepareHealth().get();
      logger.info(
          "--> connected to [{}] cluster which is running [{}] node(s).",
          health.getClusterName(),
          health.getNumberOfNodes());
    } catch (Exception e) {
      clientException = e;
    }

    assumeNoException(
        "Sounds like your cluster is not running at " + clusterAddresses, clientException);

    return client;
  }
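
A hypothetical caller for the helper above, to show how the returned client might be used and released; the address, port, and createTempDir() are placeholders assumed to come from the surrounding smoke-test class, not part of the original snippet.

  public void smokeTestAgainstExternalCluster() throws Exception {
    // Placeholder address of an externally started node; adjust host/port as needed.
    Path tempDir = createTempDir();
    TransportAddress address =
        new InetSocketTransportAddress(InetAddress.getByName("127.0.0.1"), 9300);
    Client client = startClient(tempDir, address);
    try {
      // run smoke-test requests here; a cluster health call already succeeded in startClient()
    } finally {
      client.close();
    }
  }
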
  private void initArticleIndexMapping() {
    try {
      boolean isExists = ElasticsearchHelper.isExistsIndex(transportClient, INDICE);
      if (!isExists) {
        // create index
        transportClient.admin().indices().prepareCreate(INDICE).execute().actionGet();
        // create mapping
        XContentBuilder mapping =
            XContentFactory.jsonBuilder()
                .startObject()
                .startObject(TYPE)
                .startObject("_all")
                .field("indexAnalyzer", "ik")
                .field("searchAnalyzer", "ik")
                .endObject()
                .startObject("properties")
                .startObject(TITLE)
                .field("type", "string")
                .field("indexAnalyzer", "ik")
                .field("searchAnalyzer", "ik")
                .endObject()
                .startObject(SUMMARY)
                .field("type", "string")
                .field("indexAnalyzer", "ik")
                .field("searchAnalyzer", "ik")
                .endObject()
                .endObject()
                .endObject()
                .endObject();

        PutMappingRequest mappingRequest =
            Requests.putMappingRequest(INDICE).type(TYPE).source(mapping);
        transportClient.admin().indices().putMapping(mappingRequest).actionGet();

        logger.debug("create index and mapping are success!");
      } else {
        logger.debug("Index already exists!");
      }

    } catch (Exception e) {
      logger.error("create index and mapping are failure!", e);
    }
  }
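
The existence check above goes through an ElasticsearchHelper class that is not part of the snippet; a minimal sketch of an equivalent check with the 2.x indices admin API might look like the following (the class placement and method name are assumptions, not the original helper).

  public static boolean isExistsIndex(Client client, String indexName) {
    // Sketch of an index-existence check; not the original ElasticsearchHelper implementation.
    return client.admin().indices().prepareExists(indexName).execute().actionGet().isExists();
  }
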
  /**
   * Builds the Elasticsearch client using the properties this connection was instantiated with.
   *
   * @return the configured Elasticsearch {@link Client}
   * @throws SQLException if the client cannot be built or the configured index does not exist
   */
  private Client buildClient() throws SQLException {
    if (props.containsKey("test")) { // used for integration tests
      return ESIntegTestCase.client();
    } else
      try {
        Settings.Builder settingsBuilder = Settings.settingsBuilder();
        for (Object key : this.props.keySet()) {
          settingsBuilder.put(key, this.props.get(key));
        }
        Settings settings = settingsBuilder.build();
        TransportClient client =
            TransportClient.builder()
                .settings(settings)
                .build()
                .addTransportAddress(
                    new InetSocketTransportAddress(InetAddress.getByName(host), port));

        // add additional hosts if set in the URL query part
        if (this.props.containsKey("es.hosts")) {
          for (String hostPort : this.props.getProperty("es.hosts").split(",")) {
            String[] parts = hostPort.split(":");
            String newHost = parts[0].trim();
            int newPort = (parts.length > 1 ? Integer.parseInt(parts[1]) : Utils.PORT);
            client.addTransportAddress(
                new InetSocketTransportAddress(InetAddress.getByName(newHost), newPort));
            logger.info("Adding additional ES host: " + hostPort);
          }
        }

        // check if index exists
        if (index != null) {
          boolean indexExists =
              client
                  .admin()
                  .indices()
                  .exists(new IndicesExistsRequest(index))
                  .actionGet()
                  .isExists();
          if (!indexExists) {
            throw new SQLException("Index or Alias '" + index + "' does not exist");
          }
        }
        return client;
      } catch (UnknownHostException e) {
        throw new SQLException("Unable to connect to " + host, e);
      } catch (Throwable t) {
        throw new SQLException("Unable to connect to database", t);
      }
  }
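
For context, a hedged illustration of the connection properties buildClient() consumes: every key is copied verbatim into the client Settings, and the es.hosts key adds extra nodes. The method name, host names, ports, and setting values below are placeholders, not part of the original driver (requires java.util.Properties).

  static Properties exampleConnectionProperties() {
    Properties props = new Properties();
    props.setProperty("cluster.name", "my-cluster");     // copied verbatim into the client Settings
    props.setProperty("client.transport.sniff", "true"); // any transport-client setting can be passed
    props.setProperty("es.hosts", "node2:9300,node3");   // extra hosts; the port part is optional
    return props;
  }
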
 void testBulkProcessor(int processors) {
   logger.info("Test bulk processor with concurrent request: {}", processors);
   Stopwatch watcher = Stopwatch.createStarted();
   try {
     indexedDocumentCount.set(0);
     bulkProcessor =
         BulkProcessor.builder(client, listener)
             .setBulkActions(DEFAULT_BULK_ACTIONS)
             .setConcurrentRequests(processors)
             // .setFlushInterval(DEFAULT_FLUSH_INTERVAL)
             .setBulkSize(DEFAULT_BULK_SIZE)
             .build();
     if (client.admin().indices().prepareExists(INDEX_NAME).get().isExists()) {
       client.admin().indices().prepareDelete(INDEX_NAME).get();
     }
     client.admin().indices().prepareCreate(INDEX_NAME).get();
     logger.info(
         "Done Cluster Health, status: {}",
         client
             .admin()
             .cluster()
             .health(clusterHealthRequest().waitForGreenStatus())
             .get()
             .getStatus());
     for (int i = 0; i < MAX; i++) {
       Map<String, Object> data = new HashMap<String, Object>();
       data.put("name", "test-" + i);
       data.put("date", new Date());
       bulkProcessor.add(
           indexRequest(INDEX_NAME)
               .type(TYPE_NAME)
               .source(XContentFactory.jsonBuilder().map(data)));
     }
     bulkProcessor.close();
     logger.info(
         "Done Cluster Health, status: {}",
         client
             .admin()
             .cluster()
             .health(clusterHealthRequest().waitForGreenStatus())
             .get()
             .getStatus());
     logger.info("Number of documents indexed from afterBulk: {}", indexedDocumentCount.get());
     client.admin().indices().prepareRefresh(INDEX_NAME).get();
     long count = client.prepareCount(INDEX_NAME).get().getCount();
     logger.info("Number of documents: {} in index {}", count, INDEX_NAME);
     if (count != MAX) {
       throw new RuntimeException(
           String.format(
               "Number of documents indexed %s does not match the target %s", count, MAX));
     }
   } catch (Throwable t) {
     logger.error("testBulkProcessor failed", t);
   } finally {
     watcher.stop();
     logger.info("Elpased time: {}", watcher.toString());
     if (client.admin().indices().prepareExists(INDEX_NAME).get().isExists()) {
       client.admin().indices().prepareDelete(INDEX_NAME).get();
       try {
         client
             .admin()
             .cluster()
             .health(clusterHealthRequest().waitForGreenStatus())
             .get()
             .getStatus();
       } catch (Throwable t) {
         throw new RuntimeException(t);
       }
     }
   }
 }
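
The test above relies on a listener and an indexedDocumentCount counter that are defined elsewhere; a sketch of a compatible BulkProcessor.Listener follows (assuming the usual org.elasticsearch.action.bulk imports and java.util.concurrent.atomic.AtomicLong). The counting in afterBulk is an assumption inferred from the "Number of documents indexed from afterBulk" log line, not the original listener.

  // Assumed companion fields for the test above (not shown in the original snippet).
  private final AtomicLong indexedDocumentCount = new AtomicLong();

  private final BulkProcessor.Listener listener =
      new BulkProcessor.Listener() {
        @Override
        public void beforeBulk(long executionId, BulkRequest request) {
          logger.debug("executing bulk of {} actions", request.numberOfActions());
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
          // count every item returned in the bulk response (assumption)
          indexedDocumentCount.addAndGet(response.getItems().length);
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
          logger.error("bulk execution failed", failure);
        }
      };
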