@Test
  @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch-cloud-aws/issues/211")
  public void testRepositoryInRemoteRegion() {
    Client client = client();
    Settings settings = internalCluster().getInstance(Settings.class);
    Settings bucketSettings = settings.getByPrefix("repositories.s3.remote-bucket.");
    logger.info(
        "-->  creating s3 repository with bucket[{}] and path [{}]",
        bucketSettings.get("bucket"),
        basePath);
    PutRepositoryResponse putRepositoryResponse =
        client
            .admin()
            .cluster()
            .preparePutRepository("test-repo")
            .setType("s3")
            .setSettings(
                Settings.settingsBuilder()
                    .put("base_path", basePath)
                    .put("bucket", bucketSettings.get("bucket"))
                    .put("region", bucketSettings.get("region")))
            .get();
    assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));

    assertRepositoryIsOperational(client, "test-repo");
  }
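  // Hedged illustration (not part of the original test): getByPrefix returns a
  // view of the settings with the prefix stripped from every key, which is why
  // bucketSettings.get("bucket") above resolves
  // "repositories.s3.remote-bucket.bucket". A minimal sketch, assuming the same
  // Settings builder API used elsewhere in this file; all values are hypothetical:
  private static void demoGetByPrefix() {
    Settings s =
        Settings.settingsBuilder()
            .put("repositories.s3.remote-bucket.bucket", "my-bucket")
            .put("repositories.s3.remote-bucket.region", "us-west-2")
            .build();
    Settings bucketSettings = s.getByPrefix("repositories.s3.remote-bucket.");
    assert "my-bucket".equals(bucketSettings.get("bucket")); // prefix stripped
    assert "us-west-2".equals(bucketSettings.get("region"));
  }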
  private IndexMetaData(
      String index,
      long version,
      State state,
      Settings settings,
      ImmutableMap<String, MappingMetaData> mappings,
      ImmutableMap<String, AliasMetaData> aliases,
      ImmutableMap<String, Custom> customs) {
    Preconditions.checkArgument(
        settings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1) != -1,
        "must specify numberOfShards for index [" + index + "]");
    Preconditions.checkArgument(
        settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1) != -1,
        "must specify numberOfReplicas for index [" + index + "]");
    this.index = index;
    this.version = version;
    this.state = state;
    this.settings = settings;
    this.mappings = mappings;
    this.aliases = aliases;
    this.customs = customs;
    this.totalNumberOfShards = numberOfShards() * (numberOfReplicas() + 1);

    ImmutableMap<String, String> includeMap =
        settings.getByPrefix("index.routing.allocation.include.").getAsMap();
    if (includeMap.isEmpty()) {
      includeFilters = null;
    } else {
      includeFilters = DiscoveryNodeFilters.buildFromKeyValue(includeMap);
    }
    ImmutableMap<String, String> excludeMap =
        settings.getByPrefix("index.routing.allocation.exclude.").getAsMap();
    if (excludeMap.isEmpty()) {
      excludeFilters = null;
    } else {
      excludeFilters = DiscoveryNodeFilters.buildFromKeyValue(excludeMap);
    }
  }
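  // Hedged illustration (added for clarity): how the include map above is
  // populated. With "index.routing.allocation.include.zone" set, the prefix
  // view yields a one-entry map keyed by the bare attribute name, which
  // DiscoveryNodeFilters.buildFromKeyValue turns into a node filter. The
  // values below are hypothetical:
  private static ImmutableMap<String, String> demoIncludeMap() {
    Settings s =
        Settings.settingsBuilder()
            .put("index.routing.allocation.include.zone", "us-east-1a,us-east-1b")
            .build();
    // returns {zone=us-east-1a,us-east-1b}
    return s.getByPrefix("index.routing.allocation.include.").getAsMap();
  }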
  /**
   * Loads the hunspell dictionary for the given locale.
   *
   * @param locale The locale of the hunspell dictionary to be loaded.
   * @param nodeSettings The node level settings
   * @param env The node environment (from which the conf path will be resolved)
   * @return The loaded Hunspell dictionary
   * @throws Exception when loading fails (due to IO errors or malformed dictionary files)
   */
  private Dictionary loadDictionary(String locale, Settings nodeSettings, Environment env)
      throws Exception {
    if (logger.isDebugEnabled()) {
      logger.debug("Loading hunspell dictionary [{}]...", locale);
    }
    Path dicDir = hunspellDir.resolve(locale);
    if (FileSystemUtils.isAccessibleDirectory(dicDir, logger) == false) {
      throw new ElasticsearchException(
          String.format(Locale.ROOT, "Could not find hunspell dictionary [%s]", locale));
    }

    // merge the node settings with the dictionary-specific settings for this locale
    Settings dictSettings = HUNSPELL_DICTIONARY_OPTIONS.get(nodeSettings);
    nodeSettings = loadDictionarySettings(dicDir, dictSettings.getByPrefix(locale));

    boolean ignoreCase = nodeSettings.getAsBoolean("ignore_case", defaultIgnoreCase);

    Path[] affixFiles = FileSystemUtils.files(dicDir, "*.aff");
    if (affixFiles.length == 0) {
      throw new ElasticsearchException(
          String.format(Locale.ROOT, "Missing affix file for hunspell dictionary [%s]", locale));
    }
    if (affixFiles.length != 1) {
      throw new ElasticsearchException(
          String.format(
              Locale.ROOT, "Too many affix files exist for hunspell dictionary [%s]", locale));
    }
    InputStream affixStream = null;

    Path[] dicFiles = FileSystemUtils.files(dicDir, "*.dic");
    List<InputStream> dicStreams = new ArrayList<>(dicFiles.length);
    try {

      for (Path dicFile : dicFiles) {
        dicStreams.add(Files.newInputStream(dicFile));
      }

      affixStream = Files.newInputStream(affixFiles[0]);

      try (Directory tmp = new SimpleFSDirectory(env.tmpFile())) {
        return new Dictionary(tmp, "hunspell", affixStream, dicStreams, ignoreCase);
      }

    } catch (Exception e) {
      logger.error("Could not load hunspell dictionary [{}]", e, locale);
      throw e;
    } finally {
      IOUtils.close(affixStream);
      IOUtils.close(dicStreams);
    }
  }
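  // Hedged usage sketch (not in the original source): the method above expects
  // one directory per locale under the hunspell config dir, holding exactly
  // one *.aff file plus one or more *.dic files, e.g.
  //   <conf>/hunspell/en_US/en_US.aff
  //   <conf>/hunspell/en_US/en_US.dic
  // Assuming those files exist, loading reduces to:
  private Dictionary demoLoadEnUs(Settings nodeSettings, Environment env) throws Exception {
    return loadDictionary("en_US", nodeSettings, env);
  }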
  private void setupElasticsearchServer() throws Exception {
    logger.debug("*** setupElasticsearchServer ***");
    try {
      // "Perparer" is the actual (misspelled) class name in pre-2.0 Elasticsearch
      Tuple<Settings, Environment> initialSettings =
          InternalSettingsPerparer.prepareSettings(settings, true);
      if (!initialSettings.v2().configFile().exists()) {
        FileSystemUtils.mkdirs(initialSettings.v2().configFile());
      }

      if (!initialSettings.v2().logsFile().exists()) {
        FileSystemUtils.mkdirs(initialSettings.v2().logsFile());
      }

      if (!initialSettings.v2().pluginsFile().exists()) {
        FileSystemUtils.mkdirs(initialSettings.v2().pluginsFile());
        if (settings.getByPrefix("plugins") != null) {
          PluginManager pluginManager = new PluginManager(initialSettings.v2(), null);

          Map<String, String> plugins = settings.getByPrefix("plugins").getAsMap();
          for (String key : plugins.keySet()) {
            pluginManager.downloadAndExtract(plugins.get(key), false);
          }
        }
      } else {
        logger.info(
            "Plugin {} has already been installed.", settings.get("plugins.mapper-attachments"));
        logger.info(
            "Plugin {} has already been installed.", settings.get("plugins.lang-javascript"));
      }

      node = nodeBuilder().local(true).settings(settings).node();
    } catch (Exception ex) {
      logger.error("setupElasticsearchServer failed", ex);
      throw ex;
    }
  }
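  // Hedged illustration: the plugin-download loop above expects settings of
  // the form "plugins.<name> = <coordinates-or-url>". A minimal sketch with
  // hypothetical coordinates (any value accepted by
  // PluginManager.downloadAndExtract would do):
  private static Settings demoPluginSettings() {
    return Settings.settingsBuilder()
        .put("plugins.mapper-attachments", "elasticsearch/elasticsearch-mapper-attachments/2.4.1")
        .put("plugins.lang-javascript", "elasticsearch/elasticsearch-lang-javascript/2.4.1")
        .build();
  }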
  @Inject
  public HdfsGateway(
      Settings settings,
      ThreadPool threadPool,
      ClusterService clusterService,
      ClusterName clusterName)
      throws IOException {
    super(settings, threadPool, clusterService);

    this.closeFileSystem = componentSettings.getAsBoolean("close_fs", true);
    String uri = componentSettings.get("uri");
    if (uri == null) {
      throw new ElasticSearchIllegalArgumentException(
          "hdfs gateway requires the 'uri' setting to be set");
    }
    String path = componentSettings.get("path");
    if (path == null) {
      throw new ElasticSearchIllegalArgumentException(
          "hdfs gateway requires the 'path' path setting to be set");
    }
    Path hPath = new Path(new Path(path), clusterName.value());

    int concurrentStreams = componentSettings.getAsInt("concurrent_streams", 5);
    this.concurrentStreamPool =
        EsExecutors.newScalingExecutorService(
            1,
            concurrentStreams,
            5,
            TimeUnit.SECONDS,
            EsExecutors.daemonThreadFactory(settings, "[s3_stream]"));

    logger.debug(
        "Using uri [{}], path [{}], concurrent_streams [{}]", uri, hPath, concurrentStreams);

    Configuration conf = new Configuration();
    Settings hdfsSettings = settings.getByPrefix("hdfs.conf.");
    for (Map.Entry<String, String> entry : hdfsSettings.getAsMap().entrySet()) {
      conf.set(entry.getKey(), entry.getValue());
    }

    fileSystem = FileSystem.get(URI.create(uri), conf);

    initialize(
        new HdfsBlobStore(settings, fileSystem, concurrentStreamPool, hPath), clusterName, null);
  }
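  // Hedged illustration (not part of the original class): every "hdfs.conf.*"
  // setting is copied verbatim into the Hadoop Configuration, so a
  // hypothetical elasticsearch.yml entry such as
  //   hdfs.conf.dfs.replication: 2
  // arrives as conf.set("dfs.replication", "2"):
  private static Configuration demoHdfsConf(Settings settings) {
    Configuration conf = new Configuration();
    for (Map.Entry<String, String> entry :
        settings.getByPrefix("hdfs.conf.").getAsMap().entrySet()) {
      conf.set(entry.getKey(), entry.getValue()); // key arrives with the prefix stripped
    }
    return conf;
  }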
  @Inject
  public AwsEc2UnicastHostsProvider(
      Settings settings,
      TransportService transportService,
      AwsEc2Service awsEc2Service,
      Version version) {
    super(settings);
    this.transportService = transportService;
    this.client = awsEc2Service.client();
    this.version = version;

    this.hostType =
        HostType.valueOf(
            settings.get("discovery.ec2.host_type", "private_ip").toUpperCase(Locale.ROOT));

    this.bindAnyGroup = settings.getAsBoolean("discovery.ec2.any_group", true);
    this.groups = new HashSet<>();
    groups.addAll(Arrays.asList(settings.getAsArray("discovery.ec2.groups")));

    this.tags = settings.getByPrefix("discovery.ec2.tag.").getAsMap();

    Set<String> availabilityZones = new HashSet<>();
    availabilityZones.addAll(
        Arrays.asList(settings.getAsArray("discovery.ec2.availability_zones")));
    if (settings.get("discovery.ec2.availability_zones") != null) {
      availabilityZones.addAll(
          Strings.commaDelimitedListToSet(settings.get("discovery.ec2.availability_zones")));
    }
    this.availabilityZones = availabilityZones;

    if (logger.isDebugEnabled()) {
      logger.debug(
          "using host_type [{}], tags [{}], groups [{}] with any_group [{}], availability_zones [{}]",
          hostType,
          tags,
          groups,
          bindAnyGroup,
          availabilityZones);
    }
  }
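  // Hedged illustration (added for clarity): the discovery settings consumed
  // above, assembled with the same builder API used elsewhere in this file;
  // every value below is a hypothetical example:
  private static Settings demoEc2DiscoverySettings() {
    return Settings.settingsBuilder()
        .put("discovery.ec2.host_type", "private_ip")
        .put("discovery.ec2.any_group", true)
        .putArray("discovery.ec2.groups", "sg-es-a", "sg-es-b")
        .put("discovery.ec2.tag.stage", "production") // collected via the "tag." prefix
        .put("discovery.ec2.availability_zones", "us-east-1a,us-east-1b")
        .build();
  }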
  @Inject
  public CouchbaseCAPITransportImpl(
      Settings settings,
      RestController restController,
      NetworkService networkService,
      IndicesService indicesService,
      MetaDataMappingService metaDataMappingService,
      Client client) {
    super(settings);
    this.networkService = networkService;
    this.indicesService = indicesService;
    this.metaDataMappingService = metaDataMappingService;
    this.client = client;
    this.port = settings.get("couchbase.port", "9091-10091");
    this.bindHost = componentSettings.get("bind_host");
    this.publishHost = componentSettings.get("publish_host");
    this.username = settings.get("couchbase.username", "Administrator");
    this.password = settings.get("couchbase.password", "");

    this.bucketUUIDCacheEvictMs = settings.getAsLong("couchbase.bucketUUIDCacheEvictMs", 300000L);
    this.bucketUUIDCache =
        CacheBuilder.newBuilder()
            .expireAfterWrite(this.bucketUUIDCacheEvictMs, TimeUnit.MILLISECONDS)
            .build();

    int defaultNumVbuckets = 1024;
    if (System.getProperty("os.name").toLowerCase().contains("mac")) {
      logger.info("Detected platform is Mac, changing default num_vbuckets to 64");
      defaultNumVbuckets = 64;
    }
    this.numVbuckets = settings.getAsInt("couchbase.num_vbuckets", defaultNumVbuckets);

    pluginSettings = new PluginSettings();
    pluginSettings.setCheckpointDocumentType(
        settings.get(
            "couchbase.typeSelector.checkpointDocumentType",
            PluginSettings.DEFAULT_DOCUMENT_TYPE_CHECKPOINT));
    pluginSettings.setDynamicTypePath(settings.get("couchbase.dynamicTypePath"));
    pluginSettings.setResolveConflicts(settings.getAsBoolean("couchbase.resolveConflicts", true));
    pluginSettings.setWrapCounters(settings.getAsBoolean("couchbase.wrapCounters", false));
    pluginSettings.setMaxConcurrentRequests(
        settings.getAsLong("couchbase.maxConcurrentRequests", 1024L));
    pluginSettings.setBulkIndexRetries(settings.getAsLong("couchbase.bulkIndexRetries", 10L));
    pluginSettings.setBulkIndexRetryWaitMs(
        settings.getAsLong("couchbase.bulkIndexRetryWaitMs", 1000L));
    pluginSettings.setIgnoreDeletes(
        new ArrayList<String>(
            Arrays.asList(settings.get("couchbase.ignoreDeletes", "").split("[:,;\\s]"))));
    pluginSettings.getIgnoreDeletes().removeAll(Arrays.asList("", null));
    pluginSettings.setIgnoreFailures(settings.getAsBoolean("couchbase.ignoreFailures", false));
    pluginSettings.setDocumentTypeRoutingFields(
        settings.getByPrefix("couchbase.documentTypeRoutingFields.").getAsMap());
    pluginSettings.setIgnoreDotIndexes(settings.getAsBoolean("couchbase.ignoreDotIndexes", true));
    pluginSettings.setIncludeIndexes(
        new ArrayList<String>(
            Arrays.asList(settings.get("couchbase.includeIndexes", "").split("[:,;\\s]"))));
    pluginSettings.getIncludeIndexes().removeAll(Arrays.asList("", null));

    TypeSelector typeSelector;
    Class<? extends TypeSelector> typeSelectorClass =
        settings.<TypeSelector>getAsClass("couchbase.typeSelector", DefaultTypeSelector.class);
    try {
      typeSelector = typeSelectorClass.newInstance();
    } catch (Exception e) {
      throw new ElasticsearchException("couchbase.typeSelector", e);
    }
    typeSelector.configure(settings);
    pluginSettings.setTypeSelector(typeSelector);

    ParentSelector parentSelector;
    Class<? extends ParentSelector> parentSelectorClass =
        settings.<ParentSelector>getAsClass(
            "couchbase.parentSelector", DefaultParentSelector.class);
    try {
      parentSelector = parentSelectorClass.newInstance();
    } catch (Exception e) {
      throw new ElasticsearchException("couchbase.parentSelector", e);
    }
    parentSelector.configure(settings);
    pluginSettings.setParentSelector(parentSelector);

    KeyFilter keyFilter;
    Class<? extends KeyFilter> keyFilterClass =
        settings.<KeyFilter>getAsClass("couchbase.keyFilter", DefaultKeyFilter.class);
    try {
      keyFilter = keyFilterClass.newInstance();
    } catch (Exception e) {
      throw new ElasticsearchException("couchbase.keyFilter", e);
    }
    keyFilter.configure(settings);
    pluginSettings.setKeyFilter(keyFilter);

    // Log settings info
    logger.info(
        "Couchbase transport will ignore delete/expiration operations for these buckets: {}",
        pluginSettings.getIgnoreDeletes());
    logger.info(
        "Couchbase transport will ignore indexing failures and not throw exception to Couchbase: {}",
        pluginSettings.getIgnoreFailures());
    logger.info(
        "Couchbase transport is using type selector: {}",
        typeSelector.getClass().getCanonicalName());
    logger.info(
        "Couchbase transport is using parent selector: {}",
        parentSelector.getClass().getCanonicalName());
    logger.info(
        "Couchbase transport is using key filter: {}", keyFilter.getClass().getCanonicalName());
    for (String key : pluginSettings.getDocumentTypeRoutingFields().keySet()) {
      String routingField = pluginSettings.getDocumentTypeRoutingFields().get(key);
      logger.info("Using field {} as routing for type {}", routingField, key);
    }
    logger.info("Plugin Settings: {}", pluginSettings.toString());
  }
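  // Hedged illustration of the pluggable-component pattern used three times
  // above (type selector, parent selector, key filter): resolve a class from
  // settings, instantiate it reflectively, then hand it the settings. The
  // generic helper below is a sketch, not part of the original class:
  private static <T> T demoLoadPluggable(
      Settings settings, String key, Class<? extends T> defaultClass) {
    Class<? extends T> clazz = settings.<T>getAsClass(key, defaultClass);
    try {
      return clazz.newInstance();
    } catch (Exception e) {
      throw new ElasticsearchException(key, e);
    }
  }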
  /** Deletes the contents of the repository files in the bucket. */
  public void cleanRepositoryFiles(String basePath) {
    Settings settings = internalCluster().getInstance(Settings.class);
    Settings[] buckets = {
      settings.getByPrefix("repositories.s3."),
      settings.getByPrefix("repositories.s3.private-bucket."),
      settings.getByPrefix("repositories.s3.remote-bucket."),
      settings.getByPrefix("repositories.s3.external-bucket.")
    };
    for (Settings bucket : buckets) {
      String endpoint = bucket.get("endpoint", settings.get("repositories.s3.endpoint"));
      String protocol = bucket.get("protocol", settings.get("repositories.s3.protocol"));
      String region = bucket.get("region", settings.get("repositories.s3.region"));
      String accessKey = bucket.get("access_key", settings.get("cloud.aws.access_key"));
      String secretKey = bucket.get("secret_key", settings.get("cloud.aws.secret_key"));
      String bucketName = bucket.get("bucket");

      // We check that these settings have been set in the elasticsearch.yml
      // integration test file, as described in the README
      assertThat(
          "Your settings in elasticsearch.yml are incorrect. Check the README file.",
          bucketName,
          notNullValue());
      AmazonS3 client =
          internalCluster()
              .getInstance(AwsS3Service.class)
              .client(endpoint, protocol, region, accessKey, secretKey);
      try {
        ObjectListing prevListing = null;
        // From
        // http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html
        // we can do at most 1K objects per delete
        // We don't know the bucket name until first object listing
        DeleteObjectsRequest multiObjectDeleteRequest = null;
        ArrayList<DeleteObjectsRequest.KeyVersion> keys =
            new ArrayList<DeleteObjectsRequest.KeyVersion>();
        while (true) {
          ObjectListing list;
          if (prevListing != null) {
            list = client.listNextBatchOfObjects(prevListing);
          } else {
            list = client.listObjects(bucketName, basePath);
            multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName());
          }
          for (S3ObjectSummary summary : list.getObjectSummaries()) {
            keys.add(new DeleteObjectsRequest.KeyVersion(summary.getKey()));
            // Flush the batch once it exceeds 500 keys, staying well under
            // the 1K-per-request limit noted above
            if (keys.size() > 500) {
              multiObjectDeleteRequest.setKeys(keys);
              client.deleteObjects(multiObjectDeleteRequest);
              multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName());
              keys.clear();
            }
          }
          if (list.isTruncated()) {
            prevListing = list;
          } else {
            break;
          }
        }
        if (!keys.isEmpty()) {
          multiObjectDeleteRequest.setKeys(keys);
          client.deleteObjects(multiObjectDeleteRequest);
        }
      } catch (Throwable ex) {
        logger.warn("Failed to delete S3 repository [{}] in [{}]", ex, bucketName, region);
      }
    }
  }
  @Test
  @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch-cloud-aws/issues/211")
  public void testEncryption() {
    Client client = client();
    logger.info(
        "-->  creating s3 repository with bucket[{}] and path [{}]",
        internalCluster().getInstance(Settings.class).get("repositories.s3.bucket"),
        basePath);
    PutRepositoryResponse putRepositoryResponse =
        client
            .admin()
            .cluster()
            .preparePutRepository("test-repo")
            .setType("s3")
            .setSettings(
                Settings.settingsBuilder()
                    .put("base_path", basePath)
                    .put("chunk_size", randomIntBetween(1000, 10000))
                    .put("server_side_encryption", true))
            .get();
    assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));

    createIndex("test-idx-1", "test-idx-2", "test-idx-3");
    ensureGreen();

    logger.info("--> indexing some data");
    for (int i = 0; i < 100; i++) {
      index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
      index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i);
      index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i);
    }
    refresh();
    assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
    assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));
    assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(100L));

    logger.info("--> snapshot");
    CreateSnapshotResponse createSnapshotResponse =
        client
            .admin()
            .cluster()
            .prepareCreateSnapshot("test-repo", "test-snap")
            .setWaitForCompletion(true)
            .setIndices("test-idx-*", "-test-idx-3")
            .get();
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
    assertThat(
        createSnapshotResponse.getSnapshotInfo().successfulShards(),
        equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));

    assertThat(
        client
            .admin()
            .cluster()
            .prepareGetSnapshots("test-repo")
            .setSnapshots("test-snap")
            .get()
            .getSnapshots()
            .get(0)
            .state(),
        equalTo(SnapshotState.SUCCESS));

    Settings settings = internalCluster().getInstance(Settings.class);
    Settings bucket = settings.getByPrefix("repositories.s3.");
    AmazonS3 s3Client =
        internalCluster()
            .getInstance(AwsS3Service.class)
            .client(
                null,
                null,
                bucket.get("region", settings.get("repositories.s3.region")),
                bucket.get("access_key", settings.get("cloud.aws.access_key")),
                bucket.get("secret_key", settings.get("cloud.aws.secret_key")));

    String bucketName = bucket.get("bucket");
    logger.info("--> verify encryption for bucket [{}], prefix [{}]", bucketName, basePath);
    List<S3ObjectSummary> summaries =
        s3Client.listObjects(bucketName, basePath).getObjectSummaries();
    for (S3ObjectSummary summary : summaries) {
      assertThat(
          s3Client.getObjectMetadata(bucketName, summary.getKey()).getSSEAlgorithm(),
          equalTo("AES256"));
    }

    logger.info("--> delete some data");
    for (int i = 0; i < 50; i++) {
      client.prepareDelete("test-idx-1", "doc", Integer.toString(i)).get();
    }
    for (int i = 50; i < 100; i++) {
      client.prepareDelete("test-idx-2", "doc", Integer.toString(i)).get();
    }
    for (int i = 0; i < 100; i += 2) {
      client.prepareDelete("test-idx-3", "doc", Integer.toString(i)).get();
    }
    refresh();
    assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(50L));
    assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(50L));
    assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(50L));

    logger.info("--> close indices");
    client.admin().indices().prepareClose("test-idx-1", "test-idx-2").get();

    logger.info("--> restore all indices from the snapshot");
    RestoreSnapshotResponse restoreSnapshotResponse =
        client
            .admin()
            .cluster()
            .prepareRestoreSnapshot("test-repo", "test-snap")
            .setWaitForCompletion(true)
            .execute()
            .actionGet();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));

    ensureGreen();
    assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
    assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));
    assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(50L));

    // Test restore after index deletion
    logger.info("--> delete indices");
    cluster().wipeIndices("test-idx-1", "test-idx-2");
    logger.info("--> restore one index after deletion");
    restoreSnapshotResponse =
        client
            .admin()
            .cluster()
            .prepareRestoreSnapshot("test-repo", "test-snap")
            .setWaitForCompletion(true)
            .setIndices("test-idx-*", "-test-idx-2")
            .execute()
            .actionGet();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
    ensureGreen();
    assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
    ClusterState clusterState = client.admin().cluster().prepareState().get().getState();
    assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true));
    assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false));
  }
  @Override
  protected void doStart() throws ElasticsearchException {
    try {

      final String currentDir = new File(".").getCanonicalPath();
      final String tomcatDir = currentDir + File.separatorChar + "tomcat";

      logger.debug("cur dir " + currentDir);

      if (tomcat != null) {
        try {
          tomcat.stop();
          tomcat.destroy();
        } catch (final Exception e) {
          // ignore failures while stopping/destroying a stale Tomcat instance
        }
      }

      tomcat = new ExtendedTomcat();
      tomcat.enableNaming();
      tomcat.getServer().setPort(-1); // shutdown disabled
      tomcat.getServer().setAddress("localhost");

      final String httpProtocolImpl =
          blockingServer
              ? "org.apache.coyote.http11.Http11Protocol"
              : "org.apache.coyote.http11.Http11NioProtocol";

      final Connector httpConnector = new Connector(httpProtocolImpl);
      tomcat.setConnector(httpConnector);
      tomcat.getService().addConnector(httpConnector);

      // TODO report tomcat bug with setProtocol

      if (maxContentLength != null) {
        httpConnector.setMaxPostSize(maxContentLength.bytesAsInt());
      }

      if (maxHeaderSize != null) {
        httpConnector.setAttribute("maxHttpHeaderSize", maxHeaderSize.bytesAsInt());
      }

      if (tcpNoDelay != null) {
        httpConnector.setAttribute("tcpNoDelay", tcpNoDelay.booleanValue());
      }

      if (reuseAddress != null) {
        httpConnector.setAttribute("socket.soReuseAddress", reuseAddress.booleanValue());
      }

      if (tcpKeepAlive != null) {
        httpConnector.setAttribute("socket.soKeepAlive", tcpKeepAlive.booleanValue());
        httpConnector.setAttribute(
            "maxKeepAliveRequests", tcpKeepAlive.booleanValue() ? "100" : "1");
      }

      if (tcpReceiveBufferSize != null) {
        httpConnector.setAttribute("socket.rxBufSize", tcpReceiveBufferSize.bytesAsInt());
      }

      if (tcpSendBufferSize != null) {
        httpConnector.setAttribute("socket.txBufSize", tcpSendBufferSize.bytesAsInt());
      }

      httpConnector.setAttribute(
          "compression", compression ? String.valueOf(compressionLevel) : "off");

      if (maxChunkSize != null) {
        httpConnector.setAttribute("maxExtensionSize", maxChunkSize.bytesAsInt());
      }

      httpConnector.setPort(Integer.parseInt(port));

      tomcat.setBaseDir(tomcatDir);

      final TomcatHttpTransportHandlerServlet servlet = new TomcatHttpTransportHandlerServlet();
      servlet.setTransport(this);

      final Context ctx = tomcat.addContext("", currentDir);

      logger.debug("currentDir " + currentDir);

      Tomcat.addServlet(ctx, "ES Servlet", servlet);

      ctx.addServletMapping("/*", "ES Servlet");

      if (useSSL) {
        logger.info("Using SSL");

        // System.setProperty("javax.net.debug", "ssl");
        httpConnector.setAttribute("SSLEnabled", "true");
        httpConnector.setSecure(true);
        httpConnector.setScheme("https");

        httpConnector.setAttribute("sslProtocol", "TLS");

        httpConnector.setAttribute(
            "keystoreFile", settings.get("security.ssl.keystorefile", "keystore"));
        httpConnector.setAttribute(
            "keystorePass", settings.get("security.ssl.keystorepass", "changeit"));
        httpConnector.setAttribute(
            "keystoreType", settings.get("security.ssl.keystoretype", "JKS"));

        final String keyalias = settings.get("security.ssl.keyalias", null);

        if (keyalias != null) {
          httpConnector.setAttribute("keyAlias", keyalias);
        }

        if (useClientAuth) {

          logger.info(
              "Using SSL Client Auth (PKI), so user/roles will be retrieved from the client certificate.");

          httpConnector.setAttribute("clientAuth", "true");

          httpConnector.setAttribute(
              "truststoreFile",
              settings.get("security.ssl.clientauth.truststorefile", "truststore"));
          httpConnector.setAttribute(
              "truststorePass", settings.get("security.ssl.clientauth.truststorepass", "changeit"));
          httpConnector.setAttribute(
              "truststoreType", settings.get("security.ssl.clientauth.truststoretype", "JKS"));

          /*final String loginconf = this.settings
          		.get("security.kerberos.login.conf.path");
          final String krbconf = this.settings
          		.get("security.kerberos.krb5.conf.path");

          SecurityUtil.setSystemPropertyToAbsoluteFile(
          		"java.security.auth.login.config", loginconf);
          SecurityUtil.setSystemPropertyToAbsoluteFile(
          		"java.security.krb5.conf", krbconf);*/

          // httpConnector.setAttribute("allowUnsafeLegacyRenegotiation", "true");

          final SecurityConstraint constraint = new SecurityConstraint();
          constraint.addAuthRole("*");
          constraint.setAuthConstraint(true);
          constraint.setUserConstraint("CONFIDENTIAL");

          final SecurityCollection col = new SecurityCollection();
          col.addPattern("/*");

          constraint.addCollection(col);
          ctx.addConstraint(constraint);

          final LoginConfig lc = new LoginConfig();
          lc.setAuthMethod("CLIENT-CERT");
          lc.setRealmName("clientcretificate");
          ctx.setLoginConfig(lc);

          configureJndiRealm(ctx);

          ctx.getPipeline().addValve(new SSLAuthenticator());
          logger.info("Auth Method is CLIENT-CERT");

          // http://pki-tutorial.readthedocs.org/en/latest/simple/

        }

      } else {
        if (useClientAuth) {
          logger.error("Client Auth only available with SSL");
          throw new RuntimeException("Client Auth only available with SSL");
        }

        // useClientAuth = false;
      }

      if (!useClientAuth) {
        if ("waffle".equalsIgnoreCase(kerberosMode)) {

          final Boolean testMode = settings.getAsBoolean("security.waffle.testmode", false);

          final FilterDef fd = new FilterDef();
          fd.setFilterClass("waffle.servlet.NegotiateSecurityFilter");
          fd.setFilterName("Waffle");

          if (testMode != null && testMode.booleanValue()) {

            fd.addInitParameter("principalFormat", "fqn");
            fd.addInitParameter("roleFormat", "both");
            fd.addInitParameter("allowGuestLogin", "true");
            fd.addInitParameter(
                "securityFilterProviders",
                "org.elasticsearch.plugins.security.waffle.TestProvider");

            logger.info(
                "Kerberos implementation is WAFFLE in test mode (only works on Windows operating systems)");
          } else {
            final Map<String, String> waffleSettings =
                settings.getByPrefix("security.waffle").getAsMap();

            // the prefix above has no trailing dot, so every key keeps a
            // leading '.'; substring(1) strips it before handing the
            // init parameter to Waffle
            for (final String waffleKey : waffleSettings.keySet()) {
              fd.addInitParameter(waffleKey.substring(1), waffleSettings.get(waffleKey));
              logger.debug(waffleKey.substring(1) + "=" + waffleSettings.get(waffleKey));
            }

            fd.addInitParameter("principalFormat", "fqn");
            fd.addInitParameter("roleFormat", "both");
            fd.addInitParameter("allowGuestLogin", "false");

            logger.info(
                "Kerberos implementation is WAFFLE (only works on Windows operating systems)");
          }

          ctx.addFilterDef(fd);
          final FilterMap fm = new FilterMap();
          fm.setFilterName("Waffle");
          fm.addURLPattern("/*");
          ctx.addFilterMap(fm);

        } else if ("spnegoad".equalsIgnoreCase(kerberosMode)) {

          // System.setProperty("sun.security.krb5.debug", "true"); // TODO
          // switch
          // off

          System.setProperty("javax.security.auth.useSubjectCredsOnly", "false");

          final SecurityConstraint constraint = new SecurityConstraint();
          constraint.addAuthRole("*");
          constraint.setAuthConstraint(true);
          constraint.setDisplayName("spnego_sc_all");
          final SecurityCollection col = new SecurityCollection();
          col.addPattern("/*");

          constraint.addCollection(col);
          ctx.addConstraint(constraint);

          final LoginConfig lc = new LoginConfig();
          lc.setAuthMethod("SPNEGO");
          lc.setRealmName("SPNEGO");
          ctx.setLoginConfig(lc);

          logger.info("Kerberos implementaton is SPNEGOAD");

          configureJndiRealm(ctx);

          final ExtendedSpnegoAuthenticator spnegoValve = new ExtendedSpnegoAuthenticator();
          // spnegoValve.setLoginConfigName("es-login");
          spnegoValve.setStoreDelegatedCredential(true);
          ctx.getPipeline().addValve(spnegoValve);

          // final SpnegoAuthenticator spnegoValve = new SpnegoAuthenticator();
          // spnegoValve.setLoginEntryName("es-login");
          // ctx.getPipeline().addValve(spnegoValve);

        } else if ("none".equalsIgnoreCase(kerberosMode)) {

          logger.warn(
              "Kerberos is not configured, so user/roles are unavailable. Host-based security, in contrast, is working.");

        } else {
          logger.error(
              "No Kerberos implementation '"
                  + kerberosMode
                  + "' found. Kerberos is therefore not configured, so user/roles are unavailable. Host-based security, in contrast, is working.");
        }
      }

      tomcat.start();

      logger.info("Tomcat started");

      InetSocketAddress bindAddress;
      try {
        bindAddress =
            new InetSocketAddress(
                networkService.resolveBindHostAddress(bindHost),
                tomcat.getConnector().getLocalPort());
      } catch (final Exception e) {
        throw new BindTransportException("Failed to resolve bind address", e);
      }

      InetSocketAddress publishAddress;
      try {
        publishAddress =
            new InetSocketAddress(
                networkService.resolvePublishHostAddress(publishHost), bindAddress.getPort());
      } catch (final Exception e) {
        throw new BindTransportException("Failed to resolve publish address", e);
      }

      logger.debug("bindAddress " + bindAddress);
      logger.debug("publishAddress " + publishAddress);

      boundAddress =
          new BoundTransportAddress(
              new InetSocketTransportAddress(bindAddress),
              new InetSocketTransportAddress(publishAddress));

    } catch (final Exception e) {
      throw new ElasticsearchException("Unable to start Tomcat", e);
    }
  }
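  // Hedged illustration (not part of the original class): the SSL and Kerberos
  // settings read in doStart(), assembled with the builder; every path and
  // password below is a hypothetical example:
  private static Settings demoTransportSecuritySettings() {
    return Settings.settingsBuilder()
        .put("security.ssl.keystorefile", "/path/to/keystore")
        .put("security.ssl.keystorepass", "changeit")
        .put("security.ssl.keystoretype", "JKS")
        .put("security.ssl.clientauth.truststorefile", "/path/to/truststore")
        .put("security.waffle.testmode", false)
        .build();
  }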