@TestMethodProviders({LuceneJUnit3MethodProvider.class, JUnit4MethodProvider.class})
@Listeners({ReproduceInfoPrinter.class})
@ThreadLeakFilters(
    defaultFilters = true,
    filters = {ElasticsearchTestCase.ElasticSearchThreadFilter.class})
@ThreadLeakScope(Scope.NONE)
@RunWith(value = com.carrotsearch.randomizedtesting.RandomizedRunner.class)
public abstract class ElasticsearchTestCase extends RandomizedTest {

  public static final Version TEST_VERSION_CURRENT = LuceneTestCase.TEST_VERSION_CURRENT;

  protected final ESLogger logger = Loggers.getLogger(getClass());

  public static final boolean NIGHTLY =
      Boolean.parseBoolean(System.getProperty("es.tests.nightly", "false")); // disabled by default

  public static final String CHILD_VM_ID =
      System.getProperty("junit4.childvm.id", "" + System.currentTimeMillis());

  public static final String SYSPROP_BADAPPLES = "tests.badapples";

  public static class ElasticSearchThreadFilter implements ThreadFilter {
    @Override
    public boolean reject(Thread t) {
      // reject every thread from leak detection; @ThreadLeakScope(Scope.NONE) disables the checks anyway
      return true;
    }
  }
}
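
For orientation, a minimal sketch of a concrete test built on this base class. The class name, test name, and bounds are hypothetical; randomIntBetween comes from the inherited RandomizedTest, and the seed printed by ReproduceInfoPrinter on failure makes the run reproducible.

public class ExampleRandomizedTests extends ElasticsearchTestCase {

  @Test
  public void testWithRandomizedInput() {
    // inherited from RandomizedTest; the value varies per seed
    int size = randomIntBetween(1, 100);
    logger.info("running with size [{}]", size);
    assertTrue(size >= 1 && size <= 100);
  }
}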
Example #2
public class CLibrary {

  private static ESLogger logger = Loggers.getLogger(CLibrary.class);

  public static final int MCL_CURRENT = 1;
  public static final int MCL_FUTURE = 2;

  public static final int ENOMEM = 12;

  static {
    try {
      Native.register("c");
    } catch (NoClassDefFoundError e) {
      logger.warn("JNA not found. native methods (mlockall) will be disabled.");
    } catch (UnsatisfiedLinkError e) {
      logger.warn("unable to link C library. native methods (mlockall) will be disabled.");
    }
  }

  public static native int mlockall(int flags);

  public static native int munlockall();

  private CLibrary() {}
}
Example #3
  @Override
  public Node stop() {
    if (!lifecycle.moveToStopped()) {
      return this;
    }
    ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
    logger.info("{{}}[{}]: stopping ...", Version.full(), JvmInfo.jvmInfo().pid());

    if (settings.getAsBoolean("http.enabled", true)) {
      injector.getInstance(HttpServer.class).stop();
    }
    injector.getInstance(RoutingService.class).stop();
    injector.getInstance(ClusterService.class).stop();
    injector.getInstance(DiscoveryService.class).stop();
    injector.getInstance(MonitorService.class).stop();
    injector.getInstance(GatewayService.class).stop();
    injector.getInstance(SearchService.class).stop();
    injector.getInstance(RiversManager.class).stop();
    injector.getInstance(IndicesClusterStateService.class).stop();
    injector.getInstance(IndicesService.class).stop();
    injector.getInstance(RestController.class).stop();
    injector.getInstance(TransportService.class).stop();
    injector.getInstance(JmxService.class).close();

    for (Class<? extends LifecycleComponent> plugin : pluginsService.services()) {
      injector.getInstance(plugin).stop();
    }

    logger.info("{{}}[{}]: stopped", Version.full(), JvmInfo.jvmInfo().pid());

    return this;
  }
public class ElasticFacetsPlugin extends AbstractPlugin {

  static final ESLogger logger = Loggers.getLogger(ElasticFacetsPlugin.class);

  public ElasticFacetsPlugin() {
    logger.info("ElasticFacets plugin initialized");
  }

  public String name() {
    return "ElasticFacetsPlugin";
  }

  public String description() {
    return "A plugin adding the Faceted Date Histogram facet type.";
  }

  @Override
  public void processModule(Module module) {
    if (module instanceof FacetModule) {
      ((FacetModule) module).addFacetProcessor(FacetedDateHistogramFacetProcessor.class);
      ((FacetModule) module).addFacetProcessor(HashedStringFacetProcessor.class);
    }
    if (module instanceof ActionModule) {
      ((ActionModule) module)
          .registerAction(
              CacheStatsPerFieldAction.INSTANCE, TransportCacheStatsPerFieldAction.class);
    }
    if (module instanceof RestModule) {
      ((RestModule) module).addRestAction(RestCacheStatsPerFieldAction.class);
    }
  }
}
Example #5
public class Natives {

  private static ESLogger logger = Loggers.getLogger(Natives.class);
  // set to true if the native mlockall call was successful
  public static boolean LOCAL_MLOCKALL = false;

  public static void tryMlockall() {
    int errno = Integer.MIN_VALUE;
    try {
      int result = CLibrary.mlockall(CLibrary.MCL_CURRENT);
      if (result != 0) {
        errno = Native.getLastError();
      } else {
        LOCAL_MLOCKALL = true;
      }
    } catch (UnsatisfiedLinkError e) {
      // this will have already been logged by CLibrary, no need to repeat it
      return;
    }

    if (errno != Integer.MIN_VALUE) {
      if (errno == CLibrary.ENOMEM
          && System.getProperty("os.name").toLowerCase(Locale.ROOT).contains("linux")) {
        logger.warn(
            "Unable to lock JVM memory (ENOMEM)."
                + " This can result in part of the JVM being swapped out."
                + " Increase RLIMIT_MEMLOCK (ulimit).");
      } else if (!System.getProperty("os.name").toLowerCase(Locale.ROOT).contains("mac")) {
        // OS X allows mlockall to be called, but always returns an error
        logger.warn("Unknown mlockall error " + errno);
      }
    }
  }
}
 /**
  * Create new action listener.
  *
  * @param actionRequest the action request this listener processes results for
  * @param restRequest the REST request we handle
  * @param restChannel the channel to write the REST response into
  */
 public JRMgmBaseActionListener(
     Request actionRequest, RestRequest restRequest, RestChannel restChannel) {
   super();
   this.restRequest = restRequest;
   this.restChannel = restChannel;
   this.actionRequest = actionRequest;
   logger = Loggers.getLogger(getClass());
 }
Example #7
  /** initialize native resources */
  public static void initializeNatives(
      Path tmpFile, boolean mlockAll, boolean seccomp, boolean ctrlHandler) {
    final ESLogger logger = Loggers.getLogger(Bootstrap.class);

    // check if the user is running as root, and bail
    if (Natives.definitelyRunningAsRoot()) {
      if (Boolean.parseBoolean(System.getProperty("es.insecure.allow.root"))) {
        logger.warn("running as ROOT user. this is a bad idea!");
      } else {
        throw new RuntimeException("don't run elasticsearch as root.");
      }
    }

    // enable secure computing mode
    if (seccomp) {
      Natives.trySeccomp(tmpFile);
    }

    // mlockall if requested
    if (mlockAll) {
      if (Constants.WINDOWS) {
        Natives.tryVirtualLock();
      } else {
        Natives.tryMlockall();
      }
    }

    // listener for windows close event
    if (ctrlHandler) {
      Natives.addConsoleCtrlHandler(
          new ConsoleCtrlHandler() {
            @Override
            public boolean handle(int code) {
              if (CTRL_CLOSE_EVENT == code) {
                logger.info("running graceful exit on windows");
                try {
                  Bootstrap.stop();
                } catch (IOException e) {
                  throw new ElasticsearchException("failed to stop node", e);
                }
                return true;
              }
              return false;
            }
          });
    }

    // force remainder of JNA to be loaded (if available).
    try {
      JNAKernel32Library.getInstance();
    } catch (Throwable ignored) {
      // we've already logged this.
    }

    // init lucene random seed. it will use /dev/urandom where available:
    StringHelper.randomId();
  }
Example #8
 private static void checkUnsetAndMaybeExit(String confFileSetting, String settingName) {
   if (confFileSetting != null && confFileSetting.isEmpty() == false) {
     ESLogger logger = Loggers.getLogger(Bootstrap.class);
     logger.info(
         "{} is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed.",
         settingName);
     exit(1);
   }
 }
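
A hedged sketch of how this guard might be invoked during startup; the setting name below is illustrative, not necessarily one Elasticsearch actually used.

// reject startup if a legacy config-location override is still set (illustrative name)
checkUnsetAndMaybeExit(System.getProperty("es.config"), "es.config");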
public class RedisUtils {

  private static JedisPool jedisPool;

  public static ESLogger logger = Loggers.getLogger("ansj-redis-utils");

  /**
   * Get a database connection from the pool.
   *
   * @return a Jedis connection, or null if none could be obtained
   */
  public static Jedis getConnection() {
    Jedis jedis = null;
    try {
      jedis = jedisPool.getResource();
    } catch (Exception e) {
      logger.error(e.getMessage(), e); // the logger call already records the stack trace
    }
    return jedis;
  }

  /**
   * Return a database connection to the pool.
   *
   * @param jedis the Redis connection to close
   */
  public static void closeConnection(Jedis jedis) {
    if (null != jedis) {
      try {
        // close (return) this connection only; do not close the shared pool here
        jedis.close();
      } catch (Exception e) {
        logger.error(e.getMessage(), e);
      }
    }
  }

  /**
   * Set the connection pool.
   *
   * @param jedisPool the pool data source
   */
  public static void setJedisPool(JedisPool jedisPool) {
    RedisUtils.jedisPool = jedisPool;
  }

  /**
   * Get the connection pool.
   *
   * @return the pool data source
   */
  public static JedisPool getJedisPool() {
    return jedisPool;
  }
}
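
A usage sketch, assuming the pool is wired up once at startup; host, port, and key are placeholders.

// one-time setup (placeholder host/port)
RedisUtils.setJedisPool(new JedisPool(new JedisPoolConfig(), "localhost", 6379));

// per-operation usage: borrow a connection, use it, return it
Jedis jedis = RedisUtils.getConnection();
try {
  jedis.set("example-key", "example-value");
} finally {
  RedisUtils.closeConnection(jedis);
}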
Example #10
 /**
  * Checks that the current JVM is "ok". This means it doesn't have severe bugs that cause data
  * corruption.
  */
 static void check() {
   if (Boolean.parseBoolean(System.getProperty(JVM_BYPASS))) {
     Loggers.getLogger(JVMCheck.class)
         .warn(
             "bypassing jvm version check for version [{}], this can result in data corruption!",
             fullVersion());
   } else if ("Oracle Corporation".equals(Constants.JVM_VENDOR)) {
     HotSpotCheck bug = JVM_BROKEN_HOTSPOT_VERSIONS.get(Constants.JVM_VERSION);
     if (bug != null && bug.check()) {
       if (bug.getWorkaround().isPresent()
           && ManagementFactory.getRuntimeMXBean()
               .getInputArguments()
               .contains(bug.getWorkaround().get())) {
         Loggers.getLogger(JVMCheck.class).warn("{}", bug.getWarningMessage().get());
       } else {
         throw new RuntimeException(bug.getErrorMessage());
       }
     }
   } else if ("IBM Corporation".equals(Constants.JVM_VENDOR)) {
     // currently some old JVM versions from IBM will easily result in index corruption.
     // 2.8+ seems ok for ES from testing.
     float version = Float.POSITIVE_INFINITY;
     try {
       version = Float.parseFloat(Constants.JVM_VERSION);
     } catch (NumberFormatException ignored) {
       // this is just a simple best-effort to detect old runtimes,
       // if we cannot parse it, we don't fail.
     }
     if (version < 2.8f) {
       StringBuilder sb = new StringBuilder();
       sb.append(
           "IBM J9 runtimes < 2.8 suffer from several bugs which can cause data corruption.");
       sb.append(System.lineSeparator());
       sb.append("Your version: " + fullVersion());
       sb.append(System.lineSeparator());
       sb.append("Please upgrade the JVM to a recent IBM JDK");
       throw new RuntimeException(sb.toString());
     }
   }
 }
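
A minimal sketch of where such a check would sit in a launcher; the surrounding main method is an assumption.

public static void main(String[] args) {
  // fail fast on known-broken JVMs before any index is opened
  JVMCheck.check();
  // ... continue normal startup ...
}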
public class AllocatePostApiFlagTests {

  private final ESLogger logger = Loggers.getLogger(AllocatePostApiFlagTests.class);

  @Test
  public void simpleFlagTests() {
    AllocationService allocation =
        new AllocationService(
            settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());

    logger.info("creating an index with 1 shard, no replica");
    MetaData metaData =
        newMetaDataBuilder()
            .put(newIndexMetaDataBuilder("test").numberOfShards(1).numberOfReplicas(0))
            .build();
    RoutingTable routingTable = routingTable().addAsNew(metaData.index("test")).build();
    ClusterState clusterState =
        newClusterStateBuilder().metaData(metaData).routingTable(routingTable).build();
    assertThat(
        clusterState.routingTable().index("test").shard(0).primaryAllocatedPostApi(),
        equalTo(false));

    logger.info("adding two nodes and performing rerouting");
    clusterState =
        newClusterStateBuilder()
            .state(clusterState)
            .nodes(newNodesBuilder().put(newNode("node1")).put(newNode("node2")))
            .build();
    RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
    clusterState =
        newClusterStateBuilder()
            .state(clusterState)
            .routingTable(rerouteResult.routingTable())
            .build();
    assertThat(
        clusterState.routingTable().index("test").shard(0).primaryAllocatedPostApi(),
        equalTo(false));

    logger.info("start primary shard");
    rerouteResult =
        allocation.applyStartedShards(
            clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
    clusterState =
        newClusterStateBuilder()
            .state(clusterState)
            .routingTable(rerouteResult.routingTable())
            .build();
    assertThat(
        clusterState.routingTable().index("test").shard(0).primaryAllocatedPostApi(),
        equalTo(true));
  }
}
public class StatsdMockServer extends Thread {

  private int port;
  public Collection<String> content = new ArrayList<>();
  private DatagramSocket socket;
  private volatile boolean isClosed = false; // written by close(), read by the run() loop
  private final ESLogger logger = Loggers.getLogger(getClass());

  public StatsdMockServer(int port) {
    this.port = port;
  }

  @Override
  public void run() {
    try {
      socket = new DatagramSocket(port);

      while (!isClosed) {
        if (socket.isClosed()) return;

        byte[] buf = new byte[256];

        // receive request
        DatagramPacket packet = new DatagramPacket(buf, buf.length);
        socket.receive(packet);

        ByteArrayInputStream bis = new ByteArrayInputStream(buf, 0, packet.getLength());
        BufferedReader in = new BufferedReader(new InputStreamReader(bis));

        String msg;
        while ((msg = in.readLine()) != null) {
          logger.debug("Read from socket: " + msg);
          content.add(msg.trim());
        }
        in.close();
      }

    } catch (IOException e) {
      e.printStackTrace();
    }
  }

  public void close() throws Exception {
    isClosed = true;
    socket.close();
  }

  public void resetContents() {
    this.content = new ArrayList<>();
  }
}
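
A hedged sketch of a test driving this mock server; the port is arbitrary and the enclosing test method is assumed to declare throws Exception (close() does).

StatsdMockServer server = new StatsdMockServer(12345);
server.start(); // Thread.start() kicks off the UDP receive loop in run()
try {
  // ... exercise code that sends StatsD lines to localhost:12345 ...
  // received lines accumulate in server.content
} finally {
  server.close();
}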
public class SessionAwareNettyHttpChannel extends RestChannel {

  protected final ESLogger log = Loggers.getLogger(this.getClass());
  private final SessionStore sessionStore;
  private final RestChannel channel;

  public SessionAwareNettyHttpChannel(
      final RestChannel channel,
      final SessionStore sessionStore,
      final boolean detailedErrorsEnabled) {
    super(channel.request(), detailedErrorsEnabled);
    this.channel = channel;
    this.sessionStore = sessionStore;
  }

  @Override
  public void sendResponse(final RestResponse response) {

    final User user = this.request.getFromContext("searchguard_authenticated_user");
    final Session _session =
        sessionStore.getSession(SecurityUtil.getSearchGuardSessionIdFromCookie(request));

    if (user != null) {
      if (_session == null) {
        final Session session = sessionStore.createSession(user);
        log.trace("Create session and set cookie for {}", user.getName());
        final CookieEncoder encoder = new CookieEncoder(true);
        final Cookie cookie = new DefaultCookie("es_searchguard_session", session.getId());

        // TODO FUTURE check cookie domain/path
        // cookie.setDomain(arg0);
        // cookie.setPath(arg0);

        cookie.setDiscard(true);
        cookie.setSecure(((NettyHttpRequest) request).request() instanceof DefaultHttpsRequest);
        cookie.setMaxAge(60 * 60); // 1h
        cookie.setHttpOnly(true);
        encoder.addCookie(cookie);
        response.addHeader("Set-Cookie", encoder.encode());
      } else {

        // Set-Cookie: token=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT
        log.trace("There is already a session");
        // TODO FUTURE check cookie session validity, expiry, ...

      }
    }

    channel.sendResponse(response);
  }
}
Example #14
  @Override
  public Node stop() {
    if (!lifecycle.moveToStopped()) {
      return this;
    }
    ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
    logger.info("stopping ...");

    injector.getInstance(TribeService.class).stop();
    injector.getInstance(ResourceWatcherService.class).stop();
    if (settings.getAsBoolean("http.enabled", true)) {
      injector.getInstance(HttpServer.class).stop();
    }

    injector.getInstance(MappingUpdatedAction.class).stop();
    injector.getInstance(RiversManager.class).stop();

    injector.getInstance(SnapshotsService.class).stop();
    // stop any changes happening as a result of cluster state changes
    injector.getInstance(IndicesClusterStateService.class).stop();
    // we close indices first, so operations won't be allowed on them
    injector.getInstance(IndexingMemoryController.class).stop();
    injector.getInstance(IndicesTTLService.class).stop();
    injector.getInstance(IndicesService.class).stop();
    // sleep a bit to let operations finish with indices service
    //        try {
    //            Thread.sleep(500);
    //        } catch (InterruptedException e) {
    //            // ignore
    //        }
    injector.getInstance(RoutingService.class).stop();
    injector.getInstance(ClusterService.class).stop();
    injector.getInstance(DiscoveryService.class).stop();
    injector.getInstance(MonitorService.class).stop();
    injector.getInstance(GatewayService.class).stop();
    injector.getInstance(SearchService.class).stop();
    injector.getInstance(RestController.class).stop();
    injector.getInstance(TransportService.class).stop();

    for (Class<? extends LifecycleComponent> plugin : pluginsService.services()) {
      injector.getInstance(plugin).stop();
    }

    logger.info("stopped");

    return this;
  }
Example #15
  public Node start() {
    if (!lifecycle.moveToStarted()) {
      return this;
    }

    ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
    logger.info("starting ...");

    // hack around dependency injection problem (for now...)
    injector
        .getInstance(Discovery.class)
        .setAllocationService(injector.getInstance(AllocationService.class));

    for (Class<? extends LifecycleComponent> plugin : pluginsService.services()) {
      injector.getInstance(plugin).start();
    }

    injector.getInstance(MappingUpdatedAction.class).start();
    injector.getInstance(IndicesService.class).start();
    injector.getInstance(IndexingMemoryController.class).start();
    injector.getInstance(IndicesClusterStateService.class).start();
    injector.getInstance(IndicesTTLService.class).start();
    injector.getInstance(RiversManager.class).start();
    injector.getInstance(SnapshotsService.class).start();
    injector.getInstance(TransportService.class).start();
    injector.getInstance(ClusterService.class).start();
    injector.getInstance(RoutingService.class).start();
    injector.getInstance(SearchService.class).start();
    injector.getInstance(MonitorService.class).start();
    injector.getInstance(RestController.class).start();
    DiscoveryService discoService = injector.getInstance(DiscoveryService.class).start();
    discoService.waitForInitialState();

    // gateway should start after discovery, so it can try to recover from the gateway on "start"
    injector.getInstance(GatewayService.class).start();

    if (settings.getAsBoolean("http.enabled", true)) {
      injector.getInstance(HttpServer.class).start();
    }
    injector.getInstance(ResourceWatcherService.class).start();
    injector.getInstance(TribeService.class).start();

    logger.info("started");

    return this;
  }
Example #16
  public InternalNode(Settings pSettings, boolean loadConfigSettings)
      throws ElasticSearchException {
    Tuple<Settings, Environment> tuple =
        InternalSettingsPerparer.prepareSettings(pSettings, loadConfigSettings);

    ESLogger logger = Loggers.getLogger(Node.class, tuple.v1().get("name"));
    logger.info("{{}}[{}]: initializing ...", Version.full(), JvmInfo.jvmInfo().pid());

    this.pluginsService = new PluginsService(tuple.v1(), tuple.v2());
    this.settings = pluginsService.updatedSettings();
    this.environment = tuple.v2();

    ModulesBuilder modules = new ModulesBuilder();
    modules.add(new PluginsModule(settings, pluginsService));
    modules.add(new SettingsModule(settings));
    modules.add(new NodeModule(this));
    modules.add(new NetworkModule());
    modules.add(new NodeCacheModule(settings));
    modules.add(new ScriptModule());
    modules.add(new JmxModule(settings));
    modules.add(new EnvironmentModule(environment));
    modules.add(new NodeEnvironmentModule());
    modules.add(new ClusterNameModule(settings));
    modules.add(new ThreadPoolModule(settings));
    modules.add(new TimerModule());
    modules.add(new DiscoveryModule(settings));
    modules.add(new ClusterModule(settings));
    modules.add(new RestModule(settings));
    modules.add(new TransportModule(settings));
    if (settings.getAsBoolean("http.enabled", true)) {
      modules.add(new HttpServerModule(settings));
    }
    modules.add(new RiversModule(settings));
    modules.add(new IndicesModule(settings));
    modules.add(new SearchModule());
    modules.add(new TransportActionModule());
    modules.add(new MonitorModule(settings));
    modules.add(new GatewayModule(settings));
    modules.add(new NodeClientModule());

    injector = modules.createInjector();

    client = injector.getInstance(Client.class);

    logger.info("{{}}[{}]: initialized", Version.full(), JvmInfo.jvmInfo().pid());
  }
 private static double[] randomPercentiles() {
   final int length = randomIntBetween(1, 20);
   final double[] percentiles = new double[length];
   for (int i = 0; i < percentiles.length; ++i) {
     switch (randomInt(20)) {
       case 0:
         percentiles[i] = 0;
         break;
       case 1:
         percentiles[i] = 100;
         break;
       default:
         percentiles[i] = randomDouble() * 100;
         break;
     }
   }
   Arrays.sort(percentiles);
   Loggers.getLogger(TDigestPercentilesIT.class)
       .info("Using percentiles={}", Arrays.toString(percentiles));
   return percentiles;
 }
/**
 * Represents a lte assert section:
 *
 * <p>- lte: { fields._ttl: 0 }
 */
public class LessThanOrEqualToAssertion extends Assertion {

  private static final Logger logger = Loggers.getLogger(LessThanOrEqualToAssertion.class);

  public LessThanOrEqualToAssertion(XContentLocation location, String field, Object expectedValue) {
    super(location, field, expectedValue);
  }

  @Override
  protected void doAssert(Object actualValue, Object expectedValue) {
    logger.trace(
        "assert that [{}] is less than or equal to [{}] (field: [{}])",
        actualValue,
        expectedValue,
        getField());
    assertThat(
        "value of [" + getField() + "] is not comparable (got [" + safeClass(actualValue) + "])",
        actualValue,
        instanceOf(Comparable.class));
    assertThat(
        "expected value of ["
            + getField()
            + "] is not comparable (got ["
            + expectedValue.getClass()
            + "])",
        expectedValue,
        instanceOf(Comparable.class));
    try {
      assertThat(
          errorMessage(), (Comparable) actualValue, lessThanOrEqualTo((Comparable) expectedValue));
    } catch (ClassCastException e) {
      fail("cast error while checking (" + errorMessage() + "): " + e);
    }
  }

  private String errorMessage() {
    return "field [" + getField() + "] is not less than or equal to [" + getExpectedValue() + "]";
  }
}
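
A sketch of the assertion in action, assuming a caller in the same package (doAssert is protected) and a placeholder XContentLocation named location.

LessThanOrEqualToAssertion lte =
    new LessThanOrEqualToAssertion(location, "fields._ttl", 0);
lte.doAssert(-5, 0); // passes: -5 <= 0
lte.doAssert(10, 0); // fails: "field [fields._ttl] is not less than or equal to [0]"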
Example #19
  public Node start() {
    if (!lifecycle.moveToStarted()) {
      return this;
    }

    ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
    logger.info("{{}}[{}]: starting ...", Version.full(), JvmInfo.jvmInfo().pid());

    for (Class<? extends LifecycleComponent> plugin : pluginsService.services()) {
      injector.getInstance(plugin).start();
    }

    injector.getInstance(IndicesService.class).start();
    injector.getInstance(IndexingMemoryBufferController.class).start();
    injector.getInstance(IndicesClusterStateService.class).start();
    injector.getInstance(RiversManager.class).start();
    injector.getInstance(ClusterService.class).start();
    injector.getInstance(RoutingService.class).start();
    injector.getInstance(SearchService.class).start();
    injector.getInstance(MonitorService.class).start();
    injector.getInstance(RestController.class).start();
    injector.getInstance(TransportService.class).start();
    DiscoveryService discoService = injector.getInstance(DiscoveryService.class).start();

    // gateway should start after discovery, so it can try to recover from the gateway on "start"
    injector.getInstance(GatewayService.class).start();

    if (settings.getAsBoolean("http.enabled", true)) {
      injector.getInstance(HttpServer.class).start();
    }
    injector
        .getInstance(JmxService.class)
        .connectAndRegister(
            discoService.nodeDescription(), injector.getInstance(NetworkService.class));

    logger.info("{{}}[{}]: started", Version.full(), JvmInfo.jvmInfo().pid());

    return this;
  }
Example #20
  @SuppressWarnings("unchecked")
  public void onModule(RepositoriesModule repositoriesModule) {
    String baseLib = detectLibFolder();
    List<URL> cp = getHadoopClassLoaderPath(baseLib);

    ClassLoader hadoopCL =
        URLClassLoader.newInstance(cp.toArray(new URL[cp.size()]), getClass().getClassLoader());

    Class<? extends Repository> repository = null;
    try {
      repository =
          (Class<? extends Repository>)
              hadoopCL.loadClass("org.elasticsearch.repositories.hdfs.HdfsRepository");
    } catch (ClassNotFoundException cnfe) {
      throw new IllegalStateException(
          "Cannot load plugin class; is the plugin class setup correctly?", cnfe);
    }

    repositoriesModule.registerRepository("hdfs", repository, BlobStoreIndexShardRepository.class);
    Loggers.getLogger(HdfsPlugin.class)
        .info("Loaded Hadoop [{}] libraries from {}", getHadoopVersion(hadoopCL), baseLib);
  }
public class ReplicaLevelTests extends AbstractNodeTest {

  private static final ESLogger logger = Loggers.getLogger(ReplicaLevelTests.class);

  @Test
  public void testReplicaLevel() throws IOException {

    int numberOfShards = 5;
    int replicaLevel = 4;
    int shardsAfterReplica = 0;

    final AbstractIngestClient es =
        new IngestClient()
            .newClient(ADDRESS)
            .setIndex("replicatest")
            .setType("replicatest")
            .numberOfShards(numberOfShards)
            .numberOfReplicas(0)
            .dateDetection(false)
            .timeStampFieldEnabled(false)
            .newIndex();

    try {
      for (int i = 0; i < 12345; i++) {
        es.indexDocument(
            "replicatest", "replicatest", null, "{ \"name\" : \"" + randomString(32) + "\"}");
      }
      es.flush();
      shardsAfterReplica = es.updateReplicaLevel(replicaLevel);
      logger.info("shardsAfterReplica={}", shardsAfterReplica);
    } catch (NoNodeAvailableException e) {
      logger.warn("skipping, no node available");
    } finally {
      // assertEquals(shardsAfterReplica, numberOfShards * (replicaLevel + 1));
      es.shutdown();
    }
  }
}
Example #22
// not an example of how to write code!!!
final class Seccomp {
  private static final ESLogger logger = Loggers.getLogger(Seccomp.class);

  // Linux implementation, based on seccomp(2) or prctl(2) with bpf filtering

  /** Access to non-standard Linux libc methods */
  static interface LinuxLibrary extends Library {
    /** maps to prctl(2) */
    int prctl(int option, NativeLong arg2, NativeLong arg3, NativeLong arg4, NativeLong arg5);
    /**
     * used to call seccomp(2); it's too new, so this is the only way. Don't use it on another
     * architecture unless you know what you are doing.
     */
    NativeLong syscall(NativeLong number, Object... args);
  }

  // null if unavailable or something goes wrong.
  private static final LinuxLibrary linux_libc;

  static {
    LinuxLibrary lib = null;
    if (Constants.LINUX) {
      try {
        lib = (LinuxLibrary) Native.loadLibrary("c", LinuxLibrary.class);
      } catch (UnsatisfiedLinkError e) {
        logger.warn("unable to link C library. native methods (seccomp) will be disabled.", e);
      }
    }
    linux_libc = lib;
  }

  /** the preferred method is seccomp(2), since we can apply to all threads of the process */
  static final int SECCOMP_SET_MODE_FILTER = 1; // since Linux 3.17

  static final int SECCOMP_FILTER_FLAG_TSYNC = 1; // since Linux 3.17

  /** otherwise, we can use prctl(2), which will at least protect ES application threads */
  static final int PR_GET_NO_NEW_PRIVS = 39; // since Linux 3.5

  static final int PR_SET_NO_NEW_PRIVS = 38; // since Linux 3.5
  static final int PR_GET_SECCOMP = 21; // since Linux 2.6.23
  static final int PR_SET_SECCOMP = 22; // since Linux 2.6.23
  static final long SECCOMP_MODE_FILTER = 2; // since Linux 3.5

  /** corresponds to struct sock_filter */
  static final class SockFilter {
    short code; // insn
    byte jt; // number of insn to jump (skip) if true
    byte jf; // number of insn to jump (skip) if false
    int k; // additional data

    SockFilter(short code, byte jt, byte jf, int k) {
      this.code = code;
      this.jt = jt;
      this.jf = jf;
      this.k = k;
    }
  }

  /** corresponds to struct sock_fprog */
  public static final class SockFProg extends Structure implements Structure.ByReference {
    public short len; // number of filters
    public Pointer filter; // filters

    public SockFProg(SockFilter filters[]) {
      len = (short) filters.length;
      // serialize struct sock_filter * explicitly; it's less confusing than the JNA magic we would
      // need
      Memory filter = new Memory(len * 8);
      ByteBuffer bbuf = filter.getByteBuffer(0, len * 8);
      bbuf.order(ByteOrder.nativeOrder()); // use the platform's native byte order
      for (SockFilter f : filters) {
        bbuf.putShort(f.code);
        bbuf.put(f.jt);
        bbuf.put(f.jf);
        bbuf.putInt(f.k);
      }
      this.filter = filter;
    }

    @Override
    protected List<String> getFieldOrder() {
      return Arrays.asList(new String[] {"len", "filter"});
    }
  }

  // BPF "macros" and constants
  static final int BPF_LD = 0x00;
  static final int BPF_W = 0x00;
  static final int BPF_ABS = 0x20;
  static final int BPF_JMP = 0x05;
  static final int BPF_JEQ = 0x10;
  static final int BPF_JGE = 0x30;
  static final int BPF_JGT = 0x20;
  static final int BPF_RET = 0x06;
  static final int BPF_K = 0x00;

  static SockFilter BPF_STMT(int code, int k) {
    return new SockFilter((short) code, (byte) 0, (byte) 0, k);
  }

  static SockFilter BPF_JUMP(int code, int k, int jt, int jf) {
    return new SockFilter((short) code, (byte) jt, (byte) jf, k);
  }

  static final int SECCOMP_RET_ERRNO = 0x00050000;
  static final int SECCOMP_RET_DATA = 0x0000FFFF;
  static final int SECCOMP_RET_ALLOW = 0x7FFF0000;

  // some errno constants for error checking/handling
  static final int EACCES = 0x0D;
  static final int EFAULT = 0x0E;
  static final int EINVAL = 0x16;
  static final int ENOSYS = 0x26;

  // offsets that our BPF checks
  // check with offsetof() when adding a new arch, move to Arch if different.
  static final int SECCOMP_DATA_NR_OFFSET = 0x00;
  static final int SECCOMP_DATA_ARCH_OFFSET = 0x04;

  static class Arch {
    /** AUDIT_ARCH_XXX constant from linux/audit.h */
    final int audit;
    /** syscall limit (necessary for blacklisting on amd64, to ban 32-bit syscalls) */
    final int limit;
    /** __NR_fork */
    final int fork;
    /** __NR_vfork */
    final int vfork;
    /** __NR_execve */
    final int execve;
    /** __NR_execveat */
    final int execveat;
    /** __NR_seccomp */
    final int seccomp;

    Arch(int audit, int limit, int fork, int vfork, int execve, int execveat, int seccomp) {
      this.audit = audit;
      this.limit = limit;
      this.fork = fork;
      this.vfork = vfork;
      this.execve = execve;
      this.execveat = execveat;
      this.seccomp = seccomp;
    }
  }

  /** supported architectures map keyed by os.arch */
  private static final Map<String, Arch> ARCHITECTURES;

  static {
    Map<String, Arch> m = new HashMap<>();
    m.put("amd64", new Arch(0xC000003E, 0x3FFFFFFF, 57, 58, 59, 322, 317));
    m.put("i386", new Arch(0x40000003, 0xFFFFFFFF, 2, 190, 11, 358, 354));
    ARCHITECTURES = Collections.unmodifiableMap(m);
  }

  /** invokes prctl() from linux libc library */
  private static int linux_prctl(int option, long arg2, long arg3, long arg4, long arg5) {
    return linux_libc.prctl(
        option,
        new NativeLong(arg2),
        new NativeLong(arg3),
        new NativeLong(arg4),
        new NativeLong(arg5));
  }

  /** invokes syscall() from linux libc library */
  private static long linux_syscall(long number, Object... args) {
    return linux_libc.syscall(new NativeLong(number), args).longValue();
  }

  /** try to install our BPF filters via seccomp() or prctl() to block execution */
  private static int linuxImpl() {
    // first be defensive: we can give nice errors this way, at the very least.
    // also, some of these security features get backported to old versions, checking kernel version
    // here is a big no-no!
    final Arch arch = ARCHITECTURES.get(Constants.OS_ARCH);
    boolean supported = Constants.LINUX && arch != null;
    if (supported == false) {
      throw new UnsupportedOperationException(
          "seccomp unavailable: '" + Constants.OS_ARCH + "' architecture unsupported");
    }

    // we couldn't link methods, could be some really ancient kernel (e.g. < 2.1.57) or some bug
    if (linux_libc == null) {
      throw new UnsupportedOperationException(
          "seccomp unavailable: could not link methods. requires kernel 3.5+ with CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in");
    }

    // pure paranoia:

    // check that unimplemented syscalls actually return ENOSYS
    // you never know (e.g. https://code.google.com/p/chromium/issues/detail?id=439795)
    if (linux_syscall(999) >= 0 || Native.getLastError() != ENOSYS) {
      throw new UnsupportedOperationException(
          "seccomp unavailable: your kernel is buggy and you should upgrade");
    }

    // try to check system calls really are who they claim
    // you never know (e.g.
    // https://chromium.googlesource.com/chromium/src.git/+/master/sandbox/linux/seccomp-bpf/sandbox_bpf.cc#57)
    final int bogusArg = 0xf7a46a5c;

    // test seccomp(BOGUS)
    long ret = linux_syscall(arch.seccomp, bogusArg);
    if (ret != -1) {
      throw new UnsupportedOperationException(
          "seccomp unavailable: seccomp(BOGUS_OPERATION) returned " + ret);
    } else {
      int errno = Native.getLastError();
      switch (errno) {
        case ENOSYS:
          break; // ok
        case EINVAL:
          break; // ok
        default:
          throw new UnsupportedOperationException(
              "seccomp(BOGUS_OPERATION): " + JNACLibrary.strerror(errno));
      }
    }

    // test seccomp(VALID, BOGUS)
    ret = linux_syscall(arch.seccomp, SECCOMP_SET_MODE_FILTER, bogusArg);
    if (ret != -1) {
      throw new UnsupportedOperationException(
          "seccomp unavailable: seccomp(SECCOMP_SET_MODE_FILTER, BOGUS_FLAG) returned " + ret);
    } else {
      int errno = Native.getLastError();
      switch (errno) {
        case ENOSYS:
          break; // ok
        case EINVAL:
          break; // ok
        default:
          throw new UnsupportedOperationException(
              "seccomp(SECCOMP_SET_MODE_FILTER, BOGUS_FLAG): " + JNACLibrary.strerror(errno));
      }
    }

    // test prctl(BOGUS)
    ret = linux_prctl(bogusArg, 0, 0, 0, 0);
    if (ret != -1) {
      throw new UnsupportedOperationException(
          "seccomp unavailable: prctl(BOGUS_OPTION) returned " + ret);
    } else {
      int errno = Native.getLastError();
      switch (errno) {
        case ENOSYS:
          break; // ok
        case EINVAL:
          break; // ok
        default:
          throw new UnsupportedOperationException(
              "prctl(BOGUS_OPTION): " + JNACLibrary.strerror(errno));
      }
    }

    // now just normal defensive checks

    // check for GET_NO_NEW_PRIVS
    switch (linux_prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0)) {
      case 0:
        break; // not yet set
      case 1:
        break; // already set by caller
      default:
        int errno = Native.getLastError();
        if (errno == EINVAL) {
          // friendly error, this will be the typical case for an old kernel
          throw new UnsupportedOperationException(
              "seccomp unavailable: requires kernel 3.5+ with CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in");
        } else {
          throw new UnsupportedOperationException(
              "prctl(PR_GET_NO_NEW_PRIVS): " + JNACLibrary.strerror(errno));
        }
    }
    // check for SECCOMP
    switch (linux_prctl(PR_GET_SECCOMP, 0, 0, 0, 0)) {
      case 0:
        break; // not yet set
      case 2:
        break; // already in filter mode by caller
      default:
        int errno = Native.getLastError();
        if (errno == EINVAL) {
          throw new UnsupportedOperationException(
              "seccomp unavailable: CONFIG_SECCOMP not compiled into kernel, CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed");
        } else {
          throw new UnsupportedOperationException(
              "prctl(PR_GET_SECCOMP): " + JNACLibrary.strerror(errno));
        }
    }
    // check for SECCOMP_MODE_FILTER
    if (linux_prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, 0, 0, 0) != 0) {
      int errno = Native.getLastError();
      switch (errno) {
        case EFAULT:
          break; // available
        case EINVAL:
          throw new UnsupportedOperationException(
              "seccomp unavailable: CONFIG_SECCOMP_FILTER not compiled into kernel, CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed");
        default:
          throw new UnsupportedOperationException(
              "prctl(PR_SET_SECCOMP): " + JNACLibrary.strerror(errno));
      }
    }

    // ok, now set PR_SET_NO_NEW_PRIVS, needed to be able to set a seccomp filter as ordinary user
    if (linux_prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) {
      throw new UnsupportedOperationException(
          "prctl(PR_SET_NO_NEW_PRIVS): " + JNACLibrary.strerror(Native.getLastError()));
    }

    // check it worked
    if (linux_prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0) != 1) {
      throw new UnsupportedOperationException(
          "seccomp filter did not really succeed: prctl(PR_GET_NO_NEW_PRIVS): "
              + JNACLibrary.strerror(Native.getLastError()));
    }

    // BPF installed to check arch, limit, then syscall. See
    // https://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt for details.
    SockFilter insns[] = {
      /* 1  */ BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_ARCH_OFFSET), //
      /* 2  */ BPF_JUMP(
          BPF_JMP + BPF_JEQ + BPF_K, arch.audit, 0, 7), // if (arch != audit) goto fail;
      /* 3  */ BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_NR_OFFSET), //
      /* 4  */ BPF_JUMP(
          BPF_JMP + BPF_JGT + BPF_K, arch.limit, 5, 0), // if (syscall > LIMIT) goto fail;
      /* 5  */ BPF_JUMP(
          BPF_JMP + BPF_JEQ + BPF_K, arch.fork, 4, 0), // if (syscall == FORK) goto fail;
      /* 6  */ BPF_JUMP(
          BPF_JMP + BPF_JEQ + BPF_K, arch.vfork, 3, 0), // if (syscall == VFORK) goto fail;
      /* 7  */ BPF_JUMP(
          BPF_JMP + BPF_JEQ + BPF_K, arch.execve, 2, 0), // if (syscall == EXECVE) goto fail;
      /* 8  */ BPF_JUMP(
          BPF_JMP + BPF_JEQ + BPF_K, arch.execveat, 1, 0), // if (syscall == EXECVEAT) goto fail;
      /* 9  */ BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ALLOW), // pass: return OK;
      /* 10 */ BPF_STMT(
          BPF_RET + BPF_K, SECCOMP_RET_ERRNO | (EACCES & SECCOMP_RET_DATA)), // fail: return EACCES;
    };
    // seccomp takes a long, so we pass it one explicitly to keep the JNA simple
    SockFProg prog = new SockFProg(insns);
    prog.write();
    long pointer = Pointer.nativeValue(prog.getPointer());

    int method = 1;
    // install filter, if this works, after this there is no going back!
    // first try it with seccomp(SECCOMP_SET_MODE_FILTER), falling back to prctl()
    if (linux_syscall(
            arch.seccomp,
            SECCOMP_SET_MODE_FILTER,
            SECCOMP_FILTER_FLAG_TSYNC,
            new NativeLong(pointer))
        != 0) {
      method = 0;
      int errno1 = Native.getLastError();
      if (logger.isDebugEnabled()) {
        logger.debug(
            "seccomp(SECCOMP_SET_MODE_FILTER): "
                + JNACLibrary.strerror(errno1)
                + ", falling back to prctl(PR_SET_SECCOMP)...");
      }
      if (linux_prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, pointer, 0, 0) != 0) {
        int errno2 = Native.getLastError();
        throw new UnsupportedOperationException(
            "seccomp(SECCOMP_SET_MODE_FILTER): "
                + JNACLibrary.strerror(errno1)
                + ", prctl(PR_SET_SECCOMP): "
                + JNACLibrary.strerror(errno2));
      }
    }

    // now check that the filter was really installed, we should be in filter mode.
    if (linux_prctl(PR_GET_SECCOMP, 0, 0, 0, 0) != 2) {
      throw new UnsupportedOperationException(
          "seccomp filter installation did not really succeed. seccomp(PR_GET_SECCOMP): "
              + JNACLibrary.strerror(Native.getLastError()));
    }

    logger.debug(
        "Linux seccomp filter installation successful, threads: [{}]", method == 1 ? "all" : "app");
    return method;
  }

  // OS X implementation via sandbox(7)

  /** Access to non-standard OS X libc methods */
  static interface MacLibrary extends Library {
    /** maps to sandbox_init(3), since Leopard */
    int sandbox_init(String profile, long flags, PointerByReference errorbuf);

    /** releases memory when an error occurs during initialization (e.g. syntax bug) */
    void sandbox_free_error(Pointer errorbuf);
  }

  // null if unavailable, or something goes wrong.
  private static final MacLibrary libc_mac;

  static {
    MacLibrary lib = null;
    if (Constants.MAC_OS_X) {
      try {
        lib = (MacLibrary) Native.loadLibrary("c", MacLibrary.class);
      } catch (UnsatisfiedLinkError e) {
        logger.warn("unable to link C library. native methods (seatbelt) will be disabled.", e);
      }
    }
    libc_mac = lib;
  }

  /** The only supported flag... */
  static final int SANDBOX_NAMED = 1;
  /** Allow everything except process fork and execution */
  static final String SANDBOX_RULES =
      "(version 1) (allow default) (deny process-fork) (deny process-exec)";

  /** try to install our custom rule profile into sandbox_init() to block execution */
  private static void macImpl(Path tmpFile) throws IOException {
    // first be defensive: we can give nice errors this way, at the very least.
    boolean supported = Constants.MAC_OS_X;
    if (supported == false) {
      throw new IllegalStateException(
          "bug: should not be trying to initialize seatbelt for an unsupported OS");
    }

    // we couldn't link methods, could be some really ancient OS X (< Leopard) or some bug
    if (libc_mac == null) {
      throw new UnsupportedOperationException(
          "seatbelt unavailable: could not link methods. requires Leopard or above.");
    }

    // write rules to a temporary file, which will be passed to sandbox_init()
    Path rules = Files.createTempFile(tmpFile, "es", "sb");
    Files.write(rules, Collections.singleton(SANDBOX_RULES));

    boolean success = false;
    try {
      PointerByReference errorRef = new PointerByReference();
      int ret = libc_mac.sandbox_init(rules.toAbsolutePath().toString(), SANDBOX_NAMED, errorRef);
      // if sandbox_init() fails, add the message from the OS (e.g. syntax error) and free the
      // buffer
      if (ret != 0) {
        Pointer errorBuf = errorRef.getValue();
        RuntimeException e =
            new UnsupportedOperationException("sandbox_init(): " + errorBuf.getString(0));
        libc_mac.sandbox_free_error(errorBuf);
        throw e;
      }
      logger.debug("OS X seatbelt initialization successful");
      success = true;
    } finally {
      if (success) {
        Files.delete(rules);
      } else {
        IOUtils.deleteFilesIgnoringExceptions(rules);
      }
    }
  }

  // Solaris implementation via priv_set(3C)

  /** Access to non-standard Solaris libc methods */
  static interface SolarisLibrary extends Library {
    /** see priv_set(3C), a convenience method for setppriv(2). */
    int priv_set(int op, String which, String... privs);
  }

  // null if unavailable, or something goes wrong.
  private static final SolarisLibrary libc_solaris;

  static {
    SolarisLibrary lib = null;
    if (Constants.SUN_OS) {
      try {
        lib = (SolarisLibrary) Native.loadLibrary("c", SolarisLibrary.class);
      } catch (UnsatisfiedLinkError e) {
        logger.warn("unable to link C library. native methods (priv_set) will be disabled.", e);
      }
    }
    libc_solaris = lib;
  }

  // constants for priv_set(2)
  static final int PRIV_OFF = 1;
  static final String PRIV_ALLSETS = null;
  // see privileges(5) for complete list of these
  static final String PRIV_PROC_FORK = "proc_fork";
  static final String PRIV_PROC_EXEC = "proc_exec";

  static void solarisImpl() {
    // first be defensive: we can give nice errors this way, at the very least.
    boolean supported = Constants.SUN_OS;
    if (supported == false) {
      throw new IllegalStateException(
          "bug: should not be trying to initialize priv_set for an unsupported OS");
    }

    // we couldn't link methods, could be some really ancient Solaris or some bug
    if (libc_solaris == null) {
      throw new UnsupportedOperationException(
          "priv_set unavailable: could not link methods. requires Solaris 10+");
    }

    // drop a null-terminated list of privileges
    if (libc_solaris.priv_set(PRIV_OFF, PRIV_ALLSETS, PRIV_PROC_FORK, PRIV_PROC_EXEC, null) != 0) {
      throw new UnsupportedOperationException(
          "priv_set unavailable: priv_set(): " + JNACLibrary.strerror(Native.getLastError()));
    }

    logger.debug("Solaris priv_set initialization successful");
  }

  // BSD implementation via setrlimit(2)

  // TODO: add OpenBSD to Lucene Constants
  // TODO: JNA doesn't have netbsd support, but this mechanism should work there too.
  static final boolean OPENBSD = Constants.OS_NAME.startsWith("OpenBSD");

  // not a standard limit, means something different on linux, etc!
  static final int RLIMIT_NPROC = 7;

  static void bsdImpl() {
    boolean supported = Constants.FREE_BSD || OPENBSD || Constants.MAC_OS_X;
    if (supported == false) {
      throw new IllegalStateException(
          "bug: should not be trying to initialize RLIMIT_NPROC for an unsupported OS");
    }

    JNACLibrary.Rlimit limit = new JNACLibrary.Rlimit();
    limit.rlim_cur.setValue(0);
    limit.rlim_max.setValue(0);
    if (JNACLibrary.setrlimit(RLIMIT_NPROC, limit) != 0) {
      throw new UnsupportedOperationException(
          "RLIMIT_NPROC unavailable: " + JNACLibrary.strerror(Native.getLastError()));
    }

    logger.debug("BSD RLIMIT_NPROC initialization successful");
  }

  // windows impl via job ActiveProcessLimit

  static void windowsImpl() {
    if (!Constants.WINDOWS) {
      throw new IllegalStateException(
          "bug: should not be trying to initialize ActiveProcessLimit for an unsupported OS");
    }

    JNAKernel32Library lib = JNAKernel32Library.getInstance();

    // create a new Job
    Pointer job = lib.CreateJobObjectW(null, null);
    if (job == null) {
      throw new UnsupportedOperationException("CreateJobObject: " + Native.getLastError());
    }

    try {
      // retrieve the current basic limits of the job
      int clazz = JNAKernel32Library.JOBOBJECT_BASIC_LIMIT_INFORMATION_CLASS;
      JNAKernel32Library.JOBOBJECT_BASIC_LIMIT_INFORMATION limits =
          new JNAKernel32Library.JOBOBJECT_BASIC_LIMIT_INFORMATION();
      limits.write();
      if (!lib.QueryInformationJobObject(job, clazz, limits.getPointer(), limits.size(), null)) {
        throw new UnsupportedOperationException(
            "QueryInformationJobObject: " + Native.getLastError());
      }
      limits.read();
      // modify the number of active processes to be 1 (exactly the one process we will add to the
      // job).
      limits.ActiveProcessLimit = 1;
      limits.LimitFlags = JNAKernel32Library.JOB_OBJECT_LIMIT_ACTIVE_PROCESS;
      limits.write();
      if (!lib.SetInformationJobObject(job, clazz, limits.getPointer(), limits.size())) {
        throw new UnsupportedOperationException(
            "SetInformationJobObject: " + Native.getLastError());
      }
      // assign ourselves to the job
      if (!lib.AssignProcessToJobObject(job, lib.GetCurrentProcess())) {
        throw new UnsupportedOperationException(
            "AssignProcessToJobObject: " + Native.getLastError());
      }
    } finally {
      lib.CloseHandle(job);
    }

    logger.debug("Windows ActiveProcessLimit initialization successful");
  }

  /**
   * Attempt to drop the capability to execute for the process.
   *
   * <p>This is best effort and OS and architecture dependent. It may throw any Throwable.
   *
   * @return 0 if we can do this for application threads, 1 for the entire process
   */
  static int init(Path tmpFile) throws Throwable {
    if (Constants.LINUX) {
      return linuxImpl();
    } else if (Constants.MAC_OS_X) {
      // try to enable both mechanisms if possible
      bsdImpl();
      macImpl(tmpFile);
      return 1;
    } else if (Constants.SUN_OS) {
      solarisImpl();
      return 1;
    } else if (Constants.FREE_BSD || OPENBSD) {
      bsdImpl();
      return 1;
    } else if (Constants.WINDOWS) {
      windowsImpl();
      return 1;
    } else {
      throw new UnsupportedOperationException(
          "syscall filtering not supported for OS: '" + Constants.OS_NAME + "'");
    }
  }
}
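
For context, a hedged sketch of how a bootstrap caller might wrap init(); the wrapper method and its logging are assumptions modeled on the Natives pattern shown earlier.

static void trySeccomp(Path tmpFile) {
  try {
    // per init()'s contract: 1 covers the entire process, 0 only application threads
    int scope = Seccomp.init(tmpFile);
    logger.debug("system call filter installed for [{}]", scope == 1 ? "process" : "app threads");
  } catch (Throwable t) {
    // best effort: the filter is an extra hardening layer, not a hard requirement
    logger.warn("unable to install system call filter", t);
  }
}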
public class ShardsLimitAllocationTests {

  private final ESLogger logger = Loggers.getLogger(ShardsLimitAllocationTests.class);

  @Test
  public void indexLevelShardsLimitAllocate() {
    AllocationService strategy =
        new AllocationService(
            settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());

    logger.info("Building initial routing table");

    MetaData metaData =
        newMetaDataBuilder()
            .put(
                newIndexMetaDataBuilder("test")
                    .settings(
                        ImmutableSettings.settingsBuilder()
                            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 4)
                            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
                            .put(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE, 2)))
            .build();

    RoutingTable routingTable = routingTable().addAsNew(metaData.index("test")).build();

    ClusterState clusterState =
        newClusterStateBuilder().metaData(metaData).routingTable(routingTable).build();
    logger.info("Adding two nodes and performing rerouting");
    clusterState =
        newClusterStateBuilder()
            .state(clusterState)
            .nodes(newNodesBuilder().put(newNode("node1")).put(newNode("node2")))
            .build();
    routingTable = strategy.reroute(clusterState).routingTable();
    clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

    assertThat(
        clusterState
            .readOnlyRoutingNodes()
            .node("node1")
            .numberOfShardsWithState(ShardRoutingState.INITIALIZING),
        equalTo(2));
    assertThat(
        clusterState
            .readOnlyRoutingNodes()
            .node("node2")
            .numberOfShardsWithState(ShardRoutingState.INITIALIZING),
        equalTo(2));

    logger.info("Start the primary shards");
    RoutingNodes routingNodes = clusterState.routingNodes();
    routingTable =
        strategy
            .applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING))
            .routingTable();
    clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

    assertThat(
        clusterState
            .readOnlyRoutingNodes()
            .node("node1")
            .numberOfShardsWithState(ShardRoutingState.STARTED),
        equalTo(2));
    assertThat(
        clusterState
            .readOnlyRoutingNodes()
            .node("node1")
            .numberOfShardsWithState(ShardRoutingState.INITIALIZING),
        equalTo(0));
    assertThat(
        clusterState
            .readOnlyRoutingNodes()
            .node("node2")
            .numberOfShardsWithState(ShardRoutingState.STARTED),
        equalTo(2));
    assertThat(
        clusterState
            .readOnlyRoutingNodes()
            .node("node2")
            .numberOfShardsWithState(ShardRoutingState.INITIALIZING),
        equalTo(0));
    assertThat(clusterState.readOnlyRoutingNodes().unassigned().size(), equalTo(4));

    logger.info("Do another reroute, make sure its still not allocated");
    routingNodes = clusterState.routingNodes();
    routingTable =
        strategy
            .applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING))
            .routingTable();
    clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();
  }

  @Test
  public void indexLevelShardsLimitRemain() {
    AllocationService strategy =
        new AllocationService(
            settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
                .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
                .put("cluster.routing.allocation.balance.index", 0.0f)
                .put("cluster.routing.allocation.balance.replica", 1.0f)
                .put("cluster.routing.allocation.balance.primary", 0.0f)
                .build());

    logger.info("Building initial routing table");

    MetaData metaData =
        newMetaDataBuilder()
            .put(
                newIndexMetaDataBuilder("test")
                    .settings(
                        ImmutableSettings.settingsBuilder()
                            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
                            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)))
            .build();

    RoutingTable routingTable = routingTable().addAsNew(metaData.index("test")).build();

    ClusterState clusterState =
        newClusterStateBuilder().metaData(metaData).routingTable(routingTable).build();
    logger.info("Adding one node and reroute");
    clusterState =
        newClusterStateBuilder()
            .state(clusterState)
            .nodes(newNodesBuilder().put(newNode("node1")))
            .build();
    routingTable = strategy.reroute(clusterState).routingTable();
    clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

    logger.info("Start the primary shards");
    RoutingNodes routingNodes = clusterState.routingNodes();
    routingTable =
        strategy
            .applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING))
            .routingTable();
    clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

    assertThat(clusterState.readOnlyRoutingNodes().numberOfShardsOfType(STARTED), equalTo(5));

    logger.info("add another index with 5 shards");
    metaData =
        newMetaDataBuilder()
            .metaData(metaData)
            .put(
                newIndexMetaDataBuilder("test1")
                    .settings(
                        ImmutableSettings.settingsBuilder()
                            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
                            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)))
            .build();
    routingTable =
        routingTable().routingTable(routingTable).addAsNew(metaData.index("test1")).build();

    clusterState =
        newClusterStateBuilder()
            .state(clusterState)
            .metaData(metaData)
            .routingTable(routingTable)
            .build();

    logger.info("Add another one node and reroute");
    clusterState =
        newClusterStateBuilder()
            .state(clusterState)
            .nodes(newNodesBuilder().putAll(clusterState.nodes()).put(newNode("node2")))
            .build();
    routingTable = strategy.reroute(clusterState).routingTable();
    clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

    routingNodes = clusterState.routingNodes();
    routingTable =
        strategy
            .applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING))
            .routingTable();
    clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

    assertThat(clusterState.readOnlyRoutingNodes().numberOfShardsOfType(STARTED), equalTo(10));

    for (MutableShardRouting shardRouting : clusterState.readOnlyRoutingNodes().node("node1")) {
      assertThat(shardRouting.index(), equalTo("test"));
    }
    for (MutableShardRouting shardRouting : clusterState.readOnlyRoutingNodes().node("node2")) {
      assertThat(shardRouting.index(), equalTo("test1"));
    }

    logger.info(
        "update "
            + ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE
            + " for test, see that things move");
    metaData =
        newMetaDataBuilder()
            .metaData(metaData)
            .put(
                newIndexMetaDataBuilder("test")
                    .settings(
                        ImmutableSettings.settingsBuilder()
                            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
                            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
                            .put(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE, 3)))
            .build();

    clusterState = newClusterStateBuilder().state(clusterState).metaData(metaData).build();

    logger.info("reroute after setting");
    routingTable = strategy.reroute(clusterState).routingTable();
    clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();

    assertThat(
        clusterState.readOnlyRoutingNodes().node("node1").numberOfShardsWithState(STARTED),
        equalTo(3));
    assertThat(
        clusterState.readOnlyRoutingNodes().node("node1").numberOfShardsWithState(RELOCATING),
        equalTo(2));
    assertThat(
        clusterState.readOnlyRoutingNodes().node("node2").numberOfShardsWithState(RELOCATING),
        equalTo(2));
    assertThat(
        clusterState.readOnlyRoutingNodes().node("node2").numberOfShardsWithState(STARTED),
        equalTo(3));
    // the first move will destroy the balance and the balancer will move 2 shards from node2
    // back to node1 right after moving the shards to node2, since we consider INITIALIZING
    // shards during rebalance
    routingNodes = clusterState.routingNodes();
    routingTable =
        strategy
            .applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING))
            .routingTable();
    clusterState = newClusterStateBuilder().state(clusterState).routingTable(routingTable).build();
    // now we are done, in contrast to the EvenShardCountAllocator, since the Balancer is not
    // solely based on the average
    assertThat(
        clusterState.readOnlyRoutingNodes().node("node1").numberOfShardsWithState(STARTED),
        equalTo(5));
    assertThat(
        clusterState.readOnlyRoutingNodes().node("node2").numberOfShardsWithState(STARTED),
        equalTo(5));
  }
}
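
The index-level limit exercised above corresponds to the index.routing.allocation.total_shards_per_node setting. A minimal sketch of applying it to a live index through the Java client; the helper method, its client parameter, and the index name are illustrative and not part of the test above:

public static void limitShardsPerNode(Client client, String index, int maxPerNode) {
  // Update the per-index allocation limit at runtime; the
  // ShardsLimitAllocationDecider enforces it on the next reroute.
  client
      .admin()
      .indices()
      .prepareUpdateSettings(index)
      .setSettings(
          ImmutableSettings.settingsBuilder()
              .put(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE, maxPerNode)
              .build())
      .execute()
      .actionGet();
}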
Example no. 24
/** Test CORS where the allow origin value is a regular expression. */
@ClusterScope(scope = Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 1)
public class CorsRegexIT extends ESIntegTestCase {

  protected static final ESLogger logger = Loggers.getLogger(CorsRegexIT.class);

  @Override
  protected Settings nodeSettings(int nodeOrdinal) {
    return Settings.builder()
        .put(super.nodeSettings(nodeOrdinal))
        .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), "/https?:\\/\\/localhost(:[0-9]+)?/")
        .put(SETTING_CORS_ALLOW_CREDENTIALS.getKey(), true)
        .put(SETTING_CORS_ALLOW_METHODS.getKey(), "get, options, post")
        .put(SETTING_CORS_ENABLED.getKey(), true)
        .put(NetworkModule.HTTP_ENABLED.getKey(), true)
        .build();
  }

  public void testThatRegularExpressionWorksOnMatch() throws Exception {
    String corsValue = "http://localhost:9200";
    HttpResponse response =
        httpClient()
            .method("GET")
            .path("/")
            .addHeader("User-Agent", "Mozilla Bar")
            .addHeader("Origin", corsValue)
            .execute();
    assertResponseWithOriginHeader(response, corsValue);

    corsValue = "https://localhost:9200";
    response =
        httpClient()
            .method("GET")
            .path("/")
            .addHeader("User-Agent", "Mozilla Bar")
            .addHeader("Origin", corsValue)
            .execute();
    assertResponseWithOriginHeader(response, corsValue);
    assertThat(response.getHeaders(), hasKey("Access-Control-Allow-Credentials"));
    assertThat(response.getHeaders().get("Access-Control-Allow-Credentials"), is("true"));
  }

  public void testThatRegularExpressionReturnsForbiddenOnNonMatch() throws Exception {
    HttpResponse response =
        httpClient()
            .method("GET")
            .path("/")
            .addHeader("User-Agent", "Mozilla Bar")
            .addHeader("Origin", "http://evil-host:9200")
            .execute();
    // a rejected origin gets a FORBIDDEN - 403
    assertThat(response.getStatusCode(), is(403));
    assertThat(response.getHeaders(), not(hasKey("Access-Control-Allow-Origin")));
  }

  public void testThatSendingNoOriginHeaderReturnsNoAccessControlHeader() throws Exception {
    HttpResponse response =
        httpClient().method("GET").path("/").addHeader("User-Agent", "Mozilla Bar").execute();
    assertThat(response.getStatusCode(), is(200));
    assertThat(response.getHeaders(), not(hasKey("Access-Control-Allow-Origin")));
  }

  public void testThatRegularExpressionIsNotAppliedWithoutCorrectBrowserOnMatch() throws Exception {
    HttpResponse response = httpClient().method("GET").path("/").execute();
    assertThat(response.getStatusCode(), is(200));
    assertThat(response.getHeaders(), not(hasKey("Access-Control-Allow-Origin")));
  }

  public void testThatPreFlightRequestWorksOnMatch() throws Exception {
    String corsValue = "http://localhost:9200";
    HttpResponse response =
        httpClient()
            .method("OPTIONS")
            .path("/")
            .addHeader("User-Agent", "Mozilla Bar")
            .addHeader("Origin", corsValue)
            .addHeader(HttpHeaders.Names.ACCESS_CONTROL_REQUEST_METHOD, "GET")
            .execute();
    assertResponseWithOriginHeader(response, corsValue);
    assertThat(response.getHeaders(), hasKey("Access-Control-Allow-Methods"));
  }

  public void testThatPreFlightRequestReturnsNullOnNonMatch() throws Exception {
    HttpResponse response =
        httpClient()
            .method("OPTIONS")
            .path("/")
            .addHeader("User-Agent", "Mozilla Bar")
            .addHeader("Origin", "http://evil-host:9200")
            .addHeader(HttpHeaders.Names.ACCESS_CONTROL_REQUEST_METHOD, "GET")
            .execute();
    // a rejected origin gets a FORBIDDEN - 403
    assertThat(response.getStatusCode(), is(403));
    assertThat(response.getHeaders(), not(hasKey("Access-Control-Allow-Origin")));
    assertThat(response.getHeaders(), not(hasKey("Access-Control-Allow-Methods")));
  }

  protected static void assertResponseWithOriginHeader(
      HttpResponse response, String expectedCorsHeader) {
    assertThat(response.getStatusCode(), is(200));
    assertThat(response.getHeaders(), hasKey("Access-Control-Allow-Origin"));
    assertThat(response.getHeaders().get("Access-Control-Allow-Origin"), is(expectedCorsHeader));
  }
}
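
The same behaviour can be probed against a running node without the test harness. A small sketch using only the JDK; the endpoint URL and origin are whatever the caller supplies:

public static String fetchAllowedOrigin(String endpoint, String origin) throws IOException {
  // Issue a GET with an Origin header and return the echoed CORS header, if any.
  HttpURLConnection conn = (HttpURLConnection) new URL(endpoint).openConnection();
  conn.setRequestMethod("GET");
  // The handler only applies the origin regex for browser-like user agents,
  // as testThatRegularExpressionIsNotAppliedWithoutCorrectBrowserOnMatch shows.
  conn.setRequestProperty("User-Agent", "Mozilla Bar");
  conn.setRequestProperty("Origin", origin);
  try {
    return conn.getHeaderField("Access-Control-Allow-Origin"); // null when rejected
  } finally {
    conn.disconnect();
  }
}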
Example no. 25
/** A set of bytes ref terms. */
public class BytesRefTermsSet extends TermsSet {

  private transient Counter bytesUsed;
  private transient ByteBlockPool pool;
  private transient BytesRefHash set;

  /**
   * The size of the header: four bytes for the terms encoding ordinal, one byte for the {@link
   * #isPruned} flag, and four bytes for the size.
   */
  private static final int HEADER_SIZE = 9;

  private static final ESLogger logger = Loggers.getLogger(BytesRefTermsSet.class);

  public BytesRefTermsSet(final CircuitBreaker breaker) {
    super(breaker);
    this.bytesUsed = Counter.newCounter();
    this.pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed));
    this.set = new BytesRefHash(pool);
  }

  /**
   * Constructor based on a byte array containing the encoded set of terms. Used in {@link
   * solutions.siren.join.index.query.TermsEnumTermsQuery}.
   */
  public BytesRefTermsSet(BytesRef bytes) {
    super(null);
    this.readFromBytes(bytes);
  }

  public void add(BytesRef term) {
    this.set.add(term);
  }

  public boolean contains(BytesRef term) {
    return this.set.find(term) != -1;
  }

  @Override
  protected void addAll(TermsSet terms) {
    if (!(terms instanceof BytesRefTermsSet)) {
      throw new UnsupportedOperationException("Invalid type: BytesRefTermsSet expected.");
    }

    BytesRefHash input = ((BytesRefTermsSet) terms).set;
    BytesRef reusable = new BytesRef();
    for (int i = 0; i < input.size(); i++) {
      input.get(i, reusable);
      set.add(reusable);
    }
  }

  public BytesRefHash getBytesRefHash() {
    return set;
  }

  @Override
  public int size() {
    return this.set.size();
  }

  /** Return the memory usage of this object in bytes. */
  public long ramBytesUsed() {
    return bytesUsed.get();
  }

  @Override
  public void readFrom(StreamInput in) throws IOException {
    this.setIsPruned(in.readBoolean());
    int size = in.readInt();

    bytesUsed = Counter.newCounter();
    pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed));
    set = new BytesRefHash(pool);

    for (int i = 0; i < size; i++) {
      set.add(in.readBytesRef());
    }
  }

  @Override
  public void writeTo(StreamOutput out) throws IOException {
    // Encode flag
    out.writeBoolean(this.isPruned());

    // Encode size of list
    out.writeInt(set.size());

    // Encode BytesRefs
    BytesRef reusable = new BytesRef();
    for (int i = 0; i < this.set.size(); i++) {
      this.set.get(i, reusable);
      out.writeBytesRef(reusable);
    }
  }

  @Override
  public BytesRef writeToBytes() {
    long start = System.nanoTime();
    int size = set.size();

    BytesRef bytes = new BytesRef(new byte[HEADER_SIZE + (int) bytesUsed.get()]);

    // Encode encoding type
    Bytes.writeInt(bytes, this.getEncoding().ordinal());

    // Encode flag
    bytes.bytes[bytes.offset++] = (byte) (this.isPruned() ? 1 : 0);

    // Encode size of the set
    Bytes.writeInt(bytes, size);

    // Encode BytesRefs
    BytesRef reusable = new BytesRef();
    for (int i = 0; i < this.set.size(); i++) {
      this.set.get(i, reusable);
      Bytes.writeBytesRef(reusable, bytes);
    }

    logger.debug(
        "Serialized {} terms - took {} ms", this.size(), (System.nanoTime() - start) / 1000000);

    bytes.length = bytes.offset;
    bytes.offset = 0;
    return bytes;
  }

  private void readFromBytes(BytesRef bytes) {
    // Read pruned flag
    this.setIsPruned(bytes.bytes[bytes.offset++] == 1);

    // Read size of the set
    int size = Bytes.readInt(bytes);

    // Read terms
    bytesUsed = Counter.newCounter();
    pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed));
    set = new BytesRefHash(pool);

    BytesRef reusable = new BytesRef();
    for (int i = 0; i < size; i++) {
      Bytes.readBytesRef(bytes, reusable);
      set.add(reusable);
    }
  }

  @Override
  public TermsByQueryRequest.TermsEncoding getEncoding() {
    return TermsByQueryRequest.TermsEncoding.BYTES;
  }

  @Override
  public void release() {
    if (set != null) {
      set.close();
    }
  }
}
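
A round-trip sketch for the encoding above. Passing a null CircuitBreaker is for illustration only, and note that writeToBytes() prepends the four-byte encoding ordinal, which the BytesRef-based constructor expects a dispatching reader to have consumed already:

public static boolean roundTrip(String term) {
  BytesRefTermsSet original = new BytesRefTermsSet((CircuitBreaker) null); // illustration only
  original.add(new BytesRef(term));
  BytesRef encoded = original.writeToBytes();
  // Skip the encoding ordinal, emulating the reader that dispatches on it.
  encoded.offset += 4;
  BytesRefTermsSet decoded = new BytesRefTermsSet(encoded);
  return decoded.contains(new BytesRef(term));
}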
Example no. 26
public class GceMockUtils {
  protected static final Logger logger = Loggers.getLogger(GceMockUtils.class);

  public static final String GCE_METADATA_URL =
      "http://metadata.google.internal/computeMetadata/v1/instance";

  protected static HttpTransport configureMock() {
    return new MockHttpTransport() {
      @Override
      public LowLevelHttpRequest buildRequest(String method, final String url) throws IOException {
        return new MockLowLevelHttpRequest() {
          @Override
          public LowLevelHttpResponse execute() throws IOException {
            MockLowLevelHttpResponse response = new MockLowLevelHttpResponse();
            response.setStatusCode(200);
            response.setContentType(Json.MEDIA_TYPE);
            if (url.startsWith(GCE_METADATA_URL)) {
              logger.info("--> Simulate GCE Auth/Metadata response for [{}]", url);
              response.setContent(readGoogleInternalJsonResponse(url));
            } else {
              logger.info("--> Simulate GCE API response for [{}]", url);
              response.setContent(readGoogleApiJsonResponse(url));
            }

            return response;
          }
        };
      }
    };
  }

  public static String readGoogleInternalJsonResponse(String url) throws IOException {
    return readJsonResponse(url, "http://metadata.google.internal/");
  }

  public static String readGoogleApiJsonResponse(String url) throws IOException {
    return readJsonResponse(url, "https://www.googleapis.com/");
  }

  private static String readJsonResponse(String url, String urlRoot) throws IOException {
    // We extract from the url the mock file path we want to use
    String mockFileName = Strings.replace(url, urlRoot, "");

    URL resource = GceMockUtils.class.getResource(mockFileName);
    if (resource == null) {
      throw new IOException(
          "can't read [" + url + "] in src/test/resources/org/elasticsearch/discovery/gce");
    }
    try (InputStream is = resource.openStream()) {
      final StringBuilder sb = new StringBuilder();
      Streams.readAllLines(
          is,
          new Callback<String>() {
            @Override
            public void handle(String s) {
              sb.append(s);
            }
          });
      return sb.toString();
    }
  }
}
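
A usage sketch for the mock transport; the metadata path below is an assumption for illustration, since any URL under GCE_METADATA_URL is answered from the bundled JSON fixtures:

public static String fetchMockedMetadata() throws IOException {
  HttpTransport transport = GceMockUtils.configureMock();
  HttpRequest request =
      transport
          .createRequestFactory()
          .buildGetRequest(
              new GenericUrl(GceMockUtils.GCE_METADATA_URL + "/service-accounts/default/token"));
  // The mock resolves the path against test resources instead of the network.
  return request.execute().parseAsString();
}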
  @Override
  public ESLogger createLogger(Class<?> clazz) {
    return Loggers.getLogger(clazz, settings.globalSettings(), riverName);
  }
public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase {

  private final ESLogger logger = Loggers.getLogger(SingleShardNoReplicasRoutingTests.class);

  @Test
  public void testSingleIndexStartedShard() {
    AllocationService strategy =
        createAllocationService(
            settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());

    logger.info("Building initial routing table");

    MetaData metaData =
        MetaData.builder()
            .put(
                IndexMetaData.builder("test")
                    .settings(settings(Version.CURRENT))
                    .numberOfShards(1)
                    .numberOfReplicas(0))
            .build();

    RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();

    ClusterState clusterState =
        ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT)
            .metaData(metaData)
            .routingTable(routingTable)
            .build();

    assertThat(routingTable.index("test").shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
    assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());

    logger.info("Adding one node and performing rerouting");
    clusterState =
        ClusterState.builder(clusterState)
            .nodes(DiscoveryNodes.builder().put(newNode("node1")))
            .build();
    RoutingTable prevRoutingTable = routingTable;
    routingTable = strategy.reroute(clusterState).routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

    assertThat(routingTable.index("test").shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(INITIALIZING));
    assertThat(
        routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));

    logger.info("Rerouting again, nothing should change");
    prevRoutingTable = routingTable;
    clusterState = ClusterState.builder(clusterState).build();
    routingTable = strategy.reroute(clusterState).routingTable();
    assertThat(routingTable == prevRoutingTable, equalTo(true));
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

    logger.info("Marking the shard as started");
    RoutingNodes routingNodes = clusterState.routingNodes();
    prevRoutingTable = routingTable;
    routingTable =
        strategy
            .applyStartedShards(
                clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING))
            .routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

    assertThat(routingTable != prevRoutingTable, equalTo(true));
    assertThat(routingTable.index("test").shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(STARTED));
    assertThat(
        routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));

    logger.info("Starting another node and making sure nothing changed");
    clusterState =
        ClusterState.builder(clusterState)
            .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2")))
            .build();
    prevRoutingTable = routingTable;
    routingTable = strategy.reroute(clusterState).routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

    assertThat(routingTable == prevRoutingTable, equalTo(true));
    assertThat(routingTable.index("test").shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(STARTED));
    assertThat(
        routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));

    logger.info("Killing node1 where the shard is, checking the shard is relocated");

    clusterState =
        ClusterState.builder(clusterState)
            .nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1"))
            .build();
    prevRoutingTable = routingTable;
    routingTable = strategy.reroute(clusterState).routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

    assertThat(routingTable != prevRoutingTable, equalTo(true));
    assertThat(routingTable.index("test").shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(INITIALIZING));
    assertThat(
        routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node2"));

    logger.info(
        "Start another node, make sure that things remain the same (shard is in node2 and initializing)");
    clusterState =
        ClusterState.builder(clusterState)
            .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3")))
            .build();
    prevRoutingTable = routingTable;
    routingTable = strategy.reroute(clusterState).routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
    assertThat(routingTable == prevRoutingTable, equalTo(true));

    logger.info("Start the shard on node 2");
    routingNodes = clusterState.routingNodes();
    prevRoutingTable = routingTable;
    routingTable =
        strategy
            .applyStartedShards(
                clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING))
            .routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

    assertThat(routingTable != prevRoutingTable, equalTo(true));
    assertThat(routingTable.index("test").shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(STARTED));
    assertThat(
        routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node2"));
  }

  @Test
  public void testSingleIndexShardFailed() {
    AllocationService strategy =
        createAllocationService(
            settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());

    logger.info("Building initial routing table");

    MetaData metaData =
        MetaData.builder()
            .put(
                IndexMetaData.builder("test")
                    .settings(settings(Version.CURRENT))
                    .numberOfShards(1)
                    .numberOfReplicas(0))
            .build();

    RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();

    ClusterState clusterState =
        ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT)
            .metaData(metaData)
            .routingTable(routingTable)
            .build();

    assertThat(routingTable.index("test").shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
    assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());

    logger.info("Adding one node and rerouting");
    clusterState =
        ClusterState.builder(clusterState)
            .nodes(DiscoveryNodes.builder().put(newNode("node1")))
            .build();
    RoutingTable prevRoutingTable = routingTable;
    routingTable = strategy.reroute(clusterState).routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

    assertThat(prevRoutingTable != routingTable, equalTo(true));
    assertThat(routingTable.index("test").shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().get(0).unassigned(), equalTo(false));
    assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(INITIALIZING));
    assertThat(
        routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));

    logger.info("Marking the shard as failed");
    RoutingNodes routingNodes = clusterState.routingNodes();
    prevRoutingTable = routingTable;
    routingTable =
        strategy
            .applyFailedShard(
                clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING).get(0))
            .routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

    assertThat(prevRoutingTable != routingTable, equalTo(true));
    assertThat(routingTable.index("test").shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
    assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
  }

  @Test
  public void testMultiIndexEvenDistribution() {
    AllocationService strategy =
        createAllocationService(
            settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
                .put(
                    ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
                    "always")
                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
                .build());

    final int numberOfIndices = 50;
    logger.info("Building initial routing table with " + numberOfIndices + " indices");

    MetaData.Builder metaDataBuilder = MetaData.builder();
    for (int i = 0; i < numberOfIndices; i++) {
      metaDataBuilder.put(
          IndexMetaData.builder("test" + i)
              .settings(settings(Version.CURRENT))
              .numberOfShards(1)
              .numberOfReplicas(0));
    }
    MetaData metaData = metaDataBuilder.build();

    RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
    for (int i = 0; i < numberOfIndices; i++) {
      routingTableBuilder.addAsNew(metaData.index("test" + i));
    }
    RoutingTable routingTable = routingTableBuilder.build();
    ClusterState clusterState =
        ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT)
            .metaData(metaData)
            .routingTable(routingTable)
            .build();

    assertThat(routingTable.indicesRouting().size(), equalTo(numberOfIndices));
    for (int i = 0; i < numberOfIndices; i++) {
      assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
      assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
      assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
      assertThat(
          routingTable.index("test" + i).shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
      assertThat(
          routingTable.index("test" + i).shard(0).shards().get(0).currentNodeId(), nullValue());
    }

    logger.info("Adding " + (numberOfIndices / 2) + " nodes");
    DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
    for (int i = 0; i < (numberOfIndices / 2); i++) {
      nodesBuilder.put(newNode("node" + i));
    }
    RoutingTable prevRoutingTable = routingTable;
    clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
    routingTable = strategy.reroute(clusterState).routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

    assertThat(prevRoutingTable != routingTable, equalTo(true));
    for (int i = 0; i < numberOfIndices; i++) {
      assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
      assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
      assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
      assertThat(
          routingTable.index("test" + i).shard(0).shards().get(0).unassigned(), equalTo(false));
      assertThat(
          routingTable.index("test" + i).shard(0).shards().get(0).state(), equalTo(INITIALIZING));
      assertThat(routingTable.index("test" + i).shard(0).shards().get(0).primary(), equalTo(true));
      // make sure the shard is allocated to one of the 25 existing nodes (2 initializing shards
      // per node)
      String nodeId = routingTable.index("test" + i).shard(0).shards().get(0).currentNodeId();
      int nodeIndex = Integer.parseInt(nodeId.substring("node".length()));
      assertThat(nodeIndex, lessThan(25));
    }
    RoutingNodes routingNodes = clusterState.routingNodes();
    Set<String> encounteredIndices = newHashSet();
    for (RoutingNode routingNode : routingNodes) {
      assertThat(routingNode.numberOfShardsWithState(STARTED), equalTo(0));
      assertThat(routingNode.size(), equalTo(2));
      // make sure we still have 2 initializing shards per node across the 25 nodes
      int nodeIndex = Integer.parseInt(routingNode.nodeId().substring("node".length()));
      assertThat(nodeIndex, lessThan(25));
      // check that no node holds two shards of the same index (each index has a single shard)
      for (ShardRouting shardRoutingEntry : routingNode) {
        assertThat(encounteredIndices, not(hasItem(shardRoutingEntry.index())));
        encounteredIndices.add(shardRoutingEntry.index());
      }
    }

    logger.info("Adding additional " + (numberOfIndices / 2) + " nodes, nothing should change");
    nodesBuilder = DiscoveryNodes.builder(clusterState.nodes());
    for (int i = (numberOfIndices / 2); i < numberOfIndices; i++) {
      nodesBuilder.put(newNode("node" + i));
    }
    prevRoutingTable = routingTable;
    clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
    routingTable = strategy.reroute(clusterState).routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

    assertThat(prevRoutingTable != routingTable, equalTo(false));

    logger.info("Marking the shard as started");
    prevRoutingTable = routingTable;
    routingTable =
        strategy
            .applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING))
            .routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

    assertThat(prevRoutingTable != routingTable, equalTo(true));
    int numberOfRelocatingShards = 0;
    int numberOfStartedShards = 0;
    for (int i = 0; i < numberOfIndices; i++) {
      assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
      assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
      assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
      assertThat(
          routingTable.index("test" + i).shard(0).shards().get(0).unassigned(), equalTo(false));
      assertThat(
          routingTable.index("test" + i).shard(0).shards().get(0).state(),
          anyOf(equalTo(STARTED), equalTo(RELOCATING)));
      if (routingTable.index("test" + i).shard(0).shards().get(0).state() == STARTED) {
        numberOfStartedShards++;
      } else if (routingTable.index("test" + i).shard(0).shards().get(0).state() == RELOCATING) {
        numberOfRelocatingShards++;
      }
      assertThat(routingTable.index("test" + i).shard(0).shards().get(0).primary(), equalTo(true));
      // make sure the shard is still on one of the first 25 nodes, either relocating or started
      String nodeId = routingTable.index("test" + i).shard(0).shards().get(0).currentNodeId();
      int nodeIndex = Integer.parseInt(nodeId.substring("node".length()));
      assertThat(nodeIndex, lessThan(25));
    }
    assertThat(numberOfRelocatingShards, equalTo(25));
    assertThat(numberOfStartedShards, equalTo(25));
  }

  @Test
  public void testMultiIndexUnevenNodes() {
    AllocationService strategy =
        createAllocationService(
            settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
                .put(
                    ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
                    "always")
                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
                .build());

    final int numberOfIndices = 10;
    logger.info("Building initial routing table with " + numberOfIndices + " indices");

    MetaData.Builder metaDataBuilder = MetaData.builder();
    for (int i = 0; i < numberOfIndices; i++) {
      metaDataBuilder.put(
          IndexMetaData.builder("test" + i)
              .settings(settings(Version.CURRENT))
              .numberOfShards(1)
              .numberOfReplicas(0));
    }
    MetaData metaData = metaDataBuilder.build();

    RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
    for (int i = 0; i < numberOfIndices; i++) {
      routingTableBuilder.addAsNew(metaData.index("test" + i));
    }
    RoutingTable routingTable = routingTableBuilder.build();

    ClusterState clusterState =
        ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT)
            .metaData(metaData)
            .routingTable(routingTable)
            .build();

    assertThat(routingTable.indicesRouting().size(), equalTo(numberOfIndices));

    logger.info("Starting 3 nodes and rerouting");
    clusterState =
        ClusterState.builder(clusterState)
            .nodes(
                DiscoveryNodes.builder()
                    .put(newNode("node1"))
                    .put(newNode("node2"))
                    .put(newNode("node3")))
            .build();
    RoutingTable prevRoutingTable = routingTable;
    routingTable = strategy.reroute(clusterState).routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

    assertThat(prevRoutingTable != routingTable, equalTo(true));
    for (int i = 0; i < numberOfIndices; i++) {
      assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
      assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
      assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
      assertThat(
          routingTable.index("test" + i).shard(0).shards().get(0).state(), equalTo(INITIALIZING));
    }
    RoutingNodes routingNodes = clusterState.routingNodes();
    assertThat(numberOfShardsOfType(routingNodes, INITIALIZING), equalTo(numberOfIndices));
    assertThat(
        routingNodes.node("node1").numberOfShardsWithState(INITIALIZING),
        anyOf(equalTo(3), equalTo(4)));
    assertThat(
        routingNodes.node("node2").numberOfShardsWithState(INITIALIZING),
        anyOf(equalTo(3), equalTo(4)));
    assertThat(
        routingNodes.node("node3").numberOfShardsWithState(INITIALIZING),
        anyOf(equalTo(3), equalTo(4)));

    logger.info("Start two more nodes, things should remain the same");
    clusterState =
        ClusterState.builder(clusterState)
            .nodes(
                DiscoveryNodes.builder(clusterState.nodes())
                    .put(newNode("node4"))
                    .put(newNode("node5")))
            .build();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

    prevRoutingTable = routingTable;
    routingTable = strategy.reroute(clusterState).routingTable();

    assertThat(prevRoutingTable == routingTable, equalTo(true));

    routingNodes = clusterState.routingNodes();
    prevRoutingTable = routingTable;
    routingTable =
        strategy
            .applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING))
            .routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

    assertThat(prevRoutingTable != routingTable, equalTo(true));
    for (int i = 0; i < numberOfIndices; i++) {
      assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
      assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
      assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
      assertThat(
          routingTable.index("test" + i).shard(0).shards().get(0).state(),
          anyOf(equalTo(RELOCATING), equalTo(STARTED)));
    }
    routingNodes = clusterState.routingNodes();
    assertThat(
        "4 source shard routing are relocating",
        numberOfShardsOfType(routingNodes, RELOCATING),
        equalTo(4));
    assertThat(
        "4 target shard routing are initializing",
        numberOfShardsOfType(routingNodes, INITIALIZING),
        equalTo(4));

    logger.info("Now, mark the relocated as started");
    prevRoutingTable = routingTable;
    routingTable =
        strategy
            .applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING))
            .routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

    assertThat(prevRoutingTable != routingTable, equalTo(true));
    for (int i = 0; i < numberOfIndices; i++) {
      assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
      assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
      assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
      assertThat(
          routingTable.index("test" + i).shard(0).shards().get(0).state(),
          anyOf(equalTo(RELOCATING), equalTo(STARTED)));
    }
    routingNodes = clusterState.routingNodes();
    assertThat(numberOfShardsOfType(routingNodes, STARTED), equalTo(numberOfIndices));
    for (RoutingNode routingNode : routingNodes) {
      assertThat(routingNode.numberOfShardsWithState(STARTED), equalTo(2));
    }
  }
}
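
The reroute-then-start cycle repeated throughout these tests could be factored into a single helper; a sketch under the same imports as the test class (the helper name is mine, not part of the original):

private static ClusterState startInitializingShards(
    AllocationService strategy, ClusterState clusterState) {
  // Promote all INITIALIZING shards to STARTED and fold the resulting routing
  // table back into a fresh cluster state, as the tests above do inline.
  RoutingNodes routingNodes = clusterState.routingNodes();
  RoutingTable routingTable =
      strategy
          .applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING))
          .routingTable();
  return ClusterState.builder(clusterState).routingTable(routingTable).build();
}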
Example no. 29
public class DocTableInfoBuilder {

  private final TableIdent ident;
  private ExecutorService executorService;
  private final boolean checkAliasSchema;
  private final Functions functions;
  private final ClusterService clusterService;
  private final TransportPutIndexTemplateAction transportPutIndexTemplateAction;
  private final MetaData metaData;
  private String[] concreteIndices;
  private static final ESLogger logger = Loggers.getLogger(DocTableInfoBuilder.class);

  public DocTableInfoBuilder(
      Functions functions,
      TableIdent ident,
      ClusterService clusterService,
      TransportPutIndexTemplateAction transportPutIndexTemplateAction,
      ExecutorService executorService,
      boolean checkAliasSchema) {
    this.functions = functions;
    this.clusterService = clusterService;
    this.transportPutIndexTemplateAction = transportPutIndexTemplateAction;
    this.ident = ident;
    this.executorService = executorService;
    this.metaData = clusterService.state().metaData();
    this.checkAliasSchema = checkAliasSchema;
  }

  private DocIndexMetaData docIndexMetaData() {
    DocIndexMetaData docIndexMetaData;
    String templateName = PartitionName.templateName(ident.schema(), ident.name());
    boolean createdFromTemplate = false;
    if (metaData.getTemplates().containsKey(templateName)) {
      docIndexMetaData = buildDocIndexMetaDataFromTemplate(ident.indexName(), templateName);
      createdFromTemplate = true;
      concreteIndices =
          metaData.concreteIndices(IndicesOptions.lenientExpandOpen(), ident.indexName());
    } else {
      try {
        concreteIndices =
            metaData.concreteIndices(IndicesOptions.strictExpandOpen(), ident.indexName());
        if (concreteIndices.length == 0) {
          // no matching index found
          throw new TableUnknownException(ident);
        }
        docIndexMetaData = buildDocIndexMetaData(concreteIndices[0]);
      } catch (IndexMissingException ex) {
        throw new TableUnknownException(ident.fqn(), ex);
      }
    }

    if ((!createdFromTemplate && concreteIndices.length == 1) || !checkAliasSchema) {
      return docIndexMetaData;
    }
    for (int i = 0; i < concreteIndices.length; i++) {
      try {
        docIndexMetaData =
            docIndexMetaData.merge(
                buildDocIndexMetaData(concreteIndices[i]),
                transportPutIndexTemplateAction,
                createdFromTemplate);
      } catch (IOException e) {
        throw new UnhandledServerException("Unable to merge/build new DocIndexMetaData", e);
      }
    }
    return docIndexMetaData;
  }

  private DocIndexMetaData buildDocIndexMetaData(String index) {
    DocIndexMetaData docIndexMetaData;
    try {
      docIndexMetaData = new DocIndexMetaData(functions, metaData.index(index), ident);
    } catch (IOException e) {
      throw new UnhandledServerException("Unable to build DocIndexMetaData", e);
    }
    return docIndexMetaData.build();
  }

  private DocIndexMetaData buildDocIndexMetaDataFromTemplate(String index, String templateName) {
    IndexTemplateMetaData indexTemplateMetaData = metaData.getTemplates().get(templateName);
    DocIndexMetaData docIndexMetaData;
    try {
      IndexMetaData.Builder builder = new IndexMetaData.Builder(index);
      builder.putMapping(
          Constants.DEFAULT_MAPPING_TYPE,
          indexTemplateMetaData.getMappings().get(Constants.DEFAULT_MAPPING_TYPE).toString());
      Settings settings = indexTemplateMetaData.settings();
      builder.settings(settings);
      // default values
      builder.numberOfShards(settings.getAsInt(SETTING_NUMBER_OF_SHARDS, 5));
      builder.numberOfReplicas(settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, 1));
      docIndexMetaData = new DocIndexMetaData(functions, builder.build(), ident);
    } catch (IOException e) {
      throw new UnhandledServerException("Unable to build DocIndexMetaData from template", e);
    }
    return docIndexMetaData.build();
  }

  public DocTableInfo build() {
    DocIndexMetaData md = docIndexMetaData();

    List<PartitionName> partitions = new ArrayList<>();
    if (!md.partitionedBy().isEmpty()) {
      for (String index : concreteIndices) {
        if (PartitionName.isPartition(index)) {
          try {
            PartitionName partitionName = PartitionName.fromIndexOrTemplate(index);
            assert partitionName.tableIdent().equals(ident);
            partitions.add(partitionName);
          } catch (IllegalArgumentException e) {
            // not a valid partition index name; log and skip it
            logger.warn(
                String.format(
                    Locale.ENGLISH,
                    "Cannot build partition %s of index %s",
                    index,
                    ident.indexName()),
                e);
          }
        }
      }
    }
    return new DocTableInfo(
        ident,
        md.columns(),
        md.partitionedByColumns(),
        md.generatedColumnReferences(),
        md.indices(),
        md.references(),
        md.analyzers(),
        md.primaryKey(),
        md.routingCol(),
        md.isAlias(),
        md.hasAutoGeneratedPrimaryKey(),
        concreteIndices,
        clusterService,
        md.numberOfShards(),
        md.numberOfReplicas(),
        md.tableParameters(),
        md.partitionedBy(),
        partitions,
        md.columnPolicy(),
        executorService);
  }
}
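
A construction sketch for the builder above; its collaborators would normally arrive via dependency injection, and the schema and table names here are hypothetical:

DocTableInfo tableInfo =
    new DocTableInfoBuilder(
            functions,
            new TableIdent("doc", "users"), // hypothetical schema/table
            clusterService,
            transportPutIndexTemplateAction,
            executorService,
            true) // checkAliasSchema
        .build();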
@Test
public class RiverMongoScriptTest extends RiverMongoDBTestAsbtract {

  private final ESLogger logger = Loggers.getLogger(getClass());
  private static final long wait = 2000;

  private DB mongoDB;
  private DBCollection mongoCollection;

  protected RiverMongoScriptTest() {
    super(
        "testriver-" + System.currentTimeMillis(),
        "testdatabase-" + System.currentTimeMillis(),
        "documents-" + System.currentTimeMillis(),
        "testindex-" + System.currentTimeMillis());
  }

  @BeforeClass
  public void createDatabase() {
    logger.debug("createDatabase {}", getDatabase());
    try {
      mongoDB = getMongo().getDB(getDatabase());
      mongoDB.setWriteConcern(WriteConcern.REPLICAS_SAFE);
      // logger.debug("Create river {}", getRiver());
      // super.createRiver("test-mongodb-river-with-script.json",
      // String.valueOf(getMongoPort1()), String.valueOf(getMongoPort2()),
      // String.valueOf(getMongoPort3()), getDatabase(), COLLECTION_NAME,
      // SCRIPT, INDEX_NAME);
      logger.info("Start createCollection");
      mongoCollection = mongoDB.createCollection(getCollection(), null);
      Assert.assertNotNull(mongoCollection);
    } catch (Throwable t) {
      logger.error("createDatabase failed.", t);
    }
  }

  @AfterClass
  public void cleanUp() {
    // super.deleteRiver();
    logger.info("Drop database " + mongoDB.getName());
    mongoDB.dropDatabase();
  }

  @Test
  public void testIgnoreScript() throws Throwable {
    logger.debug("Start testIgnoreScript");
    try {
      logger.debug("Create river {}", getRiver());
      String script = "ctx.ignore = true;";
      super.createRiver(
          "/test/elasticsearch/plugin/river/mongodb/script/test-mongodb-river-with-script.json",
          getRiver(),
          String.valueOf(getMongoPort1()),
          String.valueOf(getMongoPort2()),
          String.valueOf(getMongoPort3()),
          getDatabase(),
          getCollection(),
          script,
          getIndex(),
          getDatabase());

      String mongoDocument =
          copyToStringFromClasspath(
              "/test/elasticsearch/plugin/river/mongodb/script/test-simple-mongodb-document.json");
      DBObject dbObject = (DBObject) JSON.parse(mongoDocument);
      WriteResult result = mongoCollection.insert(dbObject);
      Thread.sleep(wait);
      logger.info("WriteResult: {}", result.toString());
      refreshIndex();

      ActionFuture<IndicesExistsResponse> response =
          getNode().client().admin().indices().exists(new IndicesExistsRequest(getIndex()));
      assertThat(response.actionGet().isExists(), equalTo(true));
      CountResponse countResponse = getNode().client().count(countRequest(getIndex())).actionGet();
      logger.info("Document count: {}", countResponse.getCount());
      assertThat(countResponse.getCount(), equalTo(0L));

      mongoCollection.remove(dbObject);

    } catch (Throwable t) {
      logger.error("testIgnoreScript failed.", t);
      t.printStackTrace();
      throw t;
    } finally {
      super.deleteRiver();
      super.deleteIndex();
    }
  }

  @Test
  public void testUpdateAttribute() throws Throwable {
    logger.debug("Start testUpdateAttribute");
    try {
      logger.debug("Create river {}", getRiver());
      String script = "ctx.document.score = 200;";
      super.createRiver(
          "/test/elasticsearch/plugin/river/mongodb/script/test-mongodb-river-with-script.json",
          getRiver(),
          String.valueOf(getMongoPort1()),
          String.valueOf(getMongoPort2()),
          String.valueOf(getMongoPort3()),
          getDatabase(),
          getCollection(),
          script,
          getIndex(),
          getDatabase());

      String mongoDocument =
          copyToStringFromClasspath(
              "/test/elasticsearch/plugin/river/mongodb/script/test-simple-mongodb-document.json");
      DBObject dbObject = (DBObject) JSON.parse(mongoDocument);
      WriteResult result = mongoCollection.insert(dbObject);
      Thread.sleep(wait);
      String id = dbObject.get("_id").toString();
      logger.info("WriteResult: {}", result.toString());
      refreshIndex();

      ActionFuture<IndicesExistsResponse> response =
          getNode().client().admin().indices().exists(new IndicesExistsRequest(getIndex()));
      assertThat(response.actionGet().isExists(), equalTo(true));

      SearchResponse sr =
          getNode()
              .client()
              .prepareSearch(getIndex())
              .setQuery(fieldQuery("_id", id))
              .execute()
              .actionGet();
      logger.debug("SearchResponse {}", sr.toString());
      long totalHits = sr.getHits().getTotalHits();
      logger.debug("TotalHits: {}", totalHits);
      assertThat(totalHits, equalTo(1L));

      assertThat(sr.getHits().getHits()[0].sourceAsMap().containsKey("score"), equalTo(true));
      int score = Integer.parseInt(sr.getHits().getHits()[0].sourceAsMap().get("score").toString());

      logger.debug("Score: {}", score);
      assertThat(score, equalTo(200));

      mongoCollection.remove(dbObject, WriteConcern.REPLICAS_SAFE);

    } catch (Throwable t) {
      logger.error("testUpdateAttribute failed.", t);
      t.printStackTrace();
      throw t;
    } finally {
      super.deleteRiver();
      super.deleteIndex();
    }
  }

  @Test
  public void testRemoveAttribute() throws Throwable {
    logger.debug("Start testRemoveAttribute");
    try {
      logger.debug("Create river {}", getRiver());
      String script = "delete ctx.document.score;";
      super.createRiver(
          "/test/elasticsearch/plugin/river/mongodb/script/test-mongodb-river-with-script.json",
          getRiver(),
          String.valueOf(getMongoPort1()),
          String.valueOf(getMongoPort2()),
          String.valueOf(getMongoPort3()),
          getDatabase(),
          getCollection(),
          script,
          getIndex(),
          getDatabase());

      String mongoDocument =
          copyToStringFromClasspath(
              "/test/elasticsearch/plugin/river/mongodb/script/test-simple-mongodb-document.json");
      DBObject dbObject = (DBObject) JSON.parse(mongoDocument);
      WriteResult result = mongoCollection.insert(dbObject);
      Thread.sleep(wait);
      String id = dbObject.get("_id").toString();
      logger.info("WriteResult: {}", result.toString());
      refreshIndex();

      ActionFuture<IndicesExistsResponse> response =
          getNode().client().admin().indices().exists(new IndicesExistsRequest(getIndex()));
      assertThat(response.actionGet().isExists(), equalTo(true));

      SearchResponse sr =
          getNode()
              .client()
              .prepareSearch(getIndex())
              .setQuery(fieldQuery("_id", id))
              .execute()
              .actionGet();
      logger.debug("SearchResponse {}", sr.toString());
      long totalHits = sr.getHits().getTotalHits();
      logger.debug("TotalHits: {}", totalHits);
      assertThat(totalHits, equalTo(1L));

      assertThat(sr.getHits().getHits()[0].sourceAsMap().containsKey("score"), equalTo(false));
      mongoCollection.remove(dbObject);
    } catch (Throwable t) {
      logger.error("testRemoveAttribute failed.", t);
      t.printStackTrace();
      throw t;
    } finally {
      super.deleteRiver();
      super.deleteIndex();
    }
  }

  @Test
  public void testRenameAttribute() throws Throwable {
    logger.debug("Start testRenameAttribute");
    try {
      logger.debug("Create river {}", getRiver());
      String script = "ctx.document.score2 = ctx.document.score; delete ctx.document.score;";
      super.createRiver(
          "/test/elasticsearch/plugin/river/mongodb/script/test-mongodb-river-with-script.json",
          getRiver(),
          String.valueOf(getMongoPort1()),
          String.valueOf(getMongoPort2()),
          String.valueOf(getMongoPort3()),
          getDatabase(),
          getCollection(),
          script,
          getIndex(),
          getDatabase());

      String mongoDocument =
          copyToStringFromClasspath(
              "/test/elasticsearch/plugin/river/mongodb/script/test-simple-mongodb-document.json");
      DBObject dbObject = (DBObject) JSON.parse(mongoDocument);
      WriteResult result = mongoCollection.insert(dbObject);
      Thread.sleep(wait);
      String id = dbObject.get("_id").toString();
      logger.info("WriteResult: {}", result.toString());
      refreshIndex();

      ActionFuture<IndicesExistsResponse> response =
          getNode().client().admin().indices().exists(new IndicesExistsRequest(getIndex()));
      assertThat(response.actionGet().isExists(), equalTo(true));

      SearchResponse sr =
          getNode()
              .client()
              .prepareSearch(getIndex())
              .setQuery(fieldQuery("_id", id))
              .execute()
              .actionGet();
      logger.debug("SearchResponse {}", sr.toString());
      long totalHits = sr.getHits().getTotalHits();
      logger.debug("TotalHits: {}", totalHits);
      assertThat(totalHits, equalTo(1L));

      assertThat(sr.getHits().getHits()[0].sourceAsMap().containsKey("score2"), equalTo(true));
      mongoCollection.remove(dbObject);
    } catch (Throwable t) {
      logger.error("testRenameAttribute failed.", t);
      t.printStackTrace();
      throw t;
    } finally {
      super.deleteRiver();
      super.deleteIndex();
    }
  }

  @Test
  public void testDeleteDocument() throws Throwable {
    logger.debug("Start testDeleteDocument");
    try {
      logger.debug("Create river {}", getRiver());
      String script = "if (ctx.document.to_be_deleted == true) { ctx.operation = 'd' };";
      super.createRiver(
          "/test/elasticsearch/plugin/river/mongodb/script/test-mongodb-river-with-script.json",
          getRiver(),
          String.valueOf(getMongoPort1()),
          String.valueOf(getMongoPort2()),
          String.valueOf(getMongoPort3()),
          getDatabase(),
          getCollection(),
          script,
          getIndex(),
          getDatabase());

      String mongoDocument =
          copyToStringFromClasspath(
              "/test/elasticsearch/plugin/river/mongodb/script/test-simple-mongodb-document.json");
      DBObject dbObject = (DBObject) JSON.parse(mongoDocument);
      WriteResult result = mongoCollection.insert(dbObject);
      Thread.sleep(wait);
      String id = dbObject.get("_id").toString();
      logger.info("WriteResult: {}", result.toString());
      refreshIndex();

      ActionFuture<IndicesExistsResponse> response =
          getNode().client().admin().indices().exists(new IndicesExistsRequest(getIndex()));
      assertThat(response.actionGet().isExists(), equalTo(true));

      SearchResponse sr =
          getNode()
              .client()
              .prepareSearch(getIndex())
              .setQuery(fieldQuery("_id", id))
              .execute()
              .actionGet();
      logger.debug("SearchResponse {}", sr.toString());
      long totalHits = sr.getHits().getTotalHits();
      logger.debug("TotalHits: {}", totalHits);
      assertThat(totalHits, equalTo(1L));

      dbObject.put("to_be_deleted", Boolean.TRUE);
      mongoCollection.save(dbObject);

      Thread.sleep(wait);
      refreshIndex();

      CountResponse countResponse = getNode().client().count(countRequest(getIndex())).actionGet();
      logger.info("Document count: {}", countResponse.getCount());
      assertThat(countResponse.getCount(), equalTo(0L));

      mongoCollection.remove(dbObject);
    } catch (Throwable t) {
      logger.error("testDeleteDocument failed.", t);
      t.printStackTrace();
      throw t;
    } finally {
      super.deleteRiver();
      super.deleteIndex();
    }
  }
}
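
One note on the fixed Thread.sleep(wait) pattern used in every test above: polling for the expected document count is usually more robust against slow river startup. A sketch under the same imports (the helper name is mine):

private static boolean waitForDocumentCount(
    Client client, String index, long expected, long timeoutMillis) throws InterruptedException {
  long deadline = System.currentTimeMillis() + timeoutMillis;
  while (System.currentTimeMillis() < deadline) {
    // Refresh so freshly indexed river documents become visible to count.
    client.admin().indices().prepareRefresh(index).execute().actionGet();
    long count = client.count(countRequest(index)).actionGet().getCount();
    if (count == expected) {
      return true;
    }
    Thread.sleep(100);
  }
  return false;
}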