Example #1
1
  public String getHardwareAddress() {
    TransportAddress transportAddress = httpServerTransport.boundAddress().publishAddress();
    if (!(transportAddress instanceof InetSocketTransportAddress)) {
      return null;
    }

    String hardwareAddress = null;
    InetAddress inetAddress =
        ((InetSocketTransportAddress) transportAddress).address().getAddress();
    try {
      NetworkInterface networkInterface = NetworkInterface.getByInetAddress(inetAddress);
      if (networkInterface != null) {
        if (networkInterface.getName().equals("lo")) {
          hardwareAddress = "loopback device";
        } else {
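          // Format the interface's hardware (MAC) address as colon-separated hex octets.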
          byte[] hardwareAddressBytes = networkInterface.getHardwareAddress();
          StringBuilder sb = new StringBuilder(18);
          for (byte b : hardwareAddressBytes) {
            if (sb.length() > 0) sb.append(':');
            sb.append(String.format("%02x", b));
          }
          hardwareAddress = sb.toString();
        }
      }

    } catch (SocketException e) {
      if (logger.isTraceEnabled()) {
        logger.trace("Error getting network interface", e);
      }
    }
    return hardwareAddress;
  }
  /** Starts the harvester for queries and/or URLs */
  public boolean runIndexAll() {
    logger.info(
        "Starting RDF harvester: endpoint [{}], queries [{}],"
            + "URIs [{}], index name [{}], typeName [{}]",
        rdfEndpoint,
        rdfQueries,
        rdfUris,
        indexName,
        typeName);

    while (true) {
      if (this.closed) {
        logger.info(
            "Ended harvest for endpoint [{}], queries [{}],"
                + "URIs [{}], index name {}, type name {}",
            rdfEndpoint,
            rdfQueries,
            rdfUris,
            indexName,
            typeName);
        return true;
      }

      // Harvest from a SPARQL endpoint
      if (!rdfQueries.isEmpty()) {
        harvestFromEndpoint();
      }

      // Harvest from RDF dumps
      harvestFromDumps();

      closed = true;
    }
  }
Example #3
0
  public static DigestBlob resumeTransfer(
      BlobContainer blobContainer, String digest, UUID transferId, long currentPos) {
    DigestBlob digestBlob = new DigestBlob(blobContainer, digest, transferId);
    digestBlob.file = getTmpFilePath(blobContainer, digest, transferId);

    try {
      logger.trace("Resuming DigestBlob {}. CurrentPos {}", digest, currentPos);

      digestBlob.headFileChannel = new FileOutputStream(digestBlob.file, false).getChannel();
      digestBlob.headLength = currentPos;
      digestBlob.headSize = new AtomicLong();
      digestBlob.headCatchedUpLatch = new CountDownLatch(1);

      RandomAccessFile raf = new RandomAccessFile(digestBlob.file, "rw");
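      // Set the file length to currentPos so the transfer resumes appending from that position.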
      raf.setLength(currentPos);
      raf.close();

      FileOutputStream outputStream = new FileOutputStream(digestBlob.file, true);
      digestBlob.fileChannel = outputStream.getChannel();
    } catch (IOException ex) {
      logger.error("error resuming transfer of {}, id: {}", ex, digest, transferId);
      return null;
    }

    return digestBlob;
  }
  private static Client startClient(Path tempDir, TransportAddress... transportAddresses) {
    Settings clientSettings =
        Settings.settingsBuilder()
            .put("name", "qa_smoke_client_" + counter.getAndIncrement())
            .put(
                InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING,
                true) // prevents any settings from being replaced by system properties.
            .put("client.transport.ignore_cluster_name", true)
            .put(Environment.PATH_HOME_SETTING.getKey(), tempDir)
            .put("node.mode", "network")
            .build(); // we require network here!

    TransportClient.Builder transportClientBuilder =
        TransportClient.builder().settings(clientSettings);
    TransportClient client =
        transportClientBuilder.build().addTransportAddresses(transportAddresses);

    logger.info("--> Elasticsearch Java TransportClient started");

    Exception clientException = null;
    try {
      ClusterHealthResponse health = client.admin().cluster().prepareHealth().get();
      logger.info(
          "--> connected to [{}] cluster which is running [{}] node(s).",
          health.getClusterName(),
          health.getNumberOfNodes());
    } catch (Exception e) {
      clientException = e;
    }

    assumeNoException(
        "Sounds like your cluster is not running at " + clusterAddresses, clientException);

    return client;
  }
  protected void deleteMailsFromUserMailbox(
      final Properties props,
      final String folderName,
      final int start,
      final int deleteCount,
      final String user,
      final String password)
      throws MessagingException {
    final Store store = Session.getInstance(props).getStore();

    store.connect(user, password);
    checkStoreForTestConnection(store);
    final Folder f = store.getFolder(folderName);
    f.open(Folder.READ_WRITE);

    final int msgCount = f.getMessageCount();

    final Message[] m =
        deleteCount == -1
            ? f.getMessages()
            : f.getMessages(start, Math.min(msgCount, deleteCount + start - 1));
    int d = 0;

    for (final Message message : m) {
      message.setFlag(Flag.DELETED, true);
      logger.info(
          "Delete msgnum: {} with sid {}", message.getMessageNumber(), message.getSubject());
      d++;
    }

    f.close(true);
    logger.info("Deleted " + d + " messages");
    store.close();
  }
  private boolean handleRevision1Response(ChannelHandlerContext ctx, int payloadLength)
      throws Exception {
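    // Revision 1 response layout: int status code, int description length, UTF-8 description bytes.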
    int code = buffered.readInt();

    int descriptionLength = buffered.readInt();
    byte[] descBytes = new byte[descriptionLength];
    buffered.readBytes(descBytes, 0, descBytes.length);

    String description = new String(descBytes, StandardCharsets.UTF_8);

    logger.debug(
        "Decoded payload with length:[{}], code:[{}], descriptionLength:[{}], description:[{}] on connection [{}]",
        payloadLength,
        code,
        descriptionLength,
        description,
        ctx.getChannel().getLocalAddress());

    if (200 <= code && code <= 299) {
      logger.info(
          "Connected to Found Elasticsearch: [{}]: [{}] on connection [{}]",
          code,
          description,
          ctx.getChannel().getLocalAddress());
      return true;
    } else {
      logger.error(
          "Unable to connect to Found Elasticsearch: [{}]: [{}] on connection [{}]",
          code,
          description,
          ctx.getChannel().getLocalAddress());
      return false;
    }
  }
Example #7
0
  public static void tryMlockall() {
    int errno = Integer.MIN_VALUE;
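    // mlockall() pins the JVM's memory so it cannot be swapped out; errno is recorded only if the call fails.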
    try {
      int result = CLibrary.mlockall(CLibrary.MCL_CURRENT);
      if (result != 0) {
        errno = Native.getLastError();
      } else {
        LOCAL_MLOCKALL = true;
      }
    } catch (UnsatisfiedLinkError e) {
      // this will have already been logged by CLibrary, no need to repeat it
      return;
    }

    if (errno != Integer.MIN_VALUE) {
      if (errno == CLibrary.ENOMEM
          && System.getProperty("os.name").toLowerCase(Locale.ROOT).contains("linux")) {
        logger.warn(
            "Unable to lock JVM memory (ENOMEM)."
                + " This can result in part of the JVM being swapped out."
                + " Increase RLIMIT_MEMLOCK (ulimit).");
      } else if (!System.getProperty("os.name").toLowerCase(Locale.ROOT).contains("mac")) {
        // OS X allows mlockall to be called, but always returns an error
        logger.warn("Unknown mlockall error " + errno);
      }
    }
  }
 @Before
 public void startNodes() {
   try {
     setClusterName();
     startNode("1");
     startNode("2"); // we need 2 nodes for knapsack
     findNodeAddress();
     try {
       ClusterHealthResponse healthResponse =
           client("1")
               .execute(
                   ClusterHealthAction.INSTANCE,
                   new ClusterHealthRequest()
                       .waitForYellowStatus()
                       .timeout(TimeValue.timeValueSeconds(30)))
               .actionGet();
       if (healthResponse != null && healthResponse.isTimedOut()) {
         throw new IOException(
             "cluster state is "
                 + healthResponse.getStatus().name()
                 + ", from here on, everything will fail!");
       }
     } catch (ElasticsearchTimeoutException e) {
       throw new IOException(
           "timeout, cluster does not respond to health request, cowardly refusing to continue with operations");
     }
     logger.info("ready");
   } catch (Throwable t) {
     logger.error("startNodes failed", t);
   }
 }
 @Override
 public BulkNodeClient newIndex(String index, Settings settings, Map<String, String> mappings) {
   if (closed) {
     throw new ElasticsearchIllegalStateException("client is closed");
   }
   if (client == null) {
     logger.warn("no client for create index");
     return this;
   }
   if (index == null) {
     logger.warn("no index name given to create index");
     return this;
   }
   CreateIndexRequestBuilder createIndexRequestBuilder =
       new CreateIndexRequestBuilder(client.admin().indices()).setIndex(index);
   if (settings != null) {
     logger.info("settings = {}", settings.getAsStructuredMap());
     createIndexRequestBuilder.setSettings(settings);
   }
   if (mappings != null) {
     for (String type : mappings.keySet()) {
       logger.info("found mapping for {}", type);
       createIndexRequestBuilder.addMapping(type, mappings.get(type));
     }
   }
   createIndexRequestBuilder.execute().actionGet();
   logger.info("index {} created", index);
   return this;
 }
  private void loadPrepDict() {

    _PrepDict = new DictSegment((char) 0);
    File file = new File(configuration.getDictRoot(), Dictionary.PATH_DIC_PREP);
    InputStream is = null;
    try {
      is = new FileInputStream(file);
    } catch (FileNotFoundException e) {
      logger.error("ik-analyzer", e);
    }
    if (is == null) {
      throw new RuntimeException("Preposition Dictionary not found!!!");
    }
    try {

      BufferedReader br = new BufferedReader(new InputStreamReader(is, "UTF-8"), 512);
      String theWord;
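      // Read the dictionary line by line, loading every non-empty entry into the dictionary segment.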
      do {
        theWord = br.readLine();
        if (theWord != null && !"".equals(theWord.trim())) {

          _PrepDict.fillSegment(theWord.trim().toCharArray());
        }
      } while (theWord != null);
    } catch (IOException e) {
      logger.error("ik-analyzer", e);
    } finally {
      try {
        is.close();
        is = null;
      } catch (IOException e) {
        logger.error("ik-analyzer", e);
      }
    }
  }
Example #11
0
  private void addQueryToStream(
      final Operation operation,
      final Timestamp<?> currentTimestamp,
      final DBObject update,
      final String collection)
      throws InterruptedException {
    if (logger.isTraceEnabled()) {
      logger.trace(
          "addQueryToStream - operation [{}], currentTimestamp [{}], update [{}]",
          operation,
          currentTimestamp,
          update);
    }

    if (collection == null) {
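      // No specific collection given: run the query against every collection in the slurped database.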
      for (String name : slurpedDb.getCollectionNames()) {
        DBCollection slurpedCollection = slurpedDb.getCollection(name);
        for (DBObject item : slurpedCollection.find(update, findKeys)) {
          addToStream(operation, currentTimestamp, item, collection);
        }
      }
    } else {
      DBCollection slurpedCollection = slurpedDb.getCollection(collection);
      for (DBObject item : slurpedCollection.find(update, findKeys)) {
        addToStream(operation, currentTimestamp, item, collection);
      }
    }
  }
  /** Loads the quantifier dictionary. */
  private void loadQuantifierDict() {
    // Create a quantifier dictionary instance
    _QuantifierDict = new DictSegment((char) 0);
    // Read the quantifier dictionary file
    File file = new File(configuration.getDictRoot(), Dictionary.PATH_DIC_QUANTIFIER);
    InputStream is = null;
    try {
      is = new FileInputStream(file);
    } catch (FileNotFoundException e) {
      logger.error("ik-analyzer", e);
    }
    try {
      BufferedReader br = new BufferedReader(new InputStreamReader(is, "UTF-8"), 512);
      String theWord = null;
      do {
        theWord = br.readLine();
        if (theWord != null && !"".equals(theWord.trim())) {
          _QuantifierDict.fillSegment(theWord.trim().toCharArray());
        }
      } while (theWord != null);

    } catch (IOException ioe) {
      logger.error("Quantifier Dictionary loading exception.");

    } finally {
      try {
        if (is != null) {
          is.close();
          is = null;
        }
      } catch (IOException e) {
        logger.error("ik-analyzer", e);
      }
    }
  }
Example #13
0
  @Override
  public Node stop() {
    if (!lifecycle.moveToStopped()) {
      return this;
    }
    ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
    logger.info("{{}}[{}]: stopping ...", Version.full(), JvmInfo.jvmInfo().pid());

    if (settings.getAsBoolean("http.enabled", true)) {
      injector.getInstance(HttpServer.class).stop();
    }
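    // Shut down the remaining node services, the transport layer, and any plugin services.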
    injector.getInstance(RoutingService.class).stop();
    injector.getInstance(ClusterService.class).stop();
    injector.getInstance(DiscoveryService.class).stop();
    injector.getInstance(MonitorService.class).stop();
    injector.getInstance(GatewayService.class).stop();
    injector.getInstance(SearchService.class).stop();
    injector.getInstance(RiversManager.class).stop();
    injector.getInstance(IndicesClusterStateService.class).stop();
    injector.getInstance(IndicesService.class).stop();
    injector.getInstance(RestController.class).stop();
    injector.getInstance(TransportService.class).stop();
    injector.getInstance(JmxService.class).close();

    for (Class<? extends LifecycleComponent> plugin : pluginsService.services()) {
      injector.getInstance(plugin).stop();
    }

    logger.info("{{}}[{}]: stopped", Version.full(), JvmInfo.jvmInfo().pid());

    return this;
  }
  /**
   * Queries the {@link #rdfEndpoint(String)} with each of the {@link #rdfQueries} and harvests the
   * results of the query.
   */
  private void harvestFromEndpoint() {

    Query query;
    QueryExecution qExec;

    for (String rdfQuery : rdfQueries) {
      if (closed) break;

      logger.info(
          "Harvesting with query: [{}] on index [{}] and type [{}]", rdfQuery, indexName, typeName);

      try {
        query = QueryFactory.create(rdfQuery);
      } catch (QueryParseException qpe) {
        logger.error("Could not parse [{}]. Please provide a relevant query. {}", rdfQuery, qpe);
        continue;
      }

      qExec = QueryExecutionFactory.sparqlService(rdfEndpoint, query);

      try {
        harvest(qExec);
      } catch (Exception e) {
        logger.error("Exception [{}] occurred while harvesting", e.getLocalizedMessage());
      } finally {
        qExec.close();
      }
    }
  }
  @Override
  public void sendResponse(final RestResponse response) {

    final User user = this.request.getFromContext("searchguard_authenticated_user");
    final Session _session =
        sessionStore.getSession(SecurityUtil.getSearchGuardSessionIdFromCookie(request));

    if (user != null) {
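      // Authenticated user: create a session and set a cookie only when no session exists yet.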
      if (_session == null) {
        final Session session = sessionStore.createSession(user);
        log.trace("Create session and set cookie for {}", user.getName());
        final CookieEncoder encoder = new CookieEncoder(true);
        final Cookie cookie = new DefaultCookie("es_searchguard_session", session.getId());

        // TODO FUTURE check cookie domain/path
        // cookie.setDomain(arg0);
        // cookie.setPath(arg0);

        cookie.setDiscard(true);
        cookie.setSecure(((NettyHttpRequest) request).request() instanceof DefaultHttpsRequest);
        cookie.setMaxAge(60 * 60); // 1h
        cookie.setHttpOnly(true);
        encoder.addCookie(cookie);
        response.addHeader("Set-Cookie", encoder.encode());
      } else {

        // Set-Cookie: token=deleted; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT
        log.trace("There is already a session");
        // TODO FUTURE check cookie session validity, expire, ...

      }
    }

    channel.sendResponse(response);
  }
  @Test
  public void testReplicaLevel() throws IOException {

    int numberOfShards = 5;
    int replicaLevel = 4;
    int shardsAfterReplica = 0;

    final AbstractIngestClient es =
        new IngestClient()
            .newClient(ADDRESS)
            .setIndex("replicatest")
            .setType("replicatest")
            .numberOfShards(numberOfShards)
            .numberOfReplicas(0)
            .dateDetection(false)
            .timeStampFieldEnabled(false)
            .newIndex();

    try {
      for (int i = 0; i < 12345; i++) {
        es.indexDocument(
            "replicatest", "replicatest", null, "{ \"name\" : \"" + randomString(32) + "\"}");
      }
      es.flush();
      shardsAfterReplica = es.updateReplicaLevel(replicaLevel);
      logger.info("shardsAfterReplica={}", shardsAfterReplica);
    } catch (NoNodeAvailableException e) {
      logger.warn("skipping, no node available");
    } finally {
      // assertEquals(shardsAfterReplica, numberOfShards * (replicaLevel + 1));
      es.shutdown();
    }
  }
 @Override
 public BaseIngestTransportClient newIndex(String index) {
   if (client == null) {
     logger.warn("no client for create index");
     return this;
   }
   if (index == null) {
     logger.warn("no index name given to create index");
     return this;
   }
   CreateIndexRequest request = new CreateIndexRequest(index);
   if (getSettings() != null) {
     request.settings(getSettings());
   }
   if (getMappings() != null) {
     for (Map.Entry<String, String> me : getMappings().entrySet()) {
       request.mapping(me.getKey(), me.getValue());
     }
   }
   logger.info(
       "creating index {} with settings = {}, mappings = {}",
       index,
       getSettings() != null ? getSettings().getAsMap() : null,
       getMappings());
   try {
     client.admin().indices().create(request).actionGet();
   } catch (Exception e) {
     logger.error(e.getMessage(), e);
   }
   return this;
 }
  @Test
  public void testRandomDocsNodeClient() throws Exception {
    final NodeClient es =
        new NodeClient()
            .maxActionsPerBulkRequest(1000)
            .flushInterval(TimeValue.timeValueSeconds(10))
            .newClient(client("1"))
            .newIndex("test");

    try {
      for (int i = 0; i < 12345; i++) {
        es.index("test", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}");
      }
      es.flush();
    } catch (NoNodeAvailableException e) {
      logger.warn("skipping, no node available");
    } finally {
      es.shutdown();
      assertEquals(13, es.getState().getTotalIngest().count());
      if (es.hasThrowable()) {
        logger.error("error", es.getThrowable());
      }
      assertFalse(es.hasThrowable());
    }
  }
  protected String handleRequest(
      Channel channel, StreamInput buffer, long requestId, Version version) throws IOException {
    final String action = buffer.readString();

    final NettyTransportChannel transportChannel =
        new NettyTransportChannel(transport, action, channel, requestId, version);
    try {
      final TransportRequestHandler handler = transportServiceAdapter.handler(action);
      if (handler == null) {
        throw new ActionNotFoundTransportException(action);
      }
      final TransportRequest request = handler.newInstance();
      request.remoteAddress(
          new InetSocketTransportAddress((InetSocketAddress) channel.getRemoteAddress()));
      request.readFrom(buffer);
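      // Handlers marked SAME run on the network thread; all others are dispatched to their thread pool.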
      if (handler.executor() == ThreadPool.Names.SAME) {
        //noinspection unchecked
        handler.messageReceived(request, transportChannel);
      } else {
        threadPool
            .executor(handler.executor())
            .execute(new RequestHandler(handler, request, transportChannel, action));
      }
    } catch (Throwable e) {
      try {
        transportChannel.sendResponse(e);
      } catch (IOException e1) {
        logger.warn("Failed to send error message back to client for action [" + action + "]", e);
        logger.warn("Actual Exception", e1);
      }
    }
    return action;
  }
  public void testConsistencyAfterIndexCreationFailure() {
    logger.info("--> deleting test index....");
    try {
      client().admin().indices().prepareDelete("test").get();
    } catch (IndexNotFoundException ex) {
      // Ignore
    }

    logger.info("--> creating test index with invalid settings ");
    try {
      client()
          .admin()
          .indices()
          .prepareCreate("test")
          .setSettings(settingsBuilder().put("number_of_shards", "bad"))
          .get();
      fail();
    } catch (SettingsException ex) {
      // Expected
    }

    logger.info("--> creating test index with valid settings ");
    CreateIndexResponse response =
        client()
            .admin()
            .indices()
            .prepareCreate("test")
            .setSettings(settingsBuilder().put("number_of_shards", 1))
            .get();
    assertThat(response.isAcknowledged(), equalTo(true));
  }
Example #21
0
  public void execute() throws Exception {
    final String activeShardCountFailure = checkActiveShardCount();
    final ShardRouting primaryRouting = primary.routingEntry();
    final ShardId primaryId = primaryRouting.shardId();
    if (activeShardCountFailure != null) {
      finishAsFailed(
          new UnavailableShardsException(
              primaryId,
              "{} Timeout: [{}], request: [{}]",
              activeShardCountFailure,
              request.timeout(),
              request));
      return;
    }

    totalShards.incrementAndGet();
    pendingShards.incrementAndGet();
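    // Perform the operation on the primary; its result supplies the request that is replicated.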
    primaryResult = primary.perform(request);
    final ReplicaRequest replicaRequest = primaryResult.replicaRequest();
    assert replicaRequest.primaryTerm() > 0 : "replicaRequest doesn't have a primary term";
    if (logger.isTraceEnabled()) {
      logger.trace(
          "[{}] op [{}] completed on primary for request [{}]", primaryId, opType, request);
    }

    performOnReplicas(primaryId, replicaRequest);

    successfulShards.incrementAndGet();
    decPendingAndFinishIfNeeded();
  }
 @Override
 protected void doClose() throws ElasticsearchException {
   int size = tasks.size();
   if (size > 0) {
     for (Future<?> f : tasks) {
       if (!f.isDone()) {
         logger.info("aborting knapsack task {}", f);
         boolean b = f.cancel(true);
         if (!b) {
           logger.error("knapsack task {} could not be cancelled", f);
         }
       }
     }
     tasks.clear();
   }
   logger.info("knapsack shutdown...");
   executor.shutdown();
   try {
     this.executor.awaitTermination(5, TimeUnit.SECONDS);
   } catch (InterruptedException e) {
     throw new ElasticsearchException(e.getMessage());
   }
   if (!executor.isShutdown()) {
     logger.info("knapsack shutdown now");
     executor.shutdownNow();
     try {
       this.executor.awaitTermination(5, TimeUnit.SECONDS);
     } catch (InterruptedException e) {
       throw new ElasticsearchException(e.getMessage());
     }
   }
   logger.info("knapsack shutdown complete");
 }
 private List<KnapsackState> get(String name) throws IOException {
   ImmutableList.Builder<KnapsackState> builder = ImmutableList.builder();
   try {
     logger.debug("get knapsack states: {}", name);
     final Client client = injector.getInstance(Client.class);
     createIndexIfNotExist(client);
     GetResponse getResponse =
         client.prepareGet(INDEX_NAME, MAPPING_NAME, name).execute().actionGet();
     if (!getResponse.isExists()) {
       return builder.build();
     }
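     // The stored source is a JSON array of states; advance to the array and parse each element.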
     XContentParser parser = xContent(JSON).createParser(getResponse.getSourceAsBytes());
     while (parser.nextToken() != START_ARRAY) {
       // forward
     }
     while (parser.nextToken() != END_ARRAY) {
       KnapsackState state = new KnapsackState();
       builder.add(state.fromXContent(parser));
     }
     return builder.build();
   } catch (Throwable t) {
     logger.error("get settings failed", t);
     return null;
   }
 }
Example #24
0
  @Test
  public void rerouteExplain() {
    Settings commonSettings = settingsBuilder().build();

    logger.info("--> starting a node");
    String node_1 = cluster().startNode(commonSettings);

    assertThat(cluster().size(), equalTo(1));
    ClusterHealthResponse healthResponse =
        client().admin().cluster().prepareHealth().setWaitForNodes("1").execute().actionGet();
    assertThat(healthResponse.isTimedOut(), equalTo(false));

    logger.info("--> create an index with 1 shard");
    client()
        .admin()
        .indices()
        .prepareCreate("test")
        .setSettings(
            settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
        .execute()
        .actionGet();

    ensureGreen("test");

    logger.info("--> disable allocation");
    Settings newSettings =
        settingsBuilder()
            .put(
                EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE,
                EnableAllocationDecider.Allocation.NONE.name())
            .build();
    client()
        .admin()
        .cluster()
        .prepareUpdateSettings()
        .setTransientSettings(newSettings)
        .execute()
        .actionGet();

    logger.info("--> starting a second node");
    String node_2 = cluster().startNode(commonSettings);
    assertThat(cluster().size(), equalTo(2));
    healthResponse =
        client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet();
    assertThat(healthResponse.isTimedOut(), equalTo(false));

    logger.info("--> try to move the shard from node1 to node2");
    MoveAllocationCommand cmd = new MoveAllocationCommand(new ShardId("test", 0), node_1, node_2);
    ClusterRerouteResponse resp =
        client().admin().cluster().prepareReroute().add(cmd).setExplain(true).execute().actionGet();
    RoutingExplanations e = resp.getExplanations();
    assertThat(e.explanations().size(), equalTo(1));
    RerouteExplanation explanation = e.explanations().get(0);
    assertThat(explanation.command().name(), equalTo(cmd.name()));
    assertThat(((MoveAllocationCommand) explanation.command()).shardId(), equalTo(cmd.shardId()));
    assertThat(((MoveAllocationCommand) explanation.command()).fromNode(), equalTo(cmd.fromNode()));
    assertThat(((MoveAllocationCommand) explanation.command()).toNode(), equalTo(cmd.toNode()));
    assertThat(explanation.decisions().type(), equalTo(Decision.Type.YES));
  }
  @Test
  public void testSingleIndexShardFailed() {
    AllocationService strategy =
        createAllocationService(
            settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());

    logger.info("Building initial routing table");

    MetaData metaData =
        MetaData.builder()
            .put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(0))
            .build();

    RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();

    ClusterState clusterState =
        ClusterState.builder().metaData(metaData).routingTable(routingTable).build();

    assertThat(routingTable.index("test").shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
    assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());

    logger.info("Adding one node and rerouting");
    clusterState =
        ClusterState.builder(clusterState)
            .nodes(DiscoveryNodes.builder().put(newNode("node1")))
            .build();
    RoutingTable prevRoutingTable = routingTable;
    routingTable = strategy.reroute(clusterState).routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

    assertThat(prevRoutingTable != routingTable, equalTo(true));
    assertThat(routingTable.index("test").shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().get(0).unassigned(), equalTo(false));
    assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(INITIALIZING));
    assertThat(
        routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));

    logger.info("Marking the shard as failed");
    RoutingNodes routingNodes = clusterState.routingNodes();
    prevRoutingTable = routingTable;
    routingTable =
        strategy
            .applyFailedShard(
                clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING).get(0))
            .routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

    assertThat(prevRoutingTable != routingTable, equalTo(true));
    assertThat(routingTable.index("test").shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
    assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
    assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
  }
  /**
   * Perform phase 3 of the recovery process
   *
   * <p>Phase3 again takes a snapshot of the translog, however this time the snapshot is acquired
   * under a write lock. The translog operations are sent to the target node where they are
   * replayed.
   *
   * <p>{@code InternalEngine#recover} is responsible for taking the snapshot of the translog, and
   * after phase 3 completes the snapshots from all three phases are released.
   */
  @Override
  public void phase3(Translog.Snapshot snapshot) throws ElasticsearchException {
    if (shard.state() == IndexShardState.CLOSED) {
      throw new IndexShardClosedException(request.shardId());
    }
    cancellableThreads.checkForCancel();
    StopWatch stopWatch = new StopWatch().start();
    final int totalOperations;
    logger.trace(
        "[{}][{}] recovery [phase3] to {}: sending transaction log operations",
        indexName,
        shardId,
        request.targetNode());

    // Send the translog operations to the target node
    totalOperations = sendSnapshot(snapshot);

    cancellableThreads.execute(
        new Interruptable() {
          @Override
          public void run() throws InterruptedException {
            // Send the FINALIZE request to the target node. The finalize request
            // clears unreferenced translog files, refreshes the engine now that
            // new segments are available, and enables garbage collection of
            // tombstone files. The shard is also moved to the POST_RECOVERY phase
            // during this time
            transportService
                .submitRequest(
                    request.targetNode(),
                    RecoveryTarget.Actions.FINALIZE,
                    new RecoveryFinalizeRecoveryRequest(request.recoveryId(), request.shardId()),
                    TransportRequestOptions.options()
                        .withTimeout(recoverySettings.internalActionLongTimeout()),
                    EmptyTransportResponseHandler.INSTANCE_SAME)
                .txGet();
          }
        });

    if (request.markAsRelocated()) {
      // TODO what happens if the recovery process fails afterwards, we need to mark this back to
      // started
      try {
        shard.relocated("to " + request.targetNode());
      } catch (IllegalIndexShardStateException e) {
        // we can ignore this exception since, on the other node, when it moved to phase3
        // it will also send shard started, which might cause the index shard we work against
        // to be closed by the time we get to the relocated method
      }
    }
    stopWatch.stop();
    logger.trace(
        "[{}][{}] recovery [phase3] to {}: took [{}]",
        indexName,
        shardId,
        request.targetNode(),
        stopWatch.totalTime());
    response.phase3Time = stopWatch.totalTime().millis();
    response.phase3Operations = totalOperations;
  }
 protected String getJsonSettings(String jsonDefinition, Object... args) throws Exception {
   logger.debug("Get river setting");
   String setting = copyToStringFromClasspath(jsonDefinition);
   if (args != null) {
     setting = String.format(setting, args);
   }
   logger.debug("River setting: {}", setting);
   return setting;
 }
  /**
   * Stop a river
   *
   * @param river
   */
  public void stop(T river) {
    if (logger.isDebugEnabled()) logger.debug("stop({})", river);
    // We only stop the river if it is started
    if (river == null || !river.isStart()) return;

    riverService.stop(river);

    if (logger.isDebugEnabled()) logger.debug("/stop({})", river);
  }
Example #29
0
  /** initialize native resources */
  public static void initializeNatives(
      Path tmpFile, boolean mlockAll, boolean seccomp, boolean ctrlHandler) {
    final ESLogger logger = Loggers.getLogger(Bootstrap.class);

    // check if the user is running as root, and bail
    if (Natives.definitelyRunningAsRoot()) {
      if (Boolean.parseBoolean(System.getProperty("es.insecure.allow.root"))) {
        logger.warn("running as ROOT user. this is a bad idea!");
      } else {
        throw new RuntimeException("don't run elasticsearch as root.");
      }
    }

    // enable secure computing mode
    if (seccomp) {
      Natives.trySeccomp(tmpFile);
    }

    // mlockall if requested
    if (mlockAll) {
      if (Constants.WINDOWS) {
        Natives.tryVirtualLock();
      } else {
        Natives.tryMlockall();
      }
    }

    // listener for windows close event
    if (ctrlHandler) {
      Natives.addConsoleCtrlHandler(
          new ConsoleCtrlHandler() {
            @Override
            public boolean handle(int code) {
              if (CTRL_CLOSE_EVENT == code) {
                logger.info("running graceful exit on windows");
                try {
                  Bootstrap.stop();
                } catch (IOException e) {
                  throw new ElasticsearchException("failed to stop node", e);
                }
                return true;
              }
              return false;
            }
          });
    }

    // force remainder of JNA to be loaded (if available).
    try {
      JNAKernel32Library.getInstance();
    } catch (Throwable ignored) {
      // we've already logged this.
    }

    // init lucene random seed. it will use /dev/urandom where available:
    StringHelper.randomId();
  }
  /**
   * Start a river
   *
   * @param river
   */
  public void start(T river) {
    if (logger.isDebugEnabled()) logger.debug("start({})", river);
    // We only start the river if it is flagged to be started
    if (river == null || !river.isStart()) return;

    riverService.start(river, getHelper().toXContent(river));

    if (logger.isDebugEnabled()) logger.debug("/start({})", river);
  }