Example #1
  public Node start() {
    if (!lifecycle.moveToStarted()) {
      return this;
    }

    ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
    logger.info("starting ...");

    // hack around dependency injection problem (for now...)
    injector
        .getInstance(Discovery.class)
        .setAllocationService(injector.getInstance(AllocationService.class));

    for (Class<? extends LifecycleComponent> plugin : pluginsService.services()) {
      injector.getInstance(plugin).start();
    }

    injector.getInstance(MappingUpdatedAction.class).start();
    injector.getInstance(IndicesService.class).start();
    injector.getInstance(IndexingMemoryController.class).start();
    injector.getInstance(IndicesClusterStateService.class).start();
    injector.getInstance(IndicesTTLService.class).start();
    injector.getInstance(RiversManager.class).start();
    injector.getInstance(SnapshotsService.class).start();
    injector.getInstance(TransportService.class).start();
    injector.getInstance(ClusterService.class).start();
    injector.getInstance(RoutingService.class).start();
    injector.getInstance(SearchService.class).start();
    injector.getInstance(MonitorService.class).start();
    injector.getInstance(RestController.class).start();
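    // start discovery, then block until the initial cluster state has been received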
    DiscoveryService discoService = injector.getInstance(DiscoveryService.class).start();
    discoService.waitForInitialState();

    // gateway should start after disco, so it can try and recover from the gateway on "start"
    injector.getInstance(GatewayService.class).start();

    if (settings.getAsBoolean("http.enabled", true)) {
      injector.getInstance(HttpServer.class).start();
    }
    injector.getInstance(ResourceWatcherService.class).start();
    injector.getInstance(TribeService.class).start();

    logger.info("started");

    return this;
  }
Example #2
  public Node start() {
    if (!lifecycle.moveToStarted()) {
      return this;
    }

    ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
    logger.info("{{}}[{}]: starting ...", Version.full(), JvmInfo.jvmInfo().pid());

    for (Class<? extends LifecycleComponent> plugin : pluginsService.services()) {
      injector.getInstance(plugin).start();
    }

    injector.getInstance(IndicesService.class).start();
    injector.getInstance(IndexingMemoryBufferController.class).start();
    injector.getInstance(IndicesClusterStateService.class).start();
    injector.getInstance(RiversManager.class).start();
    injector.getInstance(ClusterService.class).start();
    injector.getInstance(RoutingService.class).start();
    injector.getInstance(SearchService.class).start();
    injector.getInstance(MonitorService.class).start();
    injector.getInstance(RestController.class).start();
    injector.getInstance(TransportService.class).start();
    DiscoveryService discoService = injector.getInstance(DiscoveryService.class).start();

    // gateway should start after disco, so it can try and recover from the gateway on "start"
    injector.getInstance(GatewayService.class).start();

    if (settings.getAsBoolean("http.enabled", true)) {
      injector.getInstance(HttpServer.class).start();
    }
    injector
        .getInstance(JmxService.class)
        .connectAndRegister(
            discoService.nodeDescription(), injector.getInstance(NetworkService.class));

    logger.info("{{}}[{}]: started", Version.full(), JvmInfo.jvmInfo().pid());

    return this;
  }
Example #3
  @Inject
  public TribeService(
      Settings settings, ClusterService clusterService, DiscoveryService discoveryService) {
    super(settings);
    this.clusterService = clusterService;
    Map<String, Settings> nodesSettings = new HashMap<>(settings.getGroups("tribe", true));
    nodesSettings.remove("blocks"); // remove prefix settings that don't indicate a client
    nodesSettings.remove("on_conflict"); // remove prefix settings that don't indicate a client
    for (Map.Entry<String, Settings> entry : nodesSettings.entrySet()) {
      Settings.Builder sb = Settings.builder().put(entry.getValue());
      sb.put("name", settings.get("name") + "/" + entry.getKey());
      sb.put(
          Environment.PATH_HOME_SETTING.getKey(),
          Environment.PATH_HOME_SETTING.get(settings)); // pass through ES home dir
      sb.put(TRIBE_NAME, entry.getKey());
      if (sb.get("http.enabled") == null) {
        sb.put("http.enabled", false);
      }
      sb.put(Node.NODE_CLIENT_SETTING.getKey(), true);
      nodes.add(new TribeClientNode(sb.build()));
    }

    String[] blockIndicesWrite = Strings.EMPTY_ARRAY;
    String[] blockIndicesRead = Strings.EMPTY_ARRAY;
    String[] blockIndicesMetadata = Strings.EMPTY_ARRAY;
    if (!nodes.isEmpty()) {
      // remove the initial election / recovery blocks since we are not going to have a
      // master elected in this single tribe node local "cluster"
      clusterService.removeInitialStateBlock(discoveryService.getNoMasterBlock());
      clusterService.removeInitialStateBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK);
      if (settings.getAsBoolean("tribe.blocks.write", false)) {
        clusterService.addInitialStateBlock(TRIBE_WRITE_BLOCK);
      }
      blockIndicesWrite = settings.getAsArray("tribe.blocks.write.indices", Strings.EMPTY_ARRAY);
      if (settings.getAsBoolean("tribe.blocks.metadata", false)) {
        clusterService.addInitialStateBlock(TRIBE_METADATA_BLOCK);
      }
      blockIndicesMetadata =
          settings.getAsArray("tribe.blocks.metadata.indices", Strings.EMPTY_ARRAY);
      blockIndicesRead = settings.getAsArray("tribe.blocks.read.indices", Strings.EMPTY_ARRAY);
      for (Node node : nodes) {
        node.injector().getInstance(ClusterService.class).add(new TribeClusterStateListener(node));
      }
    }
    this.blockIndicesMetadata = blockIndicesMetadata;
    this.blockIndicesRead = blockIndicesRead;
    this.blockIndicesWrite = blockIndicesWrite;

    this.onConflict = settings.get("tribe.on_conflict", ON_CONFLICT_ANY);
  }
Example #4
  @Override
  protected void doStart() throws ElasticsearchException {
    Map<String, String> nodeAttributes = discoveryNodeService.buildAttributes();
    // note: we rely on the fact that it's a new id each time we start; see FD and "kill -9" handling
    final String nodeId = DiscoveryService.generateNodeId(settings);
    localNode =
        new DiscoveryNode(
            settings.get("name"),
            nodeId,
            transportService.boundAddress().publishAddress(),
            nodeAttributes,
            version);
    latestDiscoNodes =
        new DiscoveryNodes.Builder().put(localNode).localNodeId(localNode.id()).build();
    nodesFD.updateNodes(latestDiscoNodes);
    pingService.start();

    // do the join on a different thread, the DiscoveryService waits for 30s anyhow till it is
    // discovered
    asyncJoinCluster();
  }

  @Override
  public DiscoveryNode localNode() {
    return discoveryService.localNode();
  }

  @Test
  public void testFileUriCollect() throws Exception {
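    // mock the cluster and discovery plumbing so the local node resolves to "dummyNodeId"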
    ClusterService clusterService = mock(ClusterService.class);
    DiscoveryNode discoveryNode = mock(DiscoveryNode.class);
    when(discoveryNode.id()).thenReturn("dummyNodeId");
    DiscoveryNodes discoveryNodes = mock(DiscoveryNodes.class);
    when(discoveryNodes.localNodeId()).thenReturn("dummyNodeId");
    ClusterState clusterState = mock(ClusterState.class);
    when(clusterState.nodes()).thenReturn(discoveryNodes);
    when(clusterService.state()).thenReturn(clusterState);
    DiscoveryService discoveryService = mock(DiscoveryService.class);
    when(discoveryService.localNode()).thenReturn(discoveryNode);
    IndicesService indicesService = mock(IndicesService.class);
    Functions functions =
        new Functions(
            ImmutableMap.<FunctionIdent, FunctionImplementation>of(),
            ImmutableMap.<String, DynamicFunctionResolver>of());
    ReferenceResolver referenceResolver =
        new ReferenceResolver() {
          @Override
          public ReferenceImplementation getImplementation(ReferenceIdent ident) {
            return null;
          }
        };

    NodeSettingsService nodeSettingsService = mock(NodeSettingsService.class);

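    // build the collect operation; collaborators that are not under test are mocked or empty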
    MapSideDataCollectOperation collectOperation =
        new MapSideDataCollectOperation(
            clusterService,
            ImmutableSettings.EMPTY,
            mock(TransportActionProvider.class, Answers.RETURNS_DEEP_STUBS.get()),
            mock(BulkRetryCoordinatorPool.class),
            functions,
            referenceResolver,
            mock(NodeSysExpression.class),
            indicesService,
            new ThreadPool(
                ImmutableSettings.builder().put("name", getClass().getName()).build(), null),
            new CollectServiceResolver(
                discoveryService,
                new SystemCollectService(
                    discoveryService,
                    functions,
                    new StatsTables(ImmutableSettings.EMPTY, nodeSettingsService))),
            mock(InformationSchemaCollectService.class),
            mock(UnassignedShardsCollectService.class));

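    // write two JSON lines into a temporary file that acts as the collect source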
    File tmpFile = temporaryFolder.newFile("fileUriCollectOperation.json");
    try (FileWriter writer = new FileWriter(tmpFile)) {
      writer.write("{\"name\": \"Arthur\", \"id\": 4, \"details\": {\"age\": 38}}\n");
      writer.write("{\"id\": 5, \"name\": \"Trillian\", \"details\": {\"age\": 33}}\n");
    }

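    // route the file URI collect phase to the mocked local node id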
    Routing routing =
        new Routing(
            TreeMapBuilder.<String, Map<String, List<Integer>>>newMapBuilder()
                .put("dummyNodeId", new TreeMap<String, List<Integer>>())
                .map());
    FileUriCollectPhase collectNode =
        new FileUriCollectPhase(
            UUID.randomUUID(),
            0,
            "test",
            routing,
            Literal.newLiteral(Paths.get(tmpFile.toURI()).toUri().toString()),
            Arrays.<Symbol>asList(
                createReference("name", DataTypes.STRING),
                createReference(new ColumnIdent("details", "age"), DataTypes.INTEGER)),
            Arrays.<Projection>asList(),
            null,
            false);
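    // run the collection and expect one row per JSON line, with name and details['age'] extracted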
    CollectingProjector cd = new CollectingProjector();
    cd.startProjection(mock(ExecutionState.class));
    collectOperation.collect(collectNode, cd, mock(JobCollectContext.class));
    assertThat(cd.result().get(), contains(isRow("Arthur", 38), isRow("Trillian", 33)));
  }