Example 1

  @Inject
  public CachingClusteredClient(
      QueryToolChestWarehouse warehouse,
      TimelineServerView serverView,
      Cache cache,
      @Smile ObjectMapper objectMapper,
      @BackgroundCaching ExecutorService backgroundExecutorService,
      CacheConfig cacheConfig) {
    this.warehouse = warehouse;
    this.serverView = serverView;
    this.cache = cache;
    this.objectMapper = objectMapper;
    this.cacheConfig = cacheConfig;
    this.backgroundExecutorService = MoreExecutors.listeningDecorator(backgroundExecutorService);

    serverView.registerSegmentCallback(
        Execs.singleThreaded("CCClient-ServerView-CB-%d"),
        new ServerView.BaseSegmentCallback() {
          @Override
          public ServerView.CallbackAction segmentRemoved(
              DruidServerMetadata server, DataSegment segment) {
            // Evict cached results for the segment that just left the cluster.
            CachingClusteredClient.this.cache.close(segment.getIdentifier());
            return ServerView.CallbackAction.CONTINUE;
          }
        });
  }
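
The constructor registers the cache-eviction callback on a dedicated, named single-threaded executor. Below is a rough plain-Guava equivalent of what Execs.singleThreaded appears to provide (a sketch; the name format and task body are hypothetical, and the daemon setting is an assumption):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

class NamedSingleThreadSketch {
  public static void main(String[] args) {
    // Roughly what Execs.singleThreaded("segment-callback-%d") sets up:
    // a single daemon thread with a recognizable name format, so callback
    // work stays off the caller's thread and is easy to spot in thread dumps.
    ExecutorService exec =
        Executors.newSingleThreadExecutor(
            new ThreadFactoryBuilder()
                .setDaemon(true)
                .setNameFormat("segment-callback-%d")
                .build());
    exec.submit(() -> System.out.println("evict cache for removed segment"));
    exec.shutdown();
  }
}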

Example 2

 @Override
 public synchronized ListenableFuture<TaskStatus> run(final Task task) {
   final String taskId = task.getId();
   ListenableFuture<TaskStatus> future =
       MoreExecutors.listeningDecorator(Execs.singleThreaded("noop_test_task_exec_%s"))
           .submit(
               new Callable<TaskStatus>() {
                 @Override
                 public TaskStatus call() throws Exception {
                    // Add the task to runningTasks before counting down the run
                    // latch; otherwise getRunningTasks() may not yet include a
                    // task whose latch has already been released. Counting down
                    // signals that the task is actually running, the equivalent
                    // of obtaining a process holder to run the task in
                    // ForkingTaskRunner.
                   runningTasks.add(taskId);
                   if (runLatches != null) {
                     runLatches[Integer.parseInt(taskId)].countDown();
                   }
                   // Wait for completion count down
                   if (completionLatches != null) {
                     completionLatches[Integer.parseInt(taskId)].await();
                   }
                   taskRunnerWorkItems.remove(taskId);
                   runningTasks.remove(taskId);
                   return TaskStatus.success(taskId);
                 }
               });
   TaskRunnerWorkItem taskRunnerWorkItem =
       new TaskRunnerWorkItem(taskId, future) {
         @Override
         public TaskLocation getLocation() {
           return TASK_LOCATION;
         }
       };
   taskRunnerWorkItems.put(taskId, taskRunnerWorkItem);
   return future;
 }
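
The latch choreography above, register the task as running before counting down, then block on a completion latch, can be shown with plain java.util.concurrent types. A sketch with hypothetical stand-ins for the task id and the runner's fields:

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;

class LatchOrderingSketch {
  public static void main(String[] args) throws InterruptedException {
    Set<String> runningTasks = ConcurrentHashMap.newKeySet();
    CountDownLatch runLatch = new CountDownLatch(1);
    CountDownLatch completionLatch = new CountDownLatch(1);

    Thread task =
        new Thread(
            () -> {
              runningTasks.add("task-0"); // register BEFORE signaling, so observers see it
              runLatch.countDown(); // signal that the task is actually running
              try {
                completionLatch.await(); // hold until the test releases the task
              } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
              }
              runningTasks.remove("task-0");
            });
    task.start();

    runLatch.await(); // after this, the task is guaranteed to be visible
    System.out.println(runningTasks.contains("task-0")); // true
    completionLatch.countDown(); // release the task
    task.join();
  }
}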

Example 3

  @LifecycleStart
  public void start() {
    synchronized (lock) {
      if (started) {
        return;
      }

      this.exec = Execs.scheduledSingleThreaded("CoordinatorRuleManager-Exec--%d");

      // Poll immediately, then wait the configured poll period between runs.
      ScheduledExecutors.scheduleWithFixedDelay(
          exec,
          new Duration(0),
          config.get().getPollPeriod().toStandardDuration(),
          new Runnable() {
            @Override
            public void run() {
              poll();
            }
          });

      started = true;
    }
  }
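
The schedule above fires the first poll immediately (zero initial delay) and then waits one poll period after each run completes. A plain-JDK sketch of the same semantics, with a hypothetical poll period:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

class PollingSketch {
  public static void main(String[] args) {
    ScheduledExecutorService exec = Executors.newSingleThreadScheduledExecutor();
    long pollPeriodMillis = 60_000L; // hypothetical poll period
    // Zero initial delay: poll once right away, then wait pollPeriodMillis
    // after each run finishes before polling again.
    exec.scheduleWithFixedDelay(
        () -> System.out.println("poll()"), 0L, pollPeriodMillis, TimeUnit.MILLISECONDS);
  }
}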

Example 4

  private LeaderLatch createNewLeaderLatch() {
    final LeaderLatch newLeaderLatch =
        new LeaderLatch(
            curator,
            ZKPaths.makePath(zkPaths.getCoordinatorPath(), COORDINATOR_OWNER_NODE),
            config.getHost());

    newLeaderLatch.addListener(
        new LeaderLatchListener() {
          @Override
          public void isLeader() {
            DruidCoordinator.this.becomeLeader();
          }

          @Override
          public void notLeader() {
            DruidCoordinator.this.stopBeingLeader();
          }
        },
        Execs.singleThreaded("CoordinatorLeader-%s"));

    // Atomically swap in the new latch and hand the previous one (if any)
    // back to the caller.
    return leaderLatch.getAndSet(newLeaderLatch);
  }
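
For reference, a standalone sketch of the Curator LeaderLatch pattern used above; the latch path is hypothetical, and the caller is assumed to supply an already-started CuratorFramework client:

import java.util.concurrent.Executors;

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.leader.LeaderLatch;
import org.apache.curator.framework.recipes.leader.LeaderLatchListener;

class LeaderElectionSketch {
  static LeaderLatch joinElection(CuratorFramework curator, String host) throws Exception {
    LeaderLatch latch = new LeaderLatch(curator, "/example/leader", host);
    latch.addListener(
        new LeaderLatchListener() {
          @Override
          public void isLeader() {
            System.out.println("became leader");
          }

          @Override
          public void notLeader() {
            System.out.println("lost leadership");
          }
        },
        // Deliver leadership notifications on a dedicated thread.
        Executors.newSingleThreadExecutor());
    latch.start(); // must be started before it participates in the election
    return latch;
  }
}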

Example 5

  @Setup
  public void setup() throws IOException {
    log.info("SETUP CALLED AT " + System.currentTimeMillis());

    if (ComplexMetrics.getSerdeForType("hyperUnique") == null) {
      ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde(Hashing.murmur3_128()));
    }

    executorService = Execs.multiThreaded(numSegments, "TimeseriesThreadPool");

    setupQueries();

    String[] schemaQuery = schemaAndQuery.split("\\.");
    String schemaName = schemaQuery[0];
    String queryName = schemaQuery[1];

    schemaInfo = BenchmarkSchemas.SCHEMA_MAP.get(schemaName);
    query = SCHEMA_QUERY_MAP.get(schemaName).get(queryName);

    incIndexes = new ArrayList<>();
    for (int i = 0; i < numSegments; i++) {
      log.info("Generating rows for segment " + i);
      BenchmarkDataGenerator gen =
          new BenchmarkDataGenerator(
              schemaInfo.getColumnSchemas(),
              RNG_SEED + i,
              schemaInfo.getDataInterval(),
              rowsPerSegment);

      IncrementalIndex incIndex = makeIncIndex();

      for (int j = 0; j < rowsPerSegment; j++) {
        InputRow row = gen.nextRow();
        if (j % 10000 == 0) {
          log.info(j + " rows generated.");
        }
        incIndex.add(row);
      }
      log.info(rowsPerSegment + " rows generated");
      incIndexes.add(incIndex);
    }

    File tmpDir = Files.createTempDir();
    log.info("Using temp dir: " + tmpDir.getAbsolutePath());
    tmpDir.deleteOnExit();

    qIndexes = new ArrayList<>();
    for (int i = 0; i < numSegments; i++) {
      File indexFile = INDEX_MERGER_V9.persist(incIndexes.get(i), tmpDir, new IndexSpec());

      QueryableIndex qIndex = INDEX_IO.loadIndex(indexFile);
      qIndexes.add(qIndex);
    }

    factory =
        new TimeseriesQueryRunnerFactory(
            new TimeseriesQueryQueryToolChest(
                QueryBenchmarkUtil.NoopIntervalChunkingQueryRunnerDecorator()),
            new TimeseriesQueryEngine(),
            QueryBenchmarkUtil.NOOP_QUERYWATCHER);
  }
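
Execs.multiThreaded(numSegments, ...) above sizes the pool to one thread per segment, so the benchmark can query every segment concurrently. The same sizing idea with plain JDK executors (segment count and task body are hypothetical):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class PerSegmentPoolSketch {
  public static void main(String[] args) {
    int numSegments = 4; // hypothetical segment count
    // One thread per segment lets work on all segments proceed at once.
    ExecutorService pool = Executors.newFixedThreadPool(numSegments);
    for (int i = 0; i < numSegments; i++) {
      final int segment = i;
      pool.submit(() -> System.out.println("querying segment " + segment));
    }
    pool.shutdown();
  }
}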

Example 6

  @Inject
  public BrokerServerView(
      QueryToolChestWarehouse warehouse,
      QueryWatcher queryWatcher,
      @Smile ObjectMapper smileMapper,
      @Client HttpClient httpClient,
      FilteredServerInventoryView baseView,
      TierSelectorStrategy tierSelectorStrategy,
      ServiceEmitter emitter,
      final BrokerSegmentWatcherConfig segmentWatcherConfig) {
    this.warehouse = warehouse;
    this.queryWatcher = queryWatcher;
    this.smileMapper = smileMapper;
    this.httpClient = httpClient;
    this.baseView = baseView;
    this.tierSelectorStrategy = tierSelectorStrategy;
    this.emitter = emitter;
    this.clients = Maps.newConcurrentMap();
    this.selectors = Maps.newHashMap();
    this.timelines = Maps.newHashMap();

    this.segmentFilter =
        new Predicate<Pair<DruidServerMetadata, DataSegment>>() {
          @Override
          public boolean apply(Pair<DruidServerMetadata, DataSegment> input) {
            if (segmentWatcherConfig.getWatchedTiers() != null
                && !segmentWatcherConfig.getWatchedTiers().contains(input.lhs.getTier())) {
              return false;
            }

            if (segmentWatcherConfig.getWatchedDataSources() != null
                && !segmentWatcherConfig
                    .getWatchedDataSources()
                    .contains(input.rhs.getDataSource())) {
              return false;
            }

            return true;
          }
        };
    ExecutorService exec = Execs.singleThreaded("BrokerServerView-%s");
    baseView.registerSegmentCallback(
        exec,
        new ServerView.SegmentCallback() {
          @Override
          public ServerView.CallbackAction segmentAdded(
              DruidServerMetadata server, DataSegment segment) {
            serverAddedSegment(server, segment);
            return ServerView.CallbackAction.CONTINUE;
          }

          @Override
          public ServerView.CallbackAction segmentRemoved(
              final DruidServerMetadata server, DataSegment segment) {
            serverRemovedSegment(server, segment);
            return ServerView.CallbackAction.CONTINUE;
          }

          @Override
          public CallbackAction segmentViewInitialized() {
            initialized = true;
            return ServerView.CallbackAction.CONTINUE;
          }
        },
        segmentFilter);

    baseView.registerServerCallback(
        exec,
        new ServerView.ServerCallback() {
          @Override
          public ServerView.CallbackAction serverRemoved(DruidServer server) {
            removeServer(server);
            return ServerView.CallbackAction.CONTINUE;
          }
        });
  }
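
segmentFilter admits a segment only when both the tier check and the datasource check pass, with a null config set meaning "no restriction". The same logic as a standalone predicate, with hypothetical config values:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.function.BiPredicate;

class SegmentFilterSketch {
  public static void main(String[] args) {
    // Hypothetical config: watch only the "hot" tier, watch all datasources.
    Set<String> watchedTiers = new HashSet<>(Arrays.asList("hot"));
    Set<String> watchedDataSources = null;

    // null means unrestricted, mirroring the checks in segmentFilter above.
    BiPredicate<String, String> watched =
        (tier, dataSource) ->
            (watchedTiers == null || watchedTiers.contains(tier))
                && (watchedDataSources == null || watchedDataSources.contains(dataSource));

    System.out.println(watched.test("hot", "wikipedia")); // true
    System.out.println(watched.test("cold", "wikipedia")); // false: tier not watched
  }
}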

Example 7

  public ServerInventoryView(
      final EmittingLogger log,
      final String announcementsPath,
      final String inventoryPath,
      final CuratorFramework curator,
      final ObjectMapper jsonMapper,
      final TypeReference<InventoryType> typeReference) {
    this.log = log;
    this.inventoryManager =
        new CuratorInventoryManager<>(
            curator,
            new InventoryManagerConfig() {
              @Override
              public String getContainerPath() {
                return announcementsPath;
              }

              @Override
              public String getInventoryPath() {
                return inventoryPath;
              }
            },
            Execs.singleThreaded("ServerInventoryView-%s"),
            new CuratorInventoryManagerStrategy<DruidServer, InventoryType>() {
              @Override
              public DruidServer deserializeContainer(byte[] bytes) {
                try {
                  return jsonMapper.readValue(bytes, DruidServer.class);
                } catch (IOException e) {
                  throw Throwables.propagate(e);
                }
              }

              @Override
              public byte[] serializeContainer(DruidServer container) {
                try {
                  return jsonMapper.writeValueAsBytes(container);
                } catch (JsonProcessingException e) {
                  throw Throwables.propagate(e);
                }
              }

              @Override
              public InventoryType deserializeInventory(byte[] bytes) {
                try {
                  return jsonMapper.readValue(bytes, typeReference);
                } catch (IOException e) {
                  throw Throwables.propagate(e);
                }
              }

              @Override
              public byte[] serializeInventory(InventoryType inventory) {
                try {
                  return jsonMapper.writeValueAsBytes(inventory);
                } catch (JsonProcessingException e) {
                  throw Throwables.propagate(e);
                }
              }

              @Override
              public void newContainer(DruidServer container) {
                log.info("New Server[%s]", container);
              }

              @Override
              public void deadContainer(DruidServer deadContainer) {
                log.info("Server Disappeared[%s]", deadContainer);
                runServerCallbacks(deadContainer);
              }

              @Override
              public DruidServer updateContainer(
                  DruidServer oldContainer, DruidServer newContainer) {
                return newContainer.addDataSegments(oldContainer);
              }

              @Override
              public DruidServer addInventory(
                  final DruidServer container, String inventoryKey, final InventoryType inventory) {
                return addInnerInventory(container, inventoryKey, inventory);
              }

              @Override
              public DruidServer updateInventory(
                  DruidServer container, String inventoryKey, InventoryType inventory) {
                return updateInnerInventory(container, inventoryKey, inventory);
              }

              @Override
              public DruidServer removeInventory(final DruidServer container, String inventoryKey) {
                return removeInnerInventory(container, inventoryKey);
              }

              @Override
              public void inventoryInitialized() {
                log.info("Inventory Initialized");
                runSegmentCallbacks(
                    new Function<SegmentCallback, CallbackAction>() {
                      @Override
                      public CallbackAction apply(SegmentCallback input) {
                        return input.segmentViewInitialized();
                      }
                    });
              }
            });
  }
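
The serializeContainer/deserializeContainer strategy methods above are thin wrappers around Jackson. A standalone sketch of that round trip; the Payload type is a hypothetical stand-in for DruidServer:

import java.io.IOException;

import com.fasterxml.jackson.databind.ObjectMapper;

class JacksonRoundTripSketch {
  // Hypothetical stand-in for DruidServer; a public field avoids getters.
  static class Payload {
    public String name;
  }

  public static void main(String[] args) throws IOException {
    ObjectMapper mapper = new ObjectMapper();
    Payload in = new Payload();
    in.name = "server-1";

    byte[] bytes = mapper.writeValueAsBytes(in); // serializeContainer equivalent
    Payload out = mapper.readValue(bytes, Payload.class); // deserializeContainer equivalent
    System.out.println(out.name); // server-1
  }
}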

Example 8

 @Inject
 public ThreadPoolTaskRunner(TaskToolboxFactory toolboxFactory) {
   this.toolboxFactory = Preconditions.checkNotNull(toolboxFactory, "toolboxFactory");
   this.exec = MoreExecutors.listeningDecorator(Execs.singleThreaded("task-runner-%d"));
 }
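
Guava's MoreExecutors.listeningDecorator, used above, wraps a plain ExecutorService so that submit() returns a ListenableFuture that callbacks can be attached to. A minimal sketch:

import java.util.concurrent.Executors;

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

class ListenableFutureSketch {
  public static void main(String[] args) {
    ListeningExecutorService exec =
        MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
    ListenableFuture<String> future = exec.submit(() -> "done"); // Callable<String>
    Futures.addCallback(
        future,
        new FutureCallback<String>() {
          @Override
          public void onSuccess(String result) {
            System.out.println(result);
          }

          @Override
          public void onFailure(Throwable t) {
            t.printStackTrace();
          }
        },
        MoreExecutors.directExecutor());
    exec.shutdown();
  }
}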

Example 9

  public AppenderatorTester(final int maxRowsInMemory, final File basePersistDirectory) {
    objectMapper = new DefaultObjectMapper();
    objectMapper.registerSubtypes(LinearShardSpec.class);

    final Map<String, Object> parserMap =
        objectMapper.convertValue(
            new MapInputRowParser(
                new JSONParseSpec(
                    new TimestampSpec("ts", "auto", null), new DimensionsSpec(null, null, null))),
            Map.class);
    schema =
        new DataSchema(
            DATASOURCE,
            parserMap,
            new AggregatorFactory[] {
              new CountAggregatorFactory("count"), new LongSumAggregatorFactory("met", "met")
            },
            new UniformGranularitySpec(Granularity.MINUTE, QueryGranularities.NONE, null),
            objectMapper);

    tuningConfig =
        new RealtimeTuningConfig(
            maxRowsInMemory,
            null,
            null,
            basePersistDirectory,
            null,
            null,
            null,
            null,
            null,
            null,
            0,
            0,
            null,
            null);

    metrics = new FireDepartmentMetrics();
    queryExecutor = Execs.singleThreaded("queryExecutor(%d)");

    indexIO =
        new IndexIO(
            objectMapper,
            new ColumnConfig() {
              @Override
              public int columnCacheSizeBytes() {
                return 0;
              }
            });
    indexMerger = new IndexMerger(objectMapper, indexIO);

    emitter =
        new ServiceEmitter(
            "test",
            "test",
            new LoggingEmitter(
                new Logger(AppenderatorTester.class), LoggingEmitter.Level.INFO, objectMapper));
    emitter.start();
    EmittingLogger.registerEmitter(emitter);
    dataSegmentPusher =
        new DataSegmentPusher() {
          @Deprecated
          @Override
          public String getPathForHadoop(String dataSource) {
            return getPathForHadoop();
          }

          @Override
          public String getPathForHadoop() {
            throw new UnsupportedOperationException();
          }

          @Override
          public DataSegment push(File file, DataSegment segment) throws IOException {
            pushedSegments.add(segment);
            return segment;
          }
        };
    appenderator =
        Appenderators.createRealtime(
            schema,
            tuningConfig,
            metrics,
            dataSegmentPusher,
            objectMapper,
            indexIO,
            indexMerger,
            new DefaultQueryRunnerFactoryConglomerate(
                ImmutableMap.<Class<? extends Query>, QueryRunnerFactory>of(
                    TimeseriesQuery.class,
                    new TimeseriesQueryRunnerFactory(
                        new TimeseriesQueryQueryToolChest(
                            new IntervalChunkingQueryRunnerDecorator(
                                queryExecutor, QueryRunnerTestHelper.NOOP_QUERYWATCHER, emitter)),
                        new TimeseriesQueryEngine(),
                        QueryRunnerTestHelper.NOOP_QUERYWATCHER))),
            new DataSegmentAnnouncer() {
              @Override
              public void announceSegment(DataSegment segment) throws IOException {}

              @Override
              public void unannounceSegment(DataSegment segment) throws IOException {}

              @Override
              public void announceSegments(Iterable<DataSegment> segments) throws IOException {}

              @Override
              public void unannounceSegments(Iterable<DataSegment> segments) throws IOException {}

              @Override
              public boolean isAnnounced(DataSegment segment) {
                return false;
              }
            },
            emitter,
            queryExecutor,
            MapCache.create(2048),
            new CacheConfig());
  }
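
The DataSegmentPusher above is a capturing test double: push() records the segment instead of uploading it, so the test can later assert on pushedSegments. The pattern in isolation, with a hypothetical stand-in interface:

import java.util.ArrayList;
import java.util.List;

class CapturingPusherSketch {
  // Hypothetical stand-in for the DataSegmentPusher interface.
  interface Pusher {
    String push(String segment);
  }

  public static void main(String[] args) {
    List<String> pushedSegments = new ArrayList<>();
    Pusher pusher =
        segment -> {
          pushedSegments.add(segment); // record instead of uploading
          return segment;
        };
    pusher.push("segment-1");
    System.out.println(pushedSegments); // [segment-1]
  }
}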