Example No. 1
  @Override
  public void addSegment(DataSegment segment) {
    try {
      log.info("Loading segment %s", segment.getIdentifier());

      try {
        serverManager.loadSegment(segment);
      } catch (Exception e) {
        removeSegment(segment);
        throw new SegmentLoadingException(
            e, "Exception loading segment[%s]", segment.getIdentifier());
      }

      File segmentInfoCacheFile = new File(config.getInfoDir(), segment.getIdentifier());
      if (!segmentInfoCacheFile.exists()) {
        try {
          jsonMapper.writeValue(segmentInfoCacheFile, segment);
        } catch (IOException e) {
          removeSegment(segment);
          throw new SegmentLoadingException(
              e, "Failed to write to disk segment info cache file[%s]", segmentInfoCacheFile);
        }
      }

      try {
        announcer.announceSegment(segment);
      } catch (IOException e) {
        throw new SegmentLoadingException(
            e, "Failed to announce segment[%s]", segment.getIdentifier());
      }

    } catch (SegmentLoadingException e) {
      log.makeAlert(e, "Failed to load segment for dataSource").addData("segment", segment).emit();
    }
  }
Example No. 2
  private void loadCache() {
    File baseDir = config.getInfoDir();
    if (!baseDir.exists()) {
      return;
    }

    List<DataSegment> cachedSegments = Lists.newArrayList();
    for (File file : baseDir.listFiles()) {
      log.info("Loading segment cache file [%s]", file);
      try {
        DataSegment segment = jsonMapper.readValue(file, DataSegment.class);
        if (serverManager.isSegmentCached(segment)) {
          cachedSegments.add(segment);
        } else {
          log.warn(
              "Unable to find cache file for %s. Deleting lookup entry", segment.getIdentifier());

          File segmentInfoCacheFile = new File(config.getInfoDir(), segment.getIdentifier());
          if (!segmentInfoCacheFile.delete()) {
            log.warn("Unable to delete segmentInfoCacheFile[%s]", segmentInfoCacheFile);
          }
        }
      } catch (Exception e) {
        log.makeAlert(e, "Failed to load segment from segmentInfo file")
            .addData("file", file)
            .emit();
      }
    }

    addSegments(cachedSegments);
  }
Example No. 3
 @DELETE
 @Path("{id}")
 @Produces(MediaType.APPLICATION_JSON)
 public Response getServer(@PathParam("id") String queryId) {
   if (log.isDebugEnabled()) {
     log.debug("Received cancel request for query [%s]", queryId);
   }
   queryManager.cancelQuery(queryId);
   return Response.status(Response.Status.ACCEPTED).build();
 }
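
Despite its legacy name (getServer), the handler above cancels the query with the given id and returns 202 ACCEPTED. Below is a minimal, hedged client-side sketch of invoking such an endpoint with the standard JAX-RS 2.0 client; the host, port, and base path are assumptions and are not taken from the example.

import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.core.Response;

public class QueryCancelClientSketch {
  public static void main(String[] args) {
    final String queryId = args[0];
    final Client client = ClientBuilder.newClient();
    // Base URL is an assumption; substitute the actual broker host, port, and resource root.
    final Response response =
        client.target("http://localhost:8082/druid/v2").path(queryId).request().delete();
    // The resource method above returns 202 ACCEPTED once the cancel request has been issued.
    System.out.println("Cancel response status: " + response.getStatus());
    client.close();
  }
}
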
Example No. 4
  public void addSegments(Iterable<DataSegment> segments) {
    try {
      final List<String> segmentFailures = Lists.newArrayList();
      final List<DataSegment> validSegments = Lists.newArrayList();

      for (DataSegment segment : segments) {
        log.info("Loading segment %s", segment.getIdentifier());

        try {
          serverManager.loadSegment(segment);
        } catch (Exception e) {
          log.error(e, "Exception loading segment[%s]", segment.getIdentifier());
          removeSegment(segment);
          segmentFailures.add(segment.getIdentifier());
          continue;
        }

        File segmentInfoCacheFile = new File(config.getInfoDir(), segment.getIdentifier());
        if (!segmentInfoCacheFile.exists()) {
          try {
            jsonMapper.writeValue(segmentInfoCacheFile, segment);
          } catch (IOException e) {
            log.error(
                e, "Failed to write to disk segment info cache file[%s]", segmentInfoCacheFile);
            removeSegment(segment);
            segmentFailures.add(segment.getIdentifier());
            continue;
          }
        }

        validSegments.add(segment);
      }

      try {
        announcer.announceSegments(validSegments);
      } catch (IOException e) {
        throw new SegmentLoadingException(e, "Failed to announce segments[%s]", segments);
      }

      if (!segmentFailures.isEmpty()) {
        for (String segmentFailure : segmentFailures) {
          log.error("%s failed to load", segmentFailure);
        }
        throw new SegmentLoadingException(
            "%,d errors seen while loading segments", segmentFailures.size());
      }
    } catch (SegmentLoadingException e) {
      log.makeAlert(e, "Failed to load segments for dataSource")
          .addData("segments", segments)
          .emit();
    }
  }
Example No. 5
  @Override
  public void removeSegment(DataSegment segment) {
    try {
      serverManager.dropSegment(segment);

      File segmentInfoCacheFile = new File(config.getInfoDir(), segment.getIdentifier());
      if (!segmentInfoCacheFile.delete()) {
        log.warn("Unable to delete segmentInfoCacheFile[%s]", segmentInfoCacheFile);
      }

      announcer.unannounceSegment(segment);
    } catch (Exception e) {
      log.makeAlert(e, "Failed to remove segment").addData("segment", segment).emit();
    }
  }
Example No. 6
  public void dropSegment(String from, String segmentName, final LoadPeonCallback callback) {
    try {
      final DruidServer fromServer = serverInventoryView.getInventoryValue(from);
      if (fromServer == null) {
        throw new IAE("Unable to find server [%s]", from);
      }

      final DataSegment segment = fromServer.getSegment(segmentName);
      if (segment == null) {
        throw new IAE("Unable to find segment [%s] on server [%s]", segmentName, from);
      }

      final LoadQueuePeon dropPeon = loadManagementPeons.get(from);
      if (dropPeon == null) {
        throw new IAE("LoadQueuePeon hasn't been created yet for path [%s]", from);
      }

      if (!dropPeon.getSegmentsToDrop().contains(segment)) {
        dropPeon.dropSegment(segment, callback);
      }
    } catch (Exception e) {
      log.makeAlert(e, "Exception dropping segment %s", segmentName).emit();
      callback.execute();
    }
  }
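
A hedged sketch of how the method above might be called; the coordinator reference, server key, segment name, and logger are placeholders introduced for illustration. The callback's execute() runs after the drop request has been handled, or immediately from the catch block when the drop cannot be queued.

  // Hypothetical caller; "coordinator", the server key, the segment name, and "log" are illustrative only.
  final String segmentName = "wikipedia_2013-01-01T00:00:00.000Z_2013-01-02T00:00:00.000Z_v1";
  coordinator.dropSegment(
      "historical-host:8083",
      segmentName,
      new LoadPeonCallback() {
        @Override
        public void execute() {
          // Runs once the drop has been processed (or from the error path above).
          log.info("Drop request for segment[%s] processed", segmentName);
        }
      });
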
Example No. 7
  @Before
  public void setUp() throws IOException {
    EmittingLogger.registerEmitter(new NoopServiceEmitter());

    queryWaitLatch = new CountDownLatch(1);
    queryWaitYieldLatch = new CountDownLatch(1);
    queryNotifyLatch = new CountDownLatch(1);
    factory = new MyQueryRunnerFactory(queryWaitLatch, queryWaitYieldLatch, queryNotifyLatch);
    serverManagerExec = Executors.newFixedThreadPool(2);
    serverManager =
        new ServerManager(
            new SegmentLoader() {
              @Override
              public boolean isSegmentLoaded(DataSegment segment) throws SegmentLoadingException {
                return false;
              }

              @Override
              public Segment getSegment(final DataSegment segment) {
                return new SegmentForTesting(
                    MapUtils.getString(segment.getLoadSpec(), "version"),
                    (Interval) segment.getLoadSpec().get("interval"));
              }

              @Override
              public File getSegmentFiles(DataSegment segment) throws SegmentLoadingException {
                throw new UnsupportedOperationException();
              }

              @Override
              public void cleanup(DataSegment segment) throws SegmentLoadingException {}
            },
            new QueryRunnerFactoryConglomerate() {
              @Override
              public <T, QueryType extends Query<T>> QueryRunnerFactory<T, QueryType> findFactory(
                  QueryType query) {
                return (QueryRunnerFactory) factory;
              }
            },
            new NoopServiceEmitter(),
            serverManagerExec,
            new DefaultObjectMapper(),
            new LocalCacheProvider().get(),
            new CacheConfig());

    loadQueryable("test", "1", new Interval("P1d/2011-04-01"));
    loadQueryable("test", "1", new Interval("P1d/2011-04-02"));
    loadQueryable("test", "2", new Interval("P1d/2011-04-02"));
    loadQueryable("test", "1", new Interval("P1d/2011-04-03"));
    loadQueryable("test", "1", new Interval("P1d/2011-04-04"));
    loadQueryable("test", "1", new Interval("P1d/2011-04-05"));
    loadQueryable("test", "2", new Interval("PT1h/2011-04-04T01"));
    loadQueryable("test", "2", new Interval("PT1h/2011-04-04T02"));
    loadQueryable("test", "2", new Interval("PT1h/2011-04-04T03"));
    loadQueryable("test", "2", new Interval("PT1h/2011-04-04T05"));
    loadQueryable("test", "2", new Interval("PT1h/2011-04-04T06"));
    loadQueryable("test2", "1", new Interval("P1d/2011-04-01"));
    loadQueryable("test2", "1", new Interval("P1d/2011-04-02"));
  }
Example No. 8
 @Provides
 @ManageLifecycle
 public ServiceEmitter getServiceEmitter(
     @Self Supplier<DruidNode> configSupplier, Emitter emitter) {
   final DruidNode config = configSupplier.get();
   final ServiceEmitter retVal =
       new ServiceEmitter(config.getServiceName(), config.getHost(), emitter);
   EmittingLogger.registerEmitter(retVal);
   return retVal;
 }
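
Once a ServiceEmitter has been registered through EmittingLogger.registerEmitter (as the provider above does), the makeAlert(...).addData(...).emit() pattern used throughout these examples becomes available on any EmittingLogger. A minimal sketch, assuming the usual Logger-style constructor that takes the owning class; the class and method names here are illustrative and imports are omitted:

  public class SegmentLoadReporter {
    private static final EmittingLogger log = new EmittingLogger(SegmentLoadReporter.class);

    public void reportLoadFailure(Exception e, DataSegment segment) {
      // Logs locally and also emits an alert event through the registered emitter.
      log.makeAlert(e, "Failed to load segment").addData("segment", segment).emit();
    }
  }
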
Example No. 9
  private void stopBeingLeader() {
    synchronized (lock) {
      try {
        log.info("I am no longer the leader...");

        for (String server : loadManagementPeons.keySet()) {
          LoadQueuePeon peon = loadManagementPeons.remove(server);
          peon.stop();
        }
        loadManagementPeons.clear();

        serviceAnnouncer.unannounce(self);
        serverInventoryView.stop();
        databaseRuleManager.stop();
        databaseSegmentManager.stop();
        leader = false;
      } catch (Exception e) {
        log.makeAlert(e, "Unable to stopBeingLeader").emit();
      }
    }
  }
Example No. 10
  protected void addSingleInventory(final DruidServer container, final DataSegment inventory) {
    log.info("Server[%s] added segment[%s]", container.getName(), inventory.getIdentifier());

    if (container.getSegment(inventory.getIdentifier()) != null) {
      log.warn(
          "Not adding or running callbacks for existing segment[%s] on server[%s]",
          inventory.getIdentifier(), container.getName());

      return;
    }

    container.addDataSegment(inventory.getIdentifier(), inventory);

    runSegmentCallbacks(
        new Function<SegmentCallback, CallbackAction>() {
          @Override
          public CallbackAction apply(SegmentCallback input) {
            return input.segmentAdded(container.getMetadata(), inventory);
          }
        });
  }
Example No. 11
  protected void removeSingleInventory(final DruidServer container, String inventoryKey) {
    log.info("Server[%s] removed segment[%s]", container.getName(), inventoryKey);
    final DataSegment segment = container.getSegment(inventoryKey);

    if (segment == null) {
      log.warn(
          "Not running cleanup or callbacks for non-existing segment[%s] on server[%s]",
          inventoryKey, container.getName());

      return;
    }

    container.removeDataSegment(inventoryKey);

    runSegmentCallbacks(
        new Function<SegmentCallback, CallbackAction>() {
          @Override
          public CallbackAction apply(SegmentCallback input) {
            return input.segmentRemoved(container.getMetadata(), segment);
          }
        });
  }
Example No. 12
  private void initializeEmitter() {
    if (emitter == null) {
      final HttpClient httpClient =
          HttpClientInit.createClient(
              HttpClientConfig.builder().withNumConnections(1).build(), lifecycle);

      emitter =
          new ServiceEmitter(
              PropUtils.getProperty(props, "druid.service"),
              PropUtils.getProperty(props, "druid.host"),
              Emitters.create(props, httpClient, jsonMapper, lifecycle));
    }
    EmittingLogger.registerEmitter(emitter);
  }
Example No. 13
    @Override
    public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
      DatasourceWhitelist whitelist = whitelistRef.get();

      for (DataSegment dataSegment : params.getAvailableSegments()) {
        if (whitelist == null || whitelist.contains(dataSegment.getDataSource())) {
          final Integer binaryVersion = dataSegment.getBinaryVersion();

          if (binaryVersion == null || binaryVersion < IndexIO.CURRENT_VERSION_ID) {
            log.info("Upgrading version on segment[%s]", dataSegment.getIdentifier());
            indexingServiceClient.upgradeSegment(dataSegment);
          }
        }
      }

      return params;
    }
Example No. 14
  @LifecycleStop
  public void stop() {
    log.info("Stopping ZkCoordinator for [%s]", me);
    synchronized (lock) {
      if (!started) {
        return;
      }

      try {
        loadQueueCache.close();
      } catch (Exception e) {
        throw Throwables.propagate(e);
      } finally {
        loadQueueCache = null;
        started = false;
      }
    }
  }
Example No. 15
  @Before
  public void setUp() throws IOException {
    EmittingLogger.registerEmitter(new NoopServiceEmitter());

    factory = new MyQueryRunnerFactory();
    serverManager =
        new ServerManager(
            new SegmentLoader() {
              @Override
              public Segment getSegment(final DataSegment segment) {
                return new SegmentForTesting(
                    MapUtils.getString(segment.getLoadSpec(), "version"),
                    (Interval) segment.getLoadSpec().get("interval"));
              }

              @Override
              public void cleanup(DataSegment segment) throws SegmentLoadingException {}
            },
            new QueryRunnerFactoryConglomerate() {
              @Override
              public <T, QueryType extends Query<T>> QueryRunnerFactory<T, QueryType> findFactory(
                  QueryType query) {
                return (QueryRunnerFactory) factory;
              }
            },
            new NoopServiceEmitter(),
            MoreExecutors.sameThreadExecutor());

    loadQueryable("test", "1", new Interval("P1d/2011-04-01"));
    loadQueryable("test", "1", new Interval("P1d/2011-04-02"));
    loadQueryable("test", "2", new Interval("P1d/2011-04-02"));
    loadQueryable("test", "1", new Interval("P1d/2011-04-03"));
    loadQueryable("test", "1", new Interval("P1d/2011-04-04"));
    loadQueryable("test", "1", new Interval("P1d/2011-04-05"));
    loadQueryable("test", "2", new Interval("PT1h/2011-04-04T01"));
    loadQueryable("test", "2", new Interval("PT1h/2011-04-04T02"));
    loadQueryable("test", "2", new Interval("PT1h/2011-04-04T03"));
    loadQueryable("test", "2", new Interval("PT1h/2011-04-04T05"));
    loadQueryable("test", "2", new Interval("PT1h/2011-04-04T06"));
    loadQueryable("test2", "1", new Interval("P1d/2011-04-01"));
    loadQueryable("test2", "1", new Interval("P1d/2011-04-02"));
  }
Example No. 16
  @LifecycleStop
  public void stop() {
    synchronized (lock) {
      if (!started) {
        return;
      }

      stopBeingLeader();

      try {
        leaderLatch.get().close();
      } catch (IOException e) {
        log.warn(e, "Unable to close leaderLatch, ignoring");
      }

      started = false;

      exec.shutdownNow();
    }
  }
Example No. 17
    @Override
    public TaskStatus call() {
      final long startTime = System.currentTimeMillis();
      final File taskDir = toolbox.getTaskWorkDir();

      TaskStatus status;

      try {
        log.info("Running task: %s", task.getId());
        status = task.run(toolbox);
      } catch (InterruptedException e) {
        log.error(e, "Interrupted while running task[%s]", task);
        throw Throwables.propagate(e);
      } catch (Exception e) {
        log.error(e, "Exception while running task[%s]", task);
        status = TaskStatus.failure(task.getId());
      } catch (Throwable t) {
        log.error(t, "Uncaught Throwable while running task[%s]", task);
        throw Throwables.propagate(t);
      }

      try {
        if (taskDir.exists()) {
          log.info("Removing task directory: %s", taskDir);
          FileUtils.deleteDirectory(taskDir);
        }
      } catch (Exception e) {
        log.makeAlert(e, "Failed to delete task directory")
            .addData("taskDir", taskDir.toString())
            .addData("task", task.getId())
            .emit();
      }

      try {
        return status.withDuration(System.currentTimeMillis() - startTime);
      } catch (Exception e) {
        log.error(e, "Uncaught Exception during callback for task[%s]", task);
        throw Throwables.propagate(e);
      }
    }
Example No. 18
  @Override
  public Plumber findPlumber(final Schema schema, final FireDepartmentMetrics metrics) {
    verifyState();

    final RejectionPolicy rejectionPolicy = rejectionPolicyFactory.create(windowPeriod);
    log.info("Creating plumber using rejectionPolicy[%s]", rejectionPolicy);

    return new RealtimePlumber(
        windowPeriod,
        basePersistDirectory,
        segmentGranularity,
        schema,
        metrics,
        rejectionPolicy,
        emitter,
        conglomerate,
        segmentAnnouncer,
        queryExecutorService,
        versioningPolicy,
        dataSegmentPusher,
        segmentPublisher,
        serverView,
        maxPendingPersists);
  }
Example No. 19
  private <T> QueryRunner<T> getQueryRunnerImpl(Query<T> query) {
    QueryRunner<T> queryRunner = null;
    final String queryDataSource = Iterables.getOnlyElement(query.getDataSource().getNames());

    for (final ThreadPoolTaskRunnerWorkItem taskRunnerWorkItem :
        ImmutableList.copyOf(runningItems)) {
      final Task task = taskRunnerWorkItem.getTask();
      if (task.getDataSource().equals(queryDataSource)) {
        final QueryRunner<T> taskQueryRunner = task.getQueryRunner(query);

        if (taskQueryRunner != null) {
          if (queryRunner == null) {
            queryRunner = taskQueryRunner;
          } else {
            log.makeAlert("Found too many query runners for datasource")
                .addData("dataSource", queryDataSource)
                .emit();
          }
        }
      }
    }

    return queryRunner == null ? new NoopQueryRunner<T>() : queryRunner;
  }
Example No. 20
  public Set<DataSegment> getAvailableDataSegments() {
    Set<DataSegment> availableSegments =
        Sets.newTreeSet(Comparators.inverse(DataSegment.bucketMonthComparator()));

    Iterable<DataSegment> dataSegments =
        Iterables.concat(
            Iterables.transform(
                databaseSegmentManager.getInventory(),
                new Function<DruidDataSource, Iterable<DataSegment>>() {
                  @Override
                  public Iterable<DataSegment> apply(DruidDataSource input) {
                    return input.getSegments();
                  }
                }));

    for (DataSegment dataSegment : dataSegments) {
      if (dataSegment.getSize() < 0) {
        log.makeAlert("No size on Segment, wtf?").addData("segment", dataSegment).emit();
      }
      availableSegments.add(dataSegment);
    }

    return availableSegments;
  }
Example No. 21
  @Override
  public Sequence<T> run(final Query<T> query, final Map<String, Object> responseContext) {
    final QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
    final CacheStrategy<T, Object, Query<T>> strategy = toolChest.getCacheStrategy(query);

    final Map<DruidServer, List<SegmentDescriptor>> serverSegments = Maps.newTreeMap();

    final List<Pair<Interval, byte[]>> cachedResults = Lists.newArrayList();
    final Map<String, CachePopulator> cachePopulatorMap = Maps.newHashMap();

    final boolean useCache =
        BaseQuery.getContextUseCache(query, true)
            && strategy != null
            && cacheConfig.isUseCache()
            && cacheConfig.isQueryCacheable(query);
    final boolean populateCache =
        BaseQuery.getContextPopulateCache(query, true)
            && strategy != null
            && cacheConfig.isPopulateCache()
            && cacheConfig.isQueryCacheable(query);
    final boolean isBySegment = BaseQuery.getContextBySegment(query, false);

    final ImmutableMap.Builder<String, Object> contextBuilder = new ImmutableMap.Builder<>();

    final int priority = BaseQuery.getContextPriority(query, 0);
    contextBuilder.put("priority", priority);

    if (populateCache) {
      // prevent down-stream nodes from caching results as well if we are populating the cache
      contextBuilder.put(CacheConfig.POPULATE_CACHE, false);
      contextBuilder.put("bySegment", true);
    }

    TimelineLookup<String, ServerSelector> timeline = serverView.getTimeline(query.getDataSource());

    if (timeline == null) {
      return Sequences.empty();
    }

    // build set of segments to query
    Set<Pair<ServerSelector, SegmentDescriptor>> segments = Sets.newLinkedHashSet();

    List<TimelineObjectHolder<String, ServerSelector>> serversLookup = Lists.newLinkedList();

    // Note that enabling this leads to putting uncovered intervals information in the response
    // headers and might blow up in some cases: https://github.com/druid-io/druid/issues/2108
    int uncoveredIntervalsLimit = BaseQuery.getContextUncoveredIntervalsLimit(query, 0);

    if (uncoveredIntervalsLimit > 0) {
      List<Interval> uncoveredIntervals = Lists.newArrayListWithCapacity(uncoveredIntervalsLimit);
      boolean uncoveredIntervalsOverflowed = false;

      for (Interval interval : query.getIntervals()) {
        Iterable<TimelineObjectHolder<String, ServerSelector>> lookup = timeline.lookup(interval);
        long startMillis = interval.getStartMillis();
        long endMillis = interval.getEndMillis();
        for (TimelineObjectHolder<String, ServerSelector> holder : lookup) {
          Interval holderInterval = holder.getInterval();
          long intervalStart = holderInterval.getStartMillis();
          if (!uncoveredIntervalsOverflowed && startMillis != intervalStart) {
            if (uncoveredIntervalsLimit > uncoveredIntervals.size()) {
              uncoveredIntervals.add(new Interval(startMillis, intervalStart));
            } else {
              uncoveredIntervalsOverflowed = true;
            }
          }
          startMillis = holderInterval.getEndMillis();
          serversLookup.add(holder);
        }

        if (!uncoveredIntervalsOverflowed && startMillis < endMillis) {
          if (uncoveredIntervalsLimit > uncoveredIntervals.size()) {
            uncoveredIntervals.add(new Interval(startMillis, endMillis));
          } else {
            uncoveredIntervalsOverflowed = true;
          }
        }
      }

      if (!uncoveredIntervals.isEmpty()) {
        // This returns intervals for which NO segment is present. That is not necessarily an
        // indication that the data doesn't exist or is incomplete; the data could exist and just
        // not be loaded yet. In either case, though, this query will not include any data from
        // the identified intervals.
        responseContext.put("uncoveredIntervals", uncoveredIntervals);
        responseContext.put("uncoveredIntervalsOverflowed", uncoveredIntervalsOverflowed);
      }
    } else {
      for (Interval interval : query.getIntervals()) {
        Iterables.addAll(serversLookup, timeline.lookup(interval));
      }
    }

    // Let tool chest filter out unneeded segments
    final List<TimelineObjectHolder<String, ServerSelector>> filteredServersLookup =
        toolChest.filterSegments(query, serversLookup);
    Map<String, Optional<RangeSet<String>>> dimensionRangeCache = Maps.newHashMap();

    // Filter unneeded chunks based on partition dimension
    for (TimelineObjectHolder<String, ServerSelector> holder : filteredServersLookup) {
      final Set<PartitionChunk<ServerSelector>> filteredChunks =
          DimFilterUtils.filterShards(
              query.getFilter(),
              holder.getObject(),
              new Function<PartitionChunk<ServerSelector>, ShardSpec>() {
                @Override
                public ShardSpec apply(PartitionChunk<ServerSelector> input) {
                  return input.getObject().getSegment().getShardSpec();
                }
              },
              dimensionRangeCache);
      for (PartitionChunk<ServerSelector> chunk : filteredChunks) {
        ServerSelector selector = chunk.getObject();
        final SegmentDescriptor descriptor =
            new SegmentDescriptor(
                holder.getInterval(), holder.getVersion(), chunk.getChunkNumber());
        segments.add(Pair.of(selector, descriptor));
      }
    }

    final byte[] queryCacheKey;

    if ((populateCache || useCache) // implies strategy != null
        && !isBySegment) // explicit bySegment queries are never cached
    {
      queryCacheKey = strategy.computeCacheKey(query);
    } else {
      queryCacheKey = null;
    }

    if (queryCacheKey != null) {
      // cacheKeys map must preserve segment ordering, in order for shards to always be combined in
      // the same order
      Map<Pair<ServerSelector, SegmentDescriptor>, Cache.NamedKey> cacheKeys =
          Maps.newLinkedHashMap();
      for (Pair<ServerSelector, SegmentDescriptor> segment : segments) {
        final Cache.NamedKey segmentCacheKey =
            CacheUtil.computeSegmentCacheKey(
                segment.lhs.getSegment().getIdentifier(), segment.rhs, queryCacheKey);
        cacheKeys.put(segment, segmentCacheKey);
      }

      // Pull cached segments from cache and remove from set of segments to query
      final Map<Cache.NamedKey, byte[]> cachedValues;
      if (useCache) {
        cachedValues =
            cache.getBulk(
                Iterables.limit(cacheKeys.values(), cacheConfig.getCacheBulkMergeLimit()));
      } else {
        cachedValues = ImmutableMap.of();
      }

      for (Map.Entry<Pair<ServerSelector, SegmentDescriptor>, Cache.NamedKey> entry :
          cacheKeys.entrySet()) {
        Pair<ServerSelector, SegmentDescriptor> segment = entry.getKey();
        Cache.NamedKey segmentCacheKey = entry.getValue();
        final Interval segmentQueryInterval = segment.rhs.getInterval();

        final byte[] cachedValue = cachedValues.get(segmentCacheKey);
        if (cachedValue != null) {
          // remove cached segment from set of segments to query
          segments.remove(segment);
          cachedResults.add(Pair.of(segmentQueryInterval, cachedValue));
        } else if (populateCache) {
          // otherwise, if populating cache, add segment to list of segments to cache
          final String segmentIdentifier = segment.lhs.getSegment().getIdentifier();
          cachePopulatorMap.put(
              String.format("%s_%s", segmentIdentifier, segmentQueryInterval),
              new CachePopulator(cache, objectMapper, segmentCacheKey));
        }
      }
    }

    // Compile list of all segments not pulled from cache
    for (Pair<ServerSelector, SegmentDescriptor> segment : segments) {
      final QueryableDruidServer queryableDruidServer = segment.lhs.pick();

      if (queryableDruidServer == null) {
        log.makeAlert(
                "No servers found for SegmentDescriptor[%s] for DataSource[%s]?! How can this be?!",
                segment.rhs, query.getDataSource())
            .emit();
      } else {
        final DruidServer server = queryableDruidServer.getServer();
        List<SegmentDescriptor> descriptors = serverSegments.get(server);

        if (descriptors == null) {
          descriptors = Lists.newArrayList();
          serverSegments.put(server, descriptors);
        }

        descriptors.add(segment.rhs);
      }
    }

    return new LazySequence<>(
        new Supplier<Sequence<T>>() {
          @Override
          public Sequence<T> get() {
            ArrayList<Sequence<T>> sequencesByInterval = Lists.newArrayList();
            addSequencesFromCache(sequencesByInterval);
            addSequencesFromServer(sequencesByInterval);

            return mergeCachedAndUncachedSequences(query, sequencesByInterval);
          }

          private void addSequencesFromCache(ArrayList<Sequence<T>> listOfSequences) {
            if (strategy == null) {
              return;
            }

            final Function<Object, T> pullFromCacheFunction = strategy.pullFromCache();
            final TypeReference<Object> cacheObjectClazz = strategy.getCacheObjectClazz();
            for (Pair<Interval, byte[]> cachedResultPair : cachedResults) {
              final byte[] cachedResult = cachedResultPair.rhs;
              Sequence<Object> cachedSequence =
                  new BaseSequence<>(
                      new BaseSequence.IteratorMaker<Object, Iterator<Object>>() {
                        @Override
                        public Iterator<Object> make() {
                          try {
                            if (cachedResult.length == 0) {
                              return Iterators.emptyIterator();
                            }

                            return objectMapper.readValues(
                                objectMapper.getFactory().createParser(cachedResult),
                                cacheObjectClazz);
                          } catch (IOException e) {
                            throw Throwables.propagate(e);
                          }
                        }

                        @Override
                        public void cleanup(Iterator<Object> iterFromMake) {}
                      });
              listOfSequences.add(Sequences.map(cachedSequence, pullFromCacheFunction));
            }
          }

          private void addSequencesFromServer(ArrayList<Sequence<T>> listOfSequences) {
            listOfSequences.ensureCapacity(listOfSequences.size() + serverSegments.size());

            final Query<T> rewrittenQuery = query.withOverriddenContext(contextBuilder.build());

            // Loop through each server, setting up the query and initiating it.
            // The data gets handled as a Future and parsed in the long Sequence chain in the
            // resultSeqToAdd setter.
            for (Map.Entry<DruidServer, List<SegmentDescriptor>> entry :
                serverSegments.entrySet()) {
              final DruidServer server = entry.getKey();
              final List<SegmentDescriptor> descriptors = entry.getValue();

              final QueryRunner clientQueryable = serverView.getQueryRunner(server);

              if (clientQueryable == null) {
                log.error("WTF!? server[%s] doesn't have a client Queryable?", server);
                continue;
              }

              final MultipleSpecificSegmentSpec segmentSpec =
                  new MultipleSpecificSegmentSpec(descriptors);

              final Sequence<T> resultSeqToAdd;
              if (!server.isAssignable()
                  || !populateCache
                  || isBySegment) { // Direct server queryable
                if (!isBySegment) {
                  resultSeqToAdd =
                      clientQueryable.run(query.withQuerySegmentSpec(segmentSpec), responseContext);
                } else {
                  // bySegment queries need to be de-serialized, see DirectDruidClient.run()

                  @SuppressWarnings("unchecked")
                  final Query<Result<BySegmentResultValueClass<T>>> bySegmentQuery =
                      (Query<Result<BySegmentResultValueClass<T>>>) ((Query) query);

                  @SuppressWarnings("unchecked")
                  final Sequence<Result<BySegmentResultValueClass<T>>> resultSequence =
                      clientQueryable.run(
                          bySegmentQuery.withQuerySegmentSpec(segmentSpec), responseContext);

                  resultSeqToAdd =
                      (Sequence)
                          Sequences.map(
                              resultSequence,
                              new Function<
                                  Result<BySegmentResultValueClass<T>>,
                                  Result<BySegmentResultValueClass<T>>>() {
                                @Override
                                public Result<BySegmentResultValueClass<T>> apply(
                                    Result<BySegmentResultValueClass<T>> input) {
                                  final BySegmentResultValueClass<T> bySegmentValue =
                                      input.getValue();
                                  return new Result<>(
                                      input.getTimestamp(),
                                      new BySegmentResultValueClass<T>(
                                          Lists.transform(
                                              bySegmentValue.getResults(),
                                              toolChest.makePreComputeManipulatorFn(
                                                  query, MetricManipulatorFns.deserializing())),
                                          bySegmentValue.getSegmentId(),
                                          bySegmentValue.getInterval()));
                                }
                              });
                }
              } else { // Requires some manipulation on broker side
                @SuppressWarnings("unchecked")
                final Sequence<Result<BySegmentResultValueClass<T>>> runningSequence =
                    clientQueryable.run(
                        rewrittenQuery.withQuerySegmentSpec(segmentSpec), responseContext);
                resultSeqToAdd =
                    new MergeSequence(
                        query.getResultOrdering(),
                        Sequences.<Result<BySegmentResultValueClass<T>>, Sequence<T>>map(
                            runningSequence,
                            new Function<Result<BySegmentResultValueClass<T>>, Sequence<T>>() {
                              private final Function<T, Object> cacheFn =
                                  strategy.prepareForCache();

                              // Actually do something with the results
                              @Override
                              public Sequence<T> apply(Result<BySegmentResultValueClass<T>> input) {
                                final BySegmentResultValueClass<T> value = input.getValue();
                                final CachePopulator cachePopulator =
                                    cachePopulatorMap.get(
                                        String.format(
                                            "%s_%s", value.getSegmentId(), value.getInterval()));

                                final Queue<ListenableFuture<Object>> cacheFutures =
                                    new ConcurrentLinkedQueue<>();

                                return Sequences.<T>withEffect(
                                    Sequences.<T, T>map(
                                        Sequences.<T, T>map(
                                            Sequences.<T>simple(value.getResults()),
                                            new Function<T, T>() {
                                              @Override
                                              public T apply(final T input) {
                                                if (cachePopulator != null) {
                                                  // only compute cache data if populating cache
                                                  cacheFutures.add(
                                                      backgroundExecutorService.submit(
                                                          new Callable<Object>() {
                                                            @Override
                                                            public Object call() {
                                                              return cacheFn.apply(input);
                                                            }
                                                          }));
                                                }
                                                return input;
                                              }
                                            }),
                                        toolChest.makePreComputeManipulatorFn(
                                            // Ick... most makePreComputeManipulatorFn directly cast
                                            // to their ToolChest query type of choice
                                            // This casting is sub-optimal, but hasn't caused any
                                            // major problems yet...
                                            (Query) rewrittenQuery,
                                            MetricManipulatorFns.deserializing())),
                                    new Runnable() {
                                      @Override
                                      public void run() {
                                        if (cachePopulator != null) {
                                          Futures.addCallback(
                                              Futures.allAsList(cacheFutures),
                                              new FutureCallback<List<Object>>() {
                                                @Override
                                                public void onSuccess(List<Object> cacheData) {
                                                  cachePopulator.populate(cacheData);
                                                  // Help out GC by making sure all references are
                                                  // gone
                                                  cacheFutures.clear();
                                                }

                                                @Override
                                                public void onFailure(Throwable throwable) {
                                                  log.error(throwable, "Background caching failed");
                                                }
                                              },
                                              backgroundExecutorService);
                                        }
                                      }
                                    },
                                    MoreExecutors.sameThreadExecutor()); // End withEffect
                              }
                            }));
              }

              listOfSequences.add(resultSeqToAdd);
            }
          }
        } // End of Supplier
        );
  }
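
The caching behavior in the run method above is driven by per-query context flags (useCache, populateCache, bySegment, priority). Below is a hedged, in-context sketch of overriding them for a single query, reusing the withOverriddenContext call seen in the example; the string keys follow those visible in Example No. 24.

  // Sketch only (fragment inside the same generic context as above):
  // disable both reading from and writing to the cache for this query.
  final Query<T> uncachedQuery =
      query.withOverriddenContext(
          ImmutableMap.<String, Object>of(
              "useCache", false,
              "populateCache", false));
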
Example No. 22
  public AppenderatorTester(final int maxRowsInMemory, final File basePersistDirectory) {
    objectMapper = new DefaultObjectMapper();
    objectMapper.registerSubtypes(LinearShardSpec.class);

    final Map<String, Object> parserMap =
        objectMapper.convertValue(
            new MapInputRowParser(
                new JSONParseSpec(
                    new TimestampSpec("ts", "auto", null), new DimensionsSpec(null, null, null))),
            Map.class);
    schema =
        new DataSchema(
            DATASOURCE,
            parserMap,
            new AggregatorFactory[] {
              new CountAggregatorFactory("count"), new LongSumAggregatorFactory("met", "met")
            },
            new UniformGranularitySpec(Granularity.MINUTE, QueryGranularities.NONE, null),
            objectMapper);

    tuningConfig =
        new RealtimeTuningConfig(
            maxRowsInMemory,
            null,
            null,
            basePersistDirectory,
            null,
            null,
            null,
            null,
            null,
            null,
            0,
            0,
            null,
            null);

    metrics = new FireDepartmentMetrics();
    queryExecutor = Execs.singleThreaded("queryExecutor(%d)");

    indexIO =
        new IndexIO(
            objectMapper,
            new ColumnConfig() {
              @Override
              public int columnCacheSizeBytes() {
                return 0;
              }
            });
    indexMerger = new IndexMerger(objectMapper, indexIO);

    emitter =
        new ServiceEmitter(
            "test",
            "test",
            new LoggingEmitter(
                new Logger(AppenderatorTester.class), LoggingEmitter.Level.INFO, objectMapper));
    emitter.start();
    EmittingLogger.registerEmitter(emitter);
    dataSegmentPusher =
        new DataSegmentPusher() {
          @Deprecated
          @Override
          public String getPathForHadoop(String dataSource) {
            return getPathForHadoop();
          }

          @Override
          public String getPathForHadoop() {
            throw new UnsupportedOperationException();
          }

          @Override
          public DataSegment push(File file, DataSegment segment) throws IOException {
            pushedSegments.add(segment);
            return segment;
          }
        };
    appenderator =
        Appenderators.createRealtime(
            schema,
            tuningConfig,
            metrics,
            dataSegmentPusher,
            objectMapper,
            indexIO,
            indexMerger,
            new DefaultQueryRunnerFactoryConglomerate(
                ImmutableMap.<Class<? extends Query>, QueryRunnerFactory>of(
                    TimeseriesQuery.class,
                    new TimeseriesQueryRunnerFactory(
                        new TimeseriesQueryQueryToolChest(
                            new IntervalChunkingQueryRunnerDecorator(
                                queryExecutor, QueryRunnerTestHelper.NOOP_QUERYWATCHER, emitter)),
                        new TimeseriesQueryEngine(),
                        QueryRunnerTestHelper.NOOP_QUERYWATCHER))),
            new DataSegmentAnnouncer() {
              @Override
              public void announceSegment(DataSegment segment) throws IOException {}

              @Override
              public void unannounceSegment(DataSegment segment) throws IOException {}

              @Override
              public void announceSegments(Iterable<DataSegment> segments) throws IOException {}

              @Override
              public void unannounceSegments(Iterable<DataSegment> segments) throws IOException {}

              @Override
              public boolean isAnnounced(DataSegment segment) {
                return false;
              }
            },
            emitter,
            queryExecutor,
            MapCache.create(2048),
            new CacheConfig());
  }
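
A brief usage sketch of the test harness above, assuming only the constructor signature shown. The temporary-directory handling is an assumption (any writable scratch directory would do), and the surrounding test method would need to declare throws IOException.

  // Sketch: build the tester with a small in-memory row limit and a scratch persist directory.
  final File scratchDir = java.nio.file.Files.createTempDirectory("appenderator-test").toFile();
  final AppenderatorTester tester = new AppenderatorTester(100, scratchDir);
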
Example No. 23
  @LifecycleStart
  public void start() throws IOException {
    synchronized (lock) {
      if (started) {
        return;
      }

      log.info("Starting zkCoordinator for server[%s]", me.getName());

      final String loadQueueLocation = ZKPaths.makePath(zkPaths.getLoadQueuePath(), me.getName());
      final String servedSegmentsLocation =
          ZKPaths.makePath(zkPaths.getServedSegmentsPath(), me.getName());
      final String liveSegmentsLocation =
          ZKPaths.makePath(zkPaths.getLiveSegmentsPath(), me.getName());

      loadQueueCache = new PathChildrenCache(curator, loadQueueLocation, true, true, loadingExec);

      try {
        curator.newNamespaceAwareEnsurePath(loadQueueLocation).ensure(curator.getZookeeperClient());
        curator
            .newNamespaceAwareEnsurePath(servedSegmentsLocation)
            .ensure(curator.getZookeeperClient());
        curator
            .newNamespaceAwareEnsurePath(liveSegmentsLocation)
            .ensure(curator.getZookeeperClient());

        loadLocalCache();

        loadQueueCache
            .getListenable()
            .addListener(
                new PathChildrenCacheListener() {
                  @Override
                  public void childEvent(CuratorFramework client, PathChildrenCacheEvent event)
                      throws Exception {
                    final ChildData child = event.getData();
                    switch (event.getType()) {
                      case CHILD_ADDED:
                        final String path = child.getPath();
                        final DataSegmentChangeRequest request =
                            jsonMapper.readValue(child.getData(), DataSegmentChangeRequest.class);

                        log.info("New request[%s] with zNode[%s].", request.asString(), path);

                        try {
                          request.go(
                              getDataSegmentChangeHandler(),
                              new DataSegmentChangeCallback() {
                                boolean hasRun = false;

                                @Override
                                public void execute() {
                                  try {
                                    if (!hasRun) {
                                      curator.delete().guaranteed().forPath(path);
                                      log.info("Completed request [%s]", request.asString());
                                      hasRun = true;
                                    }
                                  } catch (Exception e) {
                                    try {
                                      curator.delete().guaranteed().forPath(path);
                                    } catch (Exception e1) {
                                      log.error(
                                          e1,
                                          "Failed to delete zNode[%s], but ignoring exception.",
                                          path);
                                    }
                                    log.error(e, "Exception while removing zNode[%s]", path);
                                    throw Throwables.propagate(e);
                                  }
                                }
                              });
                        } catch (Exception e) {
                          try {
                            curator.delete().guaranteed().forPath(path);
                          } catch (Exception e1) {
                            log.error(
                                e1, "Failed to delete zNode[%s], but ignoring exception.", path);
                          }

                          log.makeAlert(e, "Segment load/unload: uncaught exception.")
                              .addData("node", path)
                              .addData("nodeProperties", request)
                              .emit();
                        }

                        break;
                      case CHILD_REMOVED:
                        log.info("zNode[%s] was removed", event.getData().getPath());
                        break;
                      default:
                        log.info("Ignoring event[%s]", event);
                    }
                  }
                });
        loadQueueCache.start();
      } catch (Exception e) {
        Throwables.propagateIfPossible(e, IOException.class);
        throw Throwables.propagate(e);
      }

      started = true;
    }
  }
Example No. 24
  @Override
  public Sequence<T> run(final Query<T> query) {
    final QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
    final CacheStrategy<T, Object, Query<T>> strategy = toolChest.getCacheStrategy(query);

    final Map<DruidServer, List<SegmentDescriptor>> serverSegments = Maps.newTreeMap();

    final List<Pair<DateTime, byte[]>> cachedResults = Lists.newArrayList();
    final Map<String, CachePopulator> cachePopulatorMap = Maps.newHashMap();

    final boolean useCache =
        Boolean.parseBoolean(query.getContextValue("useCache", "true")) && strategy != null;
    final boolean populateCache =
        Boolean.parseBoolean(query.getContextValue("populateCache", "true")) && strategy != null;
    final boolean isBySegment = Boolean.parseBoolean(query.getContextValue("bySegment", "false"));

    ImmutableMap.Builder<String, String> contextBuilder = new ImmutableMap.Builder<>();

    final String priority = query.getContextValue("priority", "0");
    contextBuilder.put("priority", priority);

    if (populateCache) {
      contextBuilder.put("bySegment", "true");
    }
    contextBuilder.put("intermediate", "true");

    final Query<T> rewrittenQuery = query.withOverriddenContext(contextBuilder.build());

    VersionedIntervalTimeline<String, ServerSelector> timeline =
        serverView.getTimeline(query.getDataSource());
    if (timeline == null) {
      return Sequences.empty();
    }

    // build set of segments to query
    Set<Pair<ServerSelector, SegmentDescriptor>> segments = Sets.newLinkedHashSet();

    List<TimelineObjectHolder<String, ServerSelector>> serversLookup = Lists.newLinkedList();

    for (Interval interval : rewrittenQuery.getIntervals()) {
      serversLookup.addAll(timeline.lookup(interval));
    }

    // Let tool chest filter out unneeded segments
    final List<TimelineObjectHolder<String, ServerSelector>> filteredServersLookup =
        toolChest.filterSegments(query, serversLookup);

    for (TimelineObjectHolder<String, ServerSelector> holder : filteredServersLookup) {
      for (PartitionChunk<ServerSelector> chunk : holder.getObject()) {
        ServerSelector selector = chunk.getObject();
        final SegmentDescriptor descriptor =
            new SegmentDescriptor(
                holder.getInterval(), holder.getVersion(), chunk.getChunkNumber());

        segments.add(Pair.of(selector, descriptor));
      }
    }

    final byte[] queryCacheKey;
    if (strategy != null) {
      queryCacheKey = strategy.computeCacheKey(query);
    } else {
      queryCacheKey = null;
    }

    if (queryCacheKey != null) {
      Map<Pair<ServerSelector, SegmentDescriptor>, Cache.NamedKey> cacheKeys = Maps.newHashMap();
      for (Pair<ServerSelector, SegmentDescriptor> segment : segments) {
        final Cache.NamedKey segmentCacheKey =
            computeSegmentCacheKey(
                segment.lhs.getSegment().getIdentifier(), segment.rhs, queryCacheKey);
        cacheKeys.put(segment, segmentCacheKey);
      }

      // Pull cached segments from cache and remove from set of segments to query
      final Map<Cache.NamedKey, byte[]> cachedValues;
      if (useCache) {
        cachedValues = cache.getBulk(cacheKeys.values());
      } else {
        cachedValues = ImmutableMap.of();
      }

      for (Map.Entry<Pair<ServerSelector, SegmentDescriptor>, Cache.NamedKey> entry :
          cacheKeys.entrySet()) {
        Pair<ServerSelector, SegmentDescriptor> segment = entry.getKey();
        Cache.NamedKey segmentCacheKey = entry.getValue();
        final Interval segmentQueryInterval = segment.rhs.getInterval();

        final byte[] cachedValue = cachedValues.get(segmentCacheKey);
        if (cachedValue != null) {
          // remove cached segment from set of segments to query
          segments.remove(segment);
          cachedResults.add(Pair.of(segmentQueryInterval.getStart(), cachedValue));
        } else if (populateCache) {
          final String segmentIdentifier = segment.lhs.getSegment().getIdentifier();
          cachePopulatorMap.put(
              String.format("%s_%s", segmentIdentifier, segmentQueryInterval),
              new CachePopulator(cache, objectMapper, segmentCacheKey));
        }
      }
    }

    // Compile list of all segments not pulled from cache
    for (Pair<ServerSelector, SegmentDescriptor> segment : segments) {
      final QueryableDruidServer queryableDruidServer = segment.lhs.pick();

      if (queryableDruidServer == null) {
        log.error("No servers found for %s?! How can this be?!", segment.rhs);
      } else {
        final DruidServer server = queryableDruidServer.getServer();
        List<SegmentDescriptor> descriptors = serverSegments.get(server);

        if (descriptors == null) {
          descriptors = Lists.newArrayList();
          serverSegments.put(server, descriptors);
        }

        descriptors.add(segment.rhs);
      }
    }

    return new LazySequence<>(
        new Supplier<Sequence<T>>() {
          @Override
          public Sequence<T> get() {
            ArrayList<Pair<DateTime, Sequence<T>>> listOfSequences = Lists.newArrayList();

            addSequencesFromServer(listOfSequences);
            addSequencesFromCache(listOfSequences);

            Collections.sort(
                listOfSequences,
                Ordering.natural().onResultOf(Pair.<DateTime, Sequence<T>>lhsFn()));

            final Sequence<Sequence<T>> seq =
                Sequences.simple(
                    Iterables.transform(listOfSequences, Pair.<DateTime, Sequence<T>>rhsFn()));
            if (strategy == null) {
              return toolChest.mergeSequences(seq);
            } else {
              return strategy.mergeSequences(seq);
            }
          }

          private void addSequencesFromCache(
              ArrayList<Pair<DateTime, Sequence<T>>> listOfSequences) {
            if (strategy == null) {
              return;
            }

            final Function<Object, T> pullFromCacheFunction = strategy.pullFromCache();
            final TypeReference<Object> cacheObjectClazz = strategy.getCacheObjectClazz();
            for (Pair<DateTime, byte[]> cachedResultPair : cachedResults) {
              final byte[] cachedResult = cachedResultPair.rhs;
              Sequence<Object> cachedSequence =
                  new BaseSequence<>(
                      new BaseSequence.IteratorMaker<Object, Iterator<Object>>() {
                        @Override
                        public Iterator<Object> make() {
                          try {
                            if (cachedResult.length == 0) {
                              return Iterators.emptyIterator();
                            }

                            return objectMapper.readValues(
                                objectMapper.getFactory().createParser(cachedResult),
                                cacheObjectClazz);
                          } catch (IOException e) {
                            throw Throwables.propagate(e);
                          }
                        }

                        @Override
                        public void cleanup(Iterator<Object> iterFromMake) {}
                      });
              listOfSequences.add(
                  Pair.of(
                      cachedResultPair.lhs, Sequences.map(cachedSequence, pullFromCacheFunction)));
            }
          }

          @SuppressWarnings("unchecked")
          private void addSequencesFromServer(
              ArrayList<Pair<DateTime, Sequence<T>>> listOfSequences) {
            for (Map.Entry<DruidServer, List<SegmentDescriptor>> entry :
                serverSegments.entrySet()) {
              final DruidServer server = entry.getKey();
              final List<SegmentDescriptor> descriptors = entry.getValue();

              final QueryRunner clientQueryable = serverView.getQueryRunner(server);
              if (clientQueryable == null) {
                log.makeAlert("WTF!? server[%s] doesn't have a client Queryable?", server).emit();
                continue;
              }

              final Sequence<T> resultSeqToAdd;
              final MultipleSpecificSegmentSpec segmentSpec =
                  new MultipleSpecificSegmentSpec(descriptors);
              List<Interval> intervals = segmentSpec.getIntervals();

              if (!server.isAssignable() || !populateCache || isBySegment) {
                resultSeqToAdd = clientQueryable.run(query.withQuerySegmentSpec(segmentSpec));
              } else {
                resultSeqToAdd =
                    toolChest.mergeSequences(
                        Sequences.map(
                            clientQueryable.run(rewrittenQuery.withQuerySegmentSpec(segmentSpec)),
                            new Function<Object, Sequence<T>>() {
                              private final Function<T, Object> prepareForCache =
                                  strategy.prepareForCache();

                              @Override
                              public Sequence<T> apply(Object input) {
                                Result<Object> result = (Result<Object>) input;
                                final BySegmentResultValueClass<T> value =
                                    (BySegmentResultValueClass<T>) result.getValue();
                                String segmentIdentifier = value.getSegmentId();
                                final Iterable<T> segmentResults = value.getResults();

                                CachePopulator cachePopulator =
                                    cachePopulatorMap.get(
                                        String.format(
                                            "%s_%s", segmentIdentifier, value.getInterval()));
                                if (cachePopulator != null) {
                                  cachePopulator.populate(
                                      Iterables.transform(segmentResults, prepareForCache));
                                }

                                return Sequences.simple(
                                    Iterables.transform(
                                        segmentResults,
                                        toolChest.makeMetricManipulatorFn(
                                            rewrittenQuery,
                                            new MetricManipulationFn() {
                                              @Override
                                              public Object manipulate(
                                                  AggregatorFactory factory, Object object) {
                                                return factory.deserialize(object);
                                              }
                                            })));
                              }
                            }));
              }

              listOfSequences.add(Pair.of(intervals.get(0).getStart(), resultSeqToAdd));
            }
          }
        });
  }
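The pull-from-cache branch above streams a cached byte[] back into typed objects with Jackson's readValues, and the empty-array check simply avoids creating a parser at all for an empty entry. Below is a minimal, self-contained sketch of that round trip; CachedRow and its fields are made-up stand-ins for whatever cacheObjectClazz actually is for a given query, and the "populate" half only exists so the sketch has something to read back.

import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.util.ArrayList;
import java.util.List;

public class CacheRoundTripSketch {
  // Hypothetical cached-value shape; stands in for whatever cacheObjectClazz is for a given query.
  public static class CachedRow {
    public String dimension;
    public long count;
  }

  public static void main(String[] args) throws Exception {
    ObjectMapper objectMapper = new ObjectMapper();

    // Populate side of the sketch: write a few values back to back into one byte[].
    CachedRow first = new CachedRow();
    first.dimension = "page";
    first.count = 42;
    CachedRow second = new CachedRow();
    second.dimension = "user";
    second.count = 7;
    byte[] a = objectMapper.writeValueAsBytes(first);
    byte[] b = objectMapper.writeValueAsBytes(second);
    byte[] cachedResult = new byte[a.length + b.length];
    System.arraycopy(a, 0, cachedResult, 0, a.length);
    System.arraycopy(b, 0, cachedResult, a.length, b.length);

    // Pull side: stream the blob back out as typed objects, as the IteratorMaker above does.
    List<CachedRow> rows = new ArrayList<>();
    if (cachedResult.length > 0) {
      MappingIterator<CachedRow> values =
          objectMapper.readValues(
              objectMapper.getFactory().createParser(cachedResult), CachedRow.class);
      while (values.hasNext()) {
        rows.add(values.next());
      }
    }
    System.out.println("restored " + rows.size() + " cached rows"); // restored 2 cached rows
  }
}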
Example No. 25
  @Override
  public TaskStatus run(final TaskToolbox toolbox) throws Exception {
    if (this.plumber != null) {
      throw new IllegalStateException("WTF?!? run with non-null plumber??!");
    }

    // Shed any locks we might have (e.g. if we were uncleanly killed and restarted), since we'll
    // reacquire them if we actually need them.
    for (final TaskLock taskLock : getTaskLocks(toolbox)) {
      toolbox.getTaskActionClient().submit(new LockReleaseAction(taskLock.getInterval()));
    }

    boolean normalExit = true;

    // Set up firehose
    final Period intermediatePersistPeriod = fireDepartmentConfig.getIntermediatePersistPeriod();
    final Firehose firehose = firehoseFactory.connect();

    // It would be nice to get the PlumberSchool in the constructor, although that would need
    // Jackson injectables for things like the ServerView, which seems kind of odd. Perhaps
    // revisit this when Guice has been introduced.
    final RealtimePlumberSchool realtimePlumberSchool =
        new RealtimePlumberSchool(
            windowPeriod, new File(toolbox.getTaskWorkDir(), "persist"), segmentGranularity);
    realtimePlumberSchool.setDefaultMaxPendingPersists(maxPendingPersists);

    final SegmentPublisher segmentPublisher = new TaskActionSegmentPublisher(this, toolbox);

    // NOTE: We talk to the coordinator in various places in the plumber and we could be more robust
    // to issues with the coordinator.  Right now, we'll block/throw in whatever thread triggered
    // the coordinator behavior, which will typically be either the main data processing loop or
    // the persist thread.

    // Wrap default DataSegmentAnnouncer such that we unlock intervals as we unannounce segments
    final DataSegmentAnnouncer lockingSegmentAnnouncer =
        new DataSegmentAnnouncer() {
          @Override
          public void announceSegment(final DataSegment segment) throws IOException {
            // Side effect: Calling announceSegment causes a lock to be acquired
            toolbox.getTaskActionClient().submit(new LockAcquireAction(segment.getInterval()));
            toolbox.getSegmentAnnouncer().announceSegment(segment);
          }

          @Override
          public void unannounceSegment(final DataSegment segment) throws IOException {
            try {
              toolbox.getSegmentAnnouncer().unannounceSegment(segment);
            } finally {
              toolbox.getTaskActionClient().submit(new LockReleaseAction(segment.getInterval()));
            }
          }

          @Override
          public void announceSegments(Iterable<DataSegment> segments) throws IOException {
            // Side effect: Calling announceSegments causes locks to be acquired
            for (DataSegment segment : segments) {
              toolbox.getTaskActionClient().submit(new LockAcquireAction(segment.getInterval()));
            }
            toolbox.getSegmentAnnouncer().announceSegments(segments);
          }

          @Override
          public void unannounceSegments(Iterable<DataSegment> segments) throws IOException {
            try {
              toolbox.getSegmentAnnouncer().unannounceSegments(segments);
            } finally {
              for (DataSegment segment : segments) {
                toolbox.getTaskActionClient().submit(new LockReleaseAction(segment.getInterval()));
              }
            }
          }
        };

    // NOTE: getVersion will block if there is lock contention, which will block plumber.getSink
    // (and thus the firehose).

    // This shouldn't usually happen, since we don't expect people to submit tasks that intersect
    // with the realtime window, but if they do it can be problematic. If we decide to care, we can
    // use more threads in the plumber so that waiting for the coordinator doesn't block data
    // processing.
    final VersioningPolicy versioningPolicy =
        new VersioningPolicy() {
          @Override
          public String getVersion(final Interval interval) {
            try {
              // Side effect: Calling getVersion causes a lock to be acquired
              final TaskLock myLock =
                  toolbox.getTaskActionClient().submit(new LockAcquireAction(interval));

              return myLock.getVersion();
            } catch (IOException e) {
              throw Throwables.propagate(e);
            }
          }
        };

    // NOTE: This pusher selects its path based purely on global configuration and the DataSegment,
    // which means that redundant realtime tasks will upload to the same location. This can cause
    // index.zip and descriptor.json to mismatch, or it can cause historical nodes to load
    // different instances of the "same" segment.
    realtimePlumberSchool.setDataSegmentPusher(toolbox.getSegmentPusher());
    realtimePlumberSchool.setConglomerate(toolbox.getQueryRunnerFactoryConglomerate());
    realtimePlumberSchool.setQueryExecutorService(toolbox.getQueryExecutorService());
    realtimePlumberSchool.setVersioningPolicy(versioningPolicy);
    realtimePlumberSchool.setSegmentAnnouncer(lockingSegmentAnnouncer);
    realtimePlumberSchool.setSegmentPublisher(segmentPublisher);
    realtimePlumberSchool.setServerView(toolbox.getNewSegmentServerView());
    realtimePlumberSchool.setEmitter(toolbox.getEmitter());

    if (this.rejectionPolicyFactory != null) {
      realtimePlumberSchool.setRejectionPolicyFactory(rejectionPolicyFactory);
    }

    final FireDepartment fireDepartment =
        new FireDepartment(schema, fireDepartmentConfig, null, null);
    final RealtimeMetricsMonitor metricsMonitor =
        new RealtimeMetricsMonitor(ImmutableList.of(fireDepartment));
    this.queryRunnerFactoryConglomerate = toolbox.getQueryRunnerFactoryConglomerate();
    this.plumber = realtimePlumberSchool.findPlumber(schema, fireDepartment.getMetrics());

    try {
      plumber.startJob();

      // Set up metrics emission
      toolbox.getMonitorScheduler().addMonitor(metricsMonitor);

      // Time to read data!
      long nextFlush = new DateTime().plus(intermediatePersistPeriod).getMillis();
      while (firehose.hasMore()) {
        final InputRow inputRow;
        try {
          inputRow = firehose.nextRow();
          if (inputRow == null) {
            continue;
          }

          final Sink sink = plumber.getSink(inputRow.getTimestampFromEpoch());
          if (sink == null) {
            fireDepartment.getMetrics().incrementThrownAway();
            log.debug("Throwing away event[%s]", inputRow);

            if (System.currentTimeMillis() > nextFlush) {
              plumber.persist(firehose.commit());
              nextFlush = new DateTime().plus(intermediatePersistPeriod).getMillis();
            }

            continue;
          }

          if (sink.isEmpty()) {
            log.info("Task %s: New sink: %s", getId(), sink);
          }

          int currCount = sink.add(inputRow);
          fireDepartment.getMetrics().incrementProcessed();
          if (currCount >= fireDepartmentConfig.getMaxRowsInMemory()
              || System.currentTimeMillis() > nextFlush) {
            plumber.persist(firehose.commit());
            nextFlush = new DateTime().plus(intermediatePersistPeriod).getMillis();
          }
        } catch (FormattedException e) {
          log.warn(e, "unparseable line");
          fireDepartment.getMetrics().incrementUnparseable();
        }
      }
    } catch (Throwable e) {
      normalExit = false;
      log.makeAlert(e, "Exception aborted realtime processing[%s]", schema.getDataSource()).emit();
      throw e;
    } finally {
      if (normalExit) {
        try {
          plumber.persist(firehose.commit());
          plumber.finishJob();
        } catch (Exception e) {
          log.makeAlert(e, "Failed to finish realtime task").emit();
        } finally {
          Closeables.closeQuietly(firehose);
          toolbox.getMonitorScheduler().removeMonitor(metricsMonitor);
        }
      }
    }

    return TaskStatus.success(getId());
  }
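Most of the ingestion loop above is bookkeeping around one decision: persist the in-memory rows whenever either the row count reaches maxRowsInMemory or the intermediate persist period has elapsed, then reset the deadline. The sketch below strips it down to just that timing logic; the thresholds are made up and a plain counter stands in for the count returned by sink.add(inputRow). The real task also persists on the same schedule inside the thrown-away branch, so a quiet stream still gets flushed; the sketch keeps only the happy path.

import org.joda.time.DateTime;
import org.joda.time.Period;

public class FlushTimingSketch {
  public static void main(String[] args) {
    // Made-up thresholds standing in for the fireDepartmentConfig values.
    final Period intermediatePersistPeriod = Period.parse("PT10M");
    final int maxRowsInMemory = 75000;

    long nextFlush = new DateTime().plus(intermediatePersistPeriod).getMillis();
    int rowsInMemory = 0; // stands in for the count returned by sink.add(inputRow)

    // Stand-in for the firehose loop: every row either accumulates or triggers a persist.
    for (int i = 0; i < 200000; i++) {
      rowsInMemory++;
      if (rowsInMemory >= maxRowsInMemory || System.currentTimeMillis() > nextFlush) {
        System.out.println("persist after " + rowsInMemory + " rows in memory");
        rowsInMemory = 0; // the real task hands rows off via plumber.persist(firehose.commit())
        nextFlush = new DateTime().plus(intermediatePersistPeriod).getMillis();
      }
    }
  }
}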
Example No. 26
  @Before
  public void setUp() throws Exception {
    emitter = EasyMock.createMock(ServiceEmitter.class);
    EmittingLogger.registerEmitter(emitter);
    queryRunnerFactoryConglomerate =
        EasyMock.createStrictMock(QueryRunnerFactoryConglomerate.class);
    monitorScheduler = EasyMock.createStrictMock(MonitorScheduler.class);
    publishCountDown = new CountDownLatch(1);
    announcedSinks = 0;
    pushedSegments = 0;
    tmpDir = temporaryFolder.newFolder();
    TestUtils testUtils = new TestUtils();
    mapper = testUtils.getTestObjectMapper();

    tqc =
        mapper.readValue(
            "{\"startDelay\":\"PT0S\", \"restartDelay\":\"PT1S\", \"storageSyncRate\":\"PT0.5S\"}",
            TaskQueueConfig.class);
    indexSpec = new IndexSpec();

    if (taskStorageType.equals("HeapMemoryTaskStorage")) {
      ts = new HeapMemoryTaskStorage(new TaskStorageConfig(null) {});
    } else if (taskStorageType.equals("MetadataTaskStorage")) {
      testDerbyConnector = derbyConnectorRule.getConnector();
      mapper.registerSubtypes(
          new NamedType(MockExceptionalFirehoseFactory.class, "mockExcepFirehoseFactory"),
          new NamedType(MockFirehoseFactory.class, "mockFirehoseFactory"));
      testDerbyConnector.createTaskTables();
      testDerbyConnector.createSegmentTable();
      ts =
          new MetadataTaskStorage(
              testDerbyConnector,
              new TaskStorageConfig(null),
              new SQLMetadataStorageActionHandlerFactory(
                  testDerbyConnector,
                  derbyConnectorRule.metadataTablesConfigSupplier().get(),
                  mapper));
    } else {
      throw new RuntimeException(String.format("Unknown task storage type [%s]", taskStorageType));
    }

    serverView =
        new FilteredServerView() {
          @Override
          public void registerSegmentCallback(
              Executor exec, ServerView.SegmentCallback callback, Predicate<DataSegment> filter) {
            segmentCallbacks.add(callback);
          }
        };
    setUpAndStartTaskQueue(
        new DataSegmentPusher() {
          @Override
          public String getPathForHadoop(String dataSource) {
            throw new UnsupportedOperationException();
          }

          @Override
          public DataSegment push(File file, DataSegment segment) throws IOException {
            pushedSegments++;
            return segment;
          }
        });
  }
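The registerSubtypes calls in the MetadataTaskStorage branch are what let the test refer to its mock firehose factories by name in task JSON. Here is a minimal sketch of that Jackson pattern, using a hypothetical Factory interface and type name rather than Druid's real FirehoseFactory wiring:

import com.fasterxml.jackson.annotation.JsonTypeInfo;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.jsontype.NamedType;

public class SubtypeRegistrationSketch {
  // Hypothetical polymorphic base; the "type" property selects the concrete subtype.
  @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
  public interface Factory {}

  public static class MockFactory implements Factory {
    public String name;
  }

  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    // Same registration style as the test setup above: bind a type name to a concrete class.
    mapper.registerSubtypes(new NamedType(MockFactory.class, "mockFactory"));

    Factory f = mapper.readValue("{\"type\":\"mockFactory\", \"name\":\"demo\"}", Factory.class);
    System.out.println(f.getClass().getSimpleName()); // MockFactory
  }
}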
Example No. 27
  @Before
  public void setUp() throws Exception {
    req = EasyMock.createStrictMock(HttpServletRequest.class);
    supervisorManager = EasyMock.createMock(SupervisorManager.class);
    taskLockbox = EasyMock.createStrictMock(TaskLockbox.class);
    taskLockbox.syncFromStorage();
    EasyMock.expectLastCall().atLeastOnce();
    taskLockbox.add(EasyMock.<Task>anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    taskLockbox.remove(EasyMock.<Task>anyObject());
    EasyMock.expectLastCall().atLeastOnce();

    // for second Noop Task directly added to deep storage.
    taskLockbox.add(EasyMock.<Task>anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    taskLockbox.remove(EasyMock.<Task>anyObject());
    EasyMock.expectLastCall().atLeastOnce();

    taskActionClientFactory = EasyMock.createStrictMock(TaskActionClientFactory.class);
    EasyMock.expect(taskActionClientFactory.create(EasyMock.<Task>anyObject()))
        .andReturn(null)
        .anyTimes();
    EasyMock.replay(taskLockbox, taskActionClientFactory);

    taskStorage = new HeapMemoryTaskStorage(new TaskStorageConfig(null));
    runTaskCountDownLatches = new CountDownLatch[2];
    runTaskCountDownLatches[0] = new CountDownLatch(1);
    runTaskCountDownLatches[1] = new CountDownLatch(1);
    taskCompletionCountDownLatches = new CountDownLatch[2];
    taskCompletionCountDownLatches[0] = new CountDownLatch(1);
    taskCompletionCountDownLatches[1] = new CountDownLatch(1);
    announcementLatch = new CountDownLatch(1);
    IndexerZkConfig indexerZkConfig =
        new IndexerZkConfig(new ZkPathsConfig(), null, null, null, null, null);
    setupServerAndCurator();
    curator.start();
    curator.blockUntilConnected();
    curator.create().creatingParentsIfNeeded().forPath(indexerZkConfig.getLeaderLatchPath());
    druidNode = new DruidNode("hey", "what", 1234);
    ServiceEmitter serviceEmitter = new NoopServiceEmitter();
    taskMaster =
        new TaskMaster(
            new TaskQueueConfig(null, new Period(1), null, new Period(10)),
            taskLockbox,
            taskStorage,
            taskActionClientFactory,
            druidNode,
            indexerZkConfig,
            new TaskRunnerFactory<MockTaskRunner>() {
              @Override
              public MockTaskRunner build() {
                return new MockTaskRunner(runTaskCountDownLatches, taskCompletionCountDownLatches);
              }
            },
            curator,
            new NoopServiceAnnouncer() {
              @Override
              public void announce(DruidNode node) {
                announcementLatch.countDown();
              }
            },
            serviceEmitter,
            supervisorManager);
    EmittingLogger.registerEmitter(serviceEmitter);
  }
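The setup above leans on EasyMock's record/replay cycle: expectations are recorded first (with expectLastCall() for void methods), replay() switches the mock into verification mode, and a later verify() fails if an expected call never arrived or, for a strict mock, arrived out of order. A small self-contained sketch of that cycle, with a hypothetical Lockbox interface in place of TaskLockbox:

import static org.easymock.EasyMock.createStrictMock;
import static org.easymock.EasyMock.expectLastCall;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;

public class StrictMockSketch {
  // Hypothetical collaborator; stands in for something like TaskLockbox in the setup above.
  public interface Lockbox {
    void syncFromStorage();
    void add(String task);
  }

  public static void main(String[] args) {
    Lockbox lockbox = createStrictMock(Lockbox.class);

    // Record phase: a strict mock also checks that calls happen in this order.
    lockbox.syncFromStorage();
    expectLastCall().atLeastOnce();
    lockbox.add("task-1");
    expectLastCall().once();

    replay(lockbox);

    // Exercise phase: the code under test would make these calls.
    lockbox.syncFromStorage();
    lockbox.add("task-1");

    verify(lockbox); // fails if an expected call never happened or happened out of order
  }
}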
Example No. 28
  public void removeSegment(DataSegment segment) {
    log.info("Removing Segment[%s]", segment);
    databaseSegmentManager.removeSegment(segment.getDataSource(), segment.getIdentifier());
  }
Example No. 29
  public void moveSegment(
      String from, String to, String segmentName, final LoadPeonCallback callback) {
    try {
      final DruidServer fromServer = serverInventoryView.getInventoryValue(from);
      if (fromServer == null) {
        throw new IAE("Unable to find server [%s]", from);
      }

      final DruidServer toServer = serverInventoryView.getInventoryValue(to);
      if (toServer == null) {
        throw new IAE("Unable to find server [%s]", to);
      }

      if (to.equalsIgnoreCase(from)) {
        throw new IAE(
            "Redundant command to move segment [%s] from [%s] to [%s]", segmentName, from, to);
      }

      final DataSegment segment = fromServer.getSegment(segmentName);
      if (segment == null) {
        throw new IAE("Unable to find segment [%s] on server [%s]", segmentName, from);
      }

      final LoadQueuePeon loadPeon = loadManagementPeons.get(to);
      if (loadPeon == null) {
        throw new IAE("LoadQueuePeon hasn't been created yet for path [%s]", to);
      }

      final LoadQueuePeon dropPeon = loadManagementPeons.get(from);
      if (dropPeon == null) {
        throw new IAE("LoadQueuePeon hasn't been created yet for path [%s]", from);
      }

      final ServerHolder toHolder = new ServerHolder(toServer, loadPeon);
      if (toHolder.getAvailableSize() < segment.getSize()) {
        throw new IAE(
            "Not enough capacity on server [%s] for segment [%s]. Required: %,d, available: %,d.",
            to, segment, segment.getSize(), toHolder.getAvailableSize());
      }

      final String toLoadQueueSegPath =
          ZKPaths.makePath(ZKPaths.makePath(zkPaths.getLoadQueuePath(), to), segmentName);
      final String toServedSegPath =
          ZKPaths.makePath(
              ZKPaths.makePath(
                  serverInventoryView.getInventoryManagerConfig().getInventoryPath(), to),
              segmentName);

      loadPeon.loadSegment(
          segment,
          new LoadPeonCallback() {
            @Override
            public void execute() {
              try {
                if (curator.checkExists().forPath(toServedSegPath) != null
                    && curator.checkExists().forPath(toLoadQueueSegPath) == null
                    && !dropPeon.getSegmentsToDrop().contains(segment)) {
                  dropPeon.dropSegment(segment, callback);
                } else if (callback != null) {
                  callback.execute();
                }
              } catch (Exception e) {
                throw Throwables.propagate(e);
              }
            }
          });
    } catch (Exception e) {
      log.makeAlert(e, "Exception moving segment %s", segmentName).emit();
      callback.execute();
    }
  }
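The move above only drops the segment from the source server once it appears under the destination's served-segments path and has left the destination's load queue. Those ZooKeeper paths are built by nesting ZKPaths.makePath calls; a minimal sketch with made-up path values:

import org.apache.curator.utils.ZKPaths;

public class ZkPathSketch {
  public static void main(String[] args) {
    // Hypothetical values; the coordinator builds its load-queue path the same way.
    String loadQueuePath = "/druid/loadQueue";
    String serverName = "historical-host:8083";
    String segmentName = "wikipedia_2015-09-12T00:00:00.000Z_2015-09-13T00:00:00.000Z_v1";

    // ZKPaths.makePath joins parent and child with exactly one '/', so nesting two calls
    // yields /druid/loadQueue/historical-host:8083/<segmentName>.
    String toLoadQueueSegPath =
        ZKPaths.makePath(ZKPaths.makePath(loadQueuePath, serverName), segmentName);
    System.out.println(toLoadQueueSegPath);
  }
}

In the callback above, curator.checkExists().forPath(...) returning a non-null Stat for the served-segment path is the signal that the destination has announced the segment, at which point the drop peon is asked to remove the copy on the source server.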
Example No. 30
  private void becomeLeader() {
    synchronized (lock) {
      if (!started) {
        return;
      }

      log.info("I am the leader of the coordinators, all must bow!");
      try {
        leader = true;
        databaseSegmentManager.start();
        databaseRuleManager.start();
        serverInventoryView.start();
        serviceAnnouncer.announce(self);

        final List<Pair<? extends CoordinatorRunnable, Duration>> coordinatorRunnables =
            Lists.newArrayList();
        dynamicConfigs =
            configManager.watch(
                CoordinatorDynamicConfig.CONFIG_KEY,
                CoordinatorDynamicConfig.class,
                new CoordinatorDynamicConfig.Builder().build());
        coordinatorRunnables.add(
            Pair.of(new CoordinatorHistoricalManagerRunnable(), config.getCoordinatorPeriod()));
        if (indexingServiceClient != null) {
          coordinatorRunnables.add(
              Pair.of(
                  new CoordinatorIndexingServiceRunnable(
                      makeIndexingServiceHelpers(
                          configManager.watch(
                              DatasourceWhitelist.CONFIG_KEY, DatasourceWhitelist.class))),
                  config.getCoordinatorIndexingPeriod()));
        }

        for (final Pair<? extends CoordinatorRunnable, Duration> coordinatorRunnable :
            coordinatorRunnables) {
          ScheduledExecutors.scheduleWithFixedDelay(
              exec,
              config.getCoordinatorStartDelay(),
              coordinatorRunnable.rhs,
              new Callable<ScheduledExecutors.Signal>() {
                private final CoordinatorRunnable theRunnable = coordinatorRunnable.lhs;

                @Override
                public ScheduledExecutors.Signal call() {
                  if (leader) {
                    theRunnable.run();
                  }
                  if (leader) { // (We might no longer be coordinator)
                    return ScheduledExecutors.Signal.REPEAT;
                  } else {
                    return ScheduledExecutors.Signal.STOP;
                  }
                }
              });
        }
      } catch (Exception e) {
        log.makeAlert(e, "Unable to become leader").emit();
        final LeaderLatch oldLatch = createNewLeaderLatch();
        Closeables.closeQuietly(oldLatch);
        try {
          leaderLatch.get().start();
        } catch (Exception e1) {
          // If an exception gets thrown out here, then the coordinator will zombie out 'cause it
          // won't be looking for the latch anymore.  I don't believe it's actually possible for an
          // Exception to throw out here, but Curator likes to have "throws Exception" on methods
          // so it might happen...
          log.makeAlert(e1, "I am a zombie").emit();
        }
      }
    }
  }
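becomeLeader runs inside Curator's leader-latch callback, and its error path recovers by closing the old latch and starting a fresh one. Below is a minimal, self-contained sketch of the underlying LeaderLatch mechanics, using curator-test's embedded ZooKeeper so it can run without a real cluster; the latch path is made up and single-participant leadership is immediate.

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.leader.LeaderLatch;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.curator.test.TestingServer;

public class LeaderLatchSketch {
  public static void main(String[] args) throws Exception {
    // TestingServer (curator-test) spins up an embedded ZooKeeper so the sketch is self-contained.
    try (TestingServer zk = new TestingServer()) {
      CuratorFramework client =
          CuratorFrameworkFactory.newClient(
              zk.getConnectString(), new ExponentialBackoffRetry(100, 3));
      client.start();
      client.blockUntilConnected();

      // The latch path plays the role of the coordinator leader-latch path above.
      LeaderLatch latch = new LeaderLatch(client, "/sketch/coordinator/_COORDINATOR");
      latch.start();
      latch.await(); // blocks until this participant becomes leader

      System.out.println("has leadership: " + latch.hasLeadership());

      latch.close();  // relinquish leadership, as the error path above does by closing the old latch
      client.close();
    }
  }
}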