@SuppressWarnings("unchecked")
  public void poll() {
    try {
      String url = getRuleURL();
      if (url == null) {
        return;
      }

      FullResponseHolder response =
          httpClient.go(new Request(HttpMethod.GET, new URL(url)), responseHandler).get();

      if (response.getStatus().equals(HttpResponseStatus.FOUND)) {
        url = response.getResponse().headers().get("Location");
        log.info("Redirecting rule request to [%s]", url);
        response = httpClient.go(new Request(HttpMethod.GET, new URL(url)), responseHandler).get();
      }

      ConcurrentHashMap<String, List<Rule>> newRules =
          new ConcurrentHashMap<>(
              (Map<String, List<Rule>>)
                  jsonMapper.readValue(
                      response.getContent(), new TypeReference<Map<String, List<Rule>>>() {}));

      log.info("Got [%,d] rules", newRules.keySet().size());

      rules.set(newRules);
    } catch (Exception e) {
      log.error(e, "Exception while polling for rules");
    }
  }
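poll() fetches the coordinator's rule map, follows a single 302 redirect by hand, parses the JSON body with Jackson, and atomically swaps the result into an AtomicReference. Below is a minimal, self-contained sketch of the parse-and-swap half only, with String standing in for Druid's Rule type and the HTTP call omitted; the class and field names are made up for illustration.

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReference;

public class RulePollSketch {
  // Readers always see a complete snapshot because the whole map is swapped atomically.
  private final AtomicReference<ConcurrentHashMap<String, List<String>>> rules =
      new AtomicReference<>(new ConcurrentHashMap<>());

  public void poll(String json) throws Exception {
    ObjectMapper jsonMapper = new ObjectMapper();
    // Deserialize datasource -> rules; String stands in for Druid's Rule type.
    Map<String, List<String>> parsed =
        jsonMapper.readValue(json, new TypeReference<Map<String, List<String>>>() {});
    rules.set(new ConcurrentHashMap<>(parsed));
  }

  public static void main(String[] args) throws Exception {
    RulePollSketch sketch = new RulePollSketch();
    sketch.poll("{\"wikipedia\": [\"loadForever\"]}");
    System.out.println(sketch.rules.get()); // {wikipedia=[loadForever]}
  }
}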
Example #2
  private void serverAddedSegment(final DruidServerMetadata server, final DataSegment segment) {

    String segmentId = segment.getIdentifier();
    synchronized (lock) {
      log.debug("Adding segment[%s] for server[%s]", segment, server);

      ServerSelector selector = selectors.get(segmentId);
      if (selector == null) {
        selector = new ServerSelector(segment, tierSelectorStrategy);

        VersionedIntervalTimeline<String, ServerSelector> timeline =
            timelines.get(segment.getDataSource());
        if (timeline == null) {
          timeline = new VersionedIntervalTimeline<>(Ordering.natural());
          timelines.put(segment.getDataSource(), timeline);
        }

        timeline.add(
            segment.getInterval(),
            segment.getVersion(),
            segment.getShardSpec().createChunk(selector));
        selectors.put(segmentId, selector);
      }

      QueryableDruidServer queryableDruidServer = clients.get(server.getName());
      if (queryableDruidServer == null) {
        queryableDruidServer = addServer(baseView.getInventoryValue(server.getName()));
      }
      selector.addServerAndUpdateSegment(queryableDruidServer, segment);
    }
  }
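The get/null-check/create/put sequence on selectors and timelines runs under a single lock, which makes it equivalent to the JDK's computeIfAbsent idiom. A generic sketch of that idiom on plain collections follows; the types and names are stand-ins, not Druid's ServerSelector or timeline API.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ComputeIfAbsentSketch {
  private final Map<String, List<String>> serversBySegment = new HashMap<>();

  // Equivalent to: get, check for null, create, put -- but in one call.
  public void addSegment(String segmentId, String serverName) {
    serversBySegment
        .computeIfAbsent(segmentId, id -> new ArrayList<>())
        .add(serverName);
  }

  public static void main(String[] args) {
    ComputeIfAbsentSketch sketch = new ComputeIfAbsentSketch();
    sketch.addSegment("segment-1", "historical-a");
    sketch.addSegment("segment-1", "historical-b");
    System.out.println(sketch.serversBySegment); // {segment-1=[historical-a, historical-b]}
  }
}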
Example #3
  @Override
  public void kill(DataSegment segment) throws SegmentLoadingException {
    final File path = getPath(segment);
    log.info("killing segment[%s] mapped to path[%s]", segment.getIdentifier(), path);

    try {
      if (path.getName().endsWith(".zip")) {

        // path format -- > .../dataSource/interval/version/partitionNum/xxx.zip
        File partitionNumDir = path.getParentFile();
        FileUtils.deleteDirectory(partitionNumDir);

        // try to delete other directories if possible
        File versionDir = partitionNumDir.getParentFile();
        if (versionDir.delete()) {
          File intervalDir = versionDir.getParentFile();
          if (intervalDir.delete()) {
            File dataSourceDir = intervalDir.getParentFile();
            dataSourceDir.delete();
          }
        }
      } else {
        throw new SegmentLoadingException("Unknown file type[%s]", path);
      }
    } catch (IOException e) {
      throw new SegmentLoadingException(e, "Unable to kill segment");
    }
  }
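kill() recursively deletes the partition directory, then walks upward removing parents one at a time; File.delete() returns false on a non-empty directory, so ancestors that still hold other segments are left untouched. A self-contained sketch of that cascade using a throwaway temp directory (the layout is invented for the demo):

import java.io.File;
import java.nio.file.Files;

public class ParentCleanupSketch {
  public static void main(String[] args) throws Exception {
    // dataSource/interval/version/partitionNum -- mirrors the layout described in the comment above.
    File dataSourceDir = Files.createTempDirectory("dataSource").toFile();
    File partitionDir = new File(dataSourceDir, "interval/version/0");
    partitionDir.mkdirs();

    // Delete the leaf, then walk upward; delete() fails (returns false) on non-empty directories,
    // so the loop stops as soon as an ancestor still has other children.
    File current = partitionDir;
    while (current != null && !current.equals(dataSourceDir.getParentFile()) && current.delete()) {
      System.out.println("Deleted " + current);
      current = current.getParentFile();
    }
  }
}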
Example #4
  private QueryableDruidServer addServer(DruidServer server) {
    QueryableDruidServer retVal = new QueryableDruidServer(server, makeDirectClient(server));
    QueryableDruidServer exists = clients.put(server.getName(), retVal);
    if (exists != null) {
      log.warn("QueryRunner for server[%s] already existed!? Well it's getting replaced", server);
    }

    return retVal;
  }
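The warning above relies on Map.put returning the value it displaced. A tiny sketch of that return-value check with a plain HashMap and illustrative names:

import java.util.HashMap;
import java.util.Map;

public class PutReplaceSketch {
  public static void main(String[] args) {
    Map<String, String> clients = new HashMap<>();
    clients.put("historical-a", "client-1");

    // put() hands back the value it displaced, or null if the key was new.
    String previous = clients.put("historical-a", "client-2");
    if (previous != null) {
      System.out.println("Replaced existing client: " + previous);
    }
  }
}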
Example #5
 @Override
 public <T> QueryRunner<T> getQueryRunner(DruidServer server) {
   synchronized (lock) {
     QueryableDruidServer queryableDruidServer = clients.get(server.getName());
     if (queryableDruidServer == null) {
       log.error("WTF?! No QueryableDruidServer found for %s", server.getName());
       return null;
     }
     return queryableDruidServer.getClient();
   }
 }
Example #6
 @Override
 public void pushTaskLog(final String taskid, File file) throws IOException {
   if (config.getDirectory().exists() || config.getDirectory().mkdirs()) {
     final File outputFile = fileForTask(taskid);
     Files.copy(file, outputFile);
     log.info("Wrote task log to: %s", outputFile);
   } else {
     throw new IOException(
         String.format("Unable to create task log dir[%s]", config.getDirectory()));
   }
 }
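pushTaskLog ensures the log directory exists (exists() || mkdirs()) before copying the file with Guava's Files.copy. A JDK-only sketch of the same ensure-then-copy pattern using java.nio.file; the task id and paths are made up:

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class PushTaskLogSketch {
  public static void main(String[] args) throws Exception {
    Path logDir = Path.of(System.getProperty("java.io.tmpdir"), "task-logs");
    // createDirectories is a no-op if the directory already exists.
    Files.createDirectories(logDir);

    Path source = Files.createTempFile("task", ".log");
    Files.writeString(source, "task output\n");

    Path target = logDir.resolve("my-task-id.log");
    Files.copy(source, target, StandardCopyOption.REPLACE_EXISTING);
    System.out.println("Wrote task log to: " + target);
  }
}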
Example #7
  private void serverRemovedSegment(DruidServerMetadata server, DataSegment segment) {

    String segmentId = segment.getIdentifier();
    final ServerSelector selector;

    synchronized (lock) {
      log.debug("Removing segment[%s] from server[%s].", segmentId, server);

      selector = selectors.get(segmentId);
      if (selector == null) {
        log.warn("Told to remove non-existant segment[%s]", segmentId);
        return;
      }

      QueryableDruidServer queryableDruidServer = clients.get(server.getName());
      if (!selector.removeServer(queryableDruidServer)) {
        log.warn(
            "Asked to disassociate non-existant association between server[%s] and segment[%s]",
            server, segmentId);
      }

      if (selector.isEmpty()) {
        VersionedIntervalTimeline<String, ServerSelector> timeline =
            timelines.get(segment.getDataSource());
        selectors.remove(segmentId);

        final PartitionChunk<ServerSelector> removedPartition =
            timeline.remove(
                segment.getInterval(),
                segment.getVersion(),
                segment.getShardSpec().createChunk(selector));

        if (removedPartition == null) {
          log.warn(
              "Asked to remove timeline entry[interval: %s, version: %s] that doesn't exist",
              segment.getInterval(), segment.getVersion());
        }
      }
    }
  }
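Removal mirrors serverAddedSegment: drop the server from the selector, and once the selector is empty, unwind both the selectors map and the timeline entry. A compact sketch of the remove-then-clean-up-if-empty idiom on plain collections (not Druid's types):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class RemoveAndCleanupSketch {
  private final Map<String, Set<String>> serversBySegment = new HashMap<>();

  public void removeSegment(String segmentId, String serverName) {
    Set<String> servers = serversBySegment.get(segmentId);
    if (servers == null) {
      System.out.println("Told to remove unknown segment " + segmentId);
      return;
    }
    servers.remove(serverName);
    // Once no server holds the segment, drop the bookkeeping entry entirely.
    if (servers.isEmpty()) {
      serversBySegment.remove(segmentId);
    }
  }

  public static void main(String[] args) {
    RemoveAndCleanupSketch sketch = new RemoveAndCleanupSketch();
    sketch.serversBySegment.put("segment-1", new HashSet<>(Set.of("historical-a")));
    sketch.removeSegment("segment-1", "historical-a");
    System.out.println(sketch.serversBySegment); // {}
  }
}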
Example #8
  public Long getAvailableSize() {
    long maxSize = getMaxSize();
    long sizeUsed = getSizeUsed();
    long availableSize = maxSize - sizeUsed;

    log.debug(
        "Server[%s], MaxSize[%,d], CurrSize[%,d], QueueSize[%,d], SizeUsed[%,d], AvailableSize[%,d]",
        server.getName(),
        maxSize,
        getCurrServerSize(),
        getLoadQueueSize(),
        sizeUsed,
        availableSize);

    return availableSize;
  }
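The %,d specifier in the log call is standard java.util.Formatter syntax: the comma flag adds locale-specific digit grouping. A one-method sketch of the same arithmetic and formatting with invented sizes:

public class AvailableSizeSketch {
  public static void main(String[] args) {
    long maxSize = 10_000_000_000L;
    long sizeUsed = 2_500_000_000L;
    long availableSize = maxSize - sizeUsed;
    // %,d groups digits, e.g. 7,500,000,000 in an English locale.
    System.out.println(String.format("MaxSize[%,d], SizeUsed[%,d], AvailableSize[%,d]",
        maxSize, sizeUsed, availableSize));
  }
}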
Example #9
  @Inject
  public OffHeapNamespaceExtractionCacheManager(
      Lifecycle lifecycle,
      ServiceEmitter emitter,
      final Map<Class<? extends ExtractionNamespace>, ExtractionNamespaceCacheFactory<?>>
          namespaceFunctionFactoryMap) {
    super(lifecycle, emitter, namespaceFunctionFactoryMap);
    try {
      tmpFile = File.createTempFile("druidMapDB", getClass().getCanonicalName());
      log.info("Using file [%s] for mapDB off heap namespace cache", tmpFile.getAbsolutePath());
    } catch (IOException e) {
      throw Throwables.propagate(e);
    }
    mmapDB =
        DBMaker.newFileDB(tmpFile)
            .closeOnJvmShutdown()
            .transactionDisable()
            .deleteFilesAfterClose()
            .strictDBGet()
            .asyncWriteEnable()
            .mmapFileEnable()
            .commitFileSyncDisable()
            .cacheSize(10_000_000)
            .make();
    try {
      lifecycle.addMaybeStartHandler(
          new Lifecycle.Handler() {
            @Override
            public void start() throws Exception {
              // NOOP
            }

            @Override
            public synchronized void stop() {
              if (!mmapDB.isClosed()) {
                mmapDB.close();
                if (!tmpFile.delete()) {
                  log.warn("Unable to delete file at [%s]", tmpFile.getAbsolutePath());
                }
              }
            }
          });
    } catch (Exception e) {
      throw Throwables.propagate(e);
    }
  }
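The constructor ties the off-heap MapDB store to the service lifecycle: a temp file backs the database, and the stop handler closes the DB and deletes the file. A JDK-only sketch of the same temp-resource-plus-cleanup shape, using a shutdown hook as a stand-in for Druid's Lifecycle and leaving MapDB out entirely:

import java.io.File;
import java.io.IOException;

public class TempResourceSketch {
  public static void main(String[] args) throws IOException {
    final File tmpFile = File.createTempFile("druidMapDB", ".cache");
    System.out.println("Using file " + tmpFile.getAbsolutePath());

    // Stand-in for Lifecycle.Handler.stop(): release the resource, then remove its backing file.
    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
      if (!tmpFile.delete()) {
        System.err.println("Unable to delete file at " + tmpFile.getAbsolutePath());
      }
    }));
  }
}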
Example #10
  private String getRuleURL() throws URISyntaxException {
    Server server = selector.pick();

    if (server == null) {
      log.error("No instances found for [%s]!", config.get().getCoordinatorServiceName());
      return null;
    }

    return new URI(
            server.getScheme(),
            null,
            server.getAddress(),
            server.getPort(),
            config.get().getRulesEndpoint(),
            null,
            null)
        .toString();
  }
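The multi-argument java.net.URI constructor used above takes (scheme, userInfo, host, port, path, query, fragment) and performs escaping itself, which is why the URL is assembled this way rather than by string concatenation. A standalone sketch with made-up host and endpoint values:

import java.net.URI;
import java.net.URISyntaxException;

public class RuleUrlSketch {
  public static void main(String[] args) throws URISyntaxException {
    // (scheme, userInfo, host, port, path, query, fragment)
    URI url = new URI("http", null, "coordinator.example.com", 8081,
        "/druid/coordinator/v1/rules", null, null);
    System.out.println(url); // http://coordinator.example.com:8081/druid/coordinator/v1/rules
  }
}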
Example #11
 @Override
 public boolean delete(final String namespaceKey) {
   // `super.delete` has a synchronization in it, don't call it in the lock.
   if (!super.delete(namespaceKey)) {
     return false;
   }
   final Lock lock = nsLocks.get(namespaceKey);
   lock.lock();
   try {
     final String mmapDBkey = currentNamespaceCache.remove(namespaceKey);
     if (mmapDBkey == null) {
       return false;
     }
     final long pre = tmpFile.length();
     mmapDB.delete(mmapDBkey);
     log.debug("MapDB file size: pre %d  post %d", pre, tmpFile.length());
     return true;
   } finally {
     lock.unlock();
   }
 }
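The per-namespace lock comes from the nsLocks map, so each namespace serializes its own mutations without blocking the rest. A minimal sketch of that lock-per-key pattern with ConcurrentHashMap and ReentrantLock; the names are illustrative, not the actual Druid fields:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class LockPerKeySketch {
  private final ConcurrentHashMap<String, Lock> nsLocks = new ConcurrentHashMap<>();

  public void withNamespaceLock(String namespaceKey, Runnable action) {
    // One lock per namespace; computeIfAbsent creates it on first use.
    Lock lock = nsLocks.computeIfAbsent(namespaceKey, k -> new ReentrantLock());
    lock.lock();
    try {
      action.run();
    } finally {
      lock.unlock();
    }
  }

  public static void main(String[] args) {
    new LockPerKeySketch().withNamespaceLock("wikipedia",
        () -> System.out.println("namespace-scoped work runs here"));
  }
}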
Example #12
 @Override
 public void killAll() throws IOException {
   log.info("Deleting all segment files from local dir [%s].", storageDirectory.getAbsolutePath());
   FileUtils.deleteDirectory(storageDirectory);
 }
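FileUtils.deleteDirectory here is presumably Apache Commons IO's recursive delete. A JDK-only sketch that does the same job by walking the tree and deleting children before their parent directories (the directory contents are invented for the demo):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Comparator;
import java.util.stream.Stream;

public class DeleteDirectorySketch {
  public static void main(String[] args) throws IOException {
    Path storageDirectory = Files.createTempDirectory("segments");
    Files.createFile(storageDirectory.resolve("segment.zip"));

    // Reverse order so files and subdirectories go before the directories that contain them.
    try (Stream<Path> paths = Files.walk(storageDirectory)) {
      paths.sorted(Comparator.reverseOrder())
          .forEach(p -> p.toFile().delete());
    }
    System.out.println("Deleted? " + !Files.exists(storageDirectory));
  }
}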