Example #1
  @Override
  public void addSegment(DataSegment segment) {
    try {
      log.info("Loading segment %s", segment.getIdentifier());

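      // Load the segment into the ServerManager; on failure, clean up and rethrow.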
      try {
        serverManager.loadSegment(segment);
      } catch (Exception e) {
        removeSegment(segment);
        throw new SegmentLoadingException(
            e, "Exception loading segment[%s]", segment.getIdentifier());
      }

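      // Cache the segment's metadata on disk so it can be restored after a restart.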
      File segmentInfoCacheFile = new File(config.getInfoDir(), segment.getIdentifier());
      if (!segmentInfoCacheFile.exists()) {
        try {
          jsonMapper.writeValue(segmentInfoCacheFile, segment);
        } catch (IOException e) {
          removeSegment(segment);
          throw new SegmentLoadingException(
              e, "Failed to write to disk segment info cache file[%s]", segmentInfoCacheFile);
        }
      }

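      // Announce the segment so the rest of the cluster can discover it.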
      try {
        announcer.announceSegment(segment);
      } catch (IOException e) {
        throw new SegmentLoadingException(
            e, "Failed to announce segment[%s]", segment.getIdentifier());
      }

    } catch (SegmentLoadingException e) {
      log.makeAlert(e, "Failed to load segment for dataSource").addData("segment", segment).emit();
    }
  }
Example #2
  private void loadCache() {
    File baseDir = config.getInfoDir();
    if (!baseDir.exists()) {
      return;
    }

    List<DataSegment> cachedSegments = Lists.newArrayList();
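    // Read each cached info file; keep segments that are still cached locally and prune stale entries.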
    for (File file : baseDir.listFiles()) {
      log.info("Loading segment cache file [%s]", file);
      try {
        DataSegment segment = jsonMapper.readValue(file, DataSegment.class);
        if (serverManager.isSegmentCached(segment)) {
          cachedSegments.add(segment);
        } else {
          log.warn(
              "Unable to find cache file for %s. Deleting lookup entry", segment.getIdentifier());

          File segmentInfoCacheFile = new File(config.getInfoDir(), segment.getIdentifier());
          if (!segmentInfoCacheFile.delete()) {
            log.warn("Unable to delete segmentInfoCacheFile[%s]", segmentInfoCacheFile);
          }
        }
      } catch (Exception e) {
        log.makeAlert(e, "Failed to load segment from segmentInfo file")
            .addData("file", file)
            .emit();
      }
    }

    addSegments(cachedSegments);
  }
Example #3
  @Test
  public void testV1Serialization() throws Exception {

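    // Round-trip the segment through JSON: first inspect the raw map, then the deserialized object.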
    final Interval interval = new Interval("2011-10-01/2011-10-02");
    final ImmutableMap<String, Object> loadSpec =
        ImmutableMap.<String, Object>of("something", "or_other");

    DataSegment segment =
        new DataSegment(
            "something",
            interval,
            "1",
            loadSpec,
            Arrays.asList("dim1", "dim2"),
            Arrays.asList("met1", "met2"),
            NoneShardSpec.instance(),
            IndexIO.CURRENT_VERSION_ID,
            1);

    final Map<String, Object> objectMap =
        mapper.readValue(
            mapper.writeValueAsString(segment), new TypeReference<Map<String, Object>>() {});

    Assert.assertEquals(10, objectMap.size());
    Assert.assertEquals("something", objectMap.get("dataSource"));
    Assert.assertEquals(interval.toString(), objectMap.get("interval"));
    Assert.assertEquals("1", objectMap.get("version"));
    Assert.assertEquals(loadSpec, objectMap.get("loadSpec"));
    Assert.assertEquals("dim1,dim2", objectMap.get("dimensions"));
    Assert.assertEquals("met1,met2", objectMap.get("metrics"));
    Assert.assertEquals(ImmutableMap.of("type", "none"), objectMap.get("shardSpec"));
    Assert.assertEquals(IndexIO.CURRENT_VERSION_ID, objectMap.get("binaryVersion"));
    Assert.assertEquals(1, objectMap.get("size"));

    DataSegment deserializedSegment =
        mapper.readValue(mapper.writeValueAsString(segment), DataSegment.class);

    Assert.assertEquals(segment.getDataSource(), deserializedSegment.getDataSource());
    Assert.assertEquals(segment.getInterval(), deserializedSegment.getInterval());
    Assert.assertEquals(segment.getVersion(), deserializedSegment.getVersion());
    Assert.assertEquals(segment.getLoadSpec(), deserializedSegment.getLoadSpec());
    Assert.assertEquals(segment.getDimensions(), deserializedSegment.getDimensions());
    Assert.assertEquals(segment.getMetrics(), deserializedSegment.getMetrics());
    Assert.assertEquals(segment.getShardSpec(), deserializedSegment.getShardSpec());
    Assert.assertEquals(segment.getSize(), deserializedSegment.getSize());
    Assert.assertEquals(segment.getIdentifier(), deserializedSegment.getIdentifier());

    deserializedSegment = mapper.readValue(mapper.writeValueAsString(segment), DataSegment.class);
    Assert.assertEquals(0, segment.compareTo(deserializedSegment));

    deserializedSegment = mapper.readValue(mapper.writeValueAsString(segment), DataSegment.class);
    Assert.assertEquals(0, deserializedSegment.compareTo(segment));

    deserializedSegment = mapper.readValue(mapper.writeValueAsString(segment), DataSegment.class);
    Assert.assertEquals(segment.hashCode(), deserializedSegment.hashCode());
  }
Example #4
  public void addSegments(Iterable<DataSegment> segments) {
    try {
      final List<String> segmentFailures = Lists.newArrayList();
      final List<DataSegment> validSegments = Lists.newArrayList();

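      // Load each segment, tracking failures so the remaining segments can still be announced.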
      for (DataSegment segment : segments) {
        log.info("Loading segment %s", segment.getIdentifier());

        try {
          serverManager.loadSegment(segment);
        } catch (Exception e) {
          log.error(e, "Exception loading segment[%s]", segment.getIdentifier());
          removeSegment(segment);
          segmentFailures.add(segment.getIdentifier());
          continue;
        }

        File segmentInfoCacheFile = new File(config.getInfoDir(), segment.getIdentifier());
        if (!segmentInfoCacheFile.exists()) {
          try {
            jsonMapper.writeValue(segmentInfoCacheFile, segment);
          } catch (IOException e) {
            log.error(
                e, "Failed to write to disk segment info cache file[%s]", segmentInfoCacheFile);
            removeSegment(segment);
            segmentFailures.add(segment.getIdentifier());
            continue;
          }
        }

        validSegments.add(segment);
      }

      try {
        announcer.announceSegments(validSegments);
      } catch (IOException e) {
        throw new SegmentLoadingException(e, "Failed to announce segments[%s]", segments);
      }

      if (!segmentFailures.isEmpty()) {
        for (String segmentFailure : segmentFailures) {
          log.error("%s failed to load", segmentFailure);
        }
        throw new SegmentLoadingException(
            "%,d errors seen while loading segments", segmentFailures.size());
      }
    } catch (SegmentLoadingException e) {
      log.makeAlert(e, "Failed to load segments for dataSource")
          .addData("segments", segments)
          .emit();
    }
  }
Example #5
  private void serverAddedSegment(final DruidServerMetadata server, final DataSegment segment) {

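    // Create the selector and timeline entry on first sight of this segment, then attach the server.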
    String segmentId = segment.getIdentifier();
    synchronized (lock) {
      log.debug("Adding segment[%s] for server[%s]", segment, server);

      ServerSelector selector = selectors.get(segmentId);
      if (selector == null) {
        selector = new ServerSelector(segment, tierSelectorStrategy);

        VersionedIntervalTimeline<String, ServerSelector> timeline =
            timelines.get(segment.getDataSource());
        if (timeline == null) {
          timeline = new VersionedIntervalTimeline<>(Ordering.natural());
          timelines.put(segment.getDataSource(), timeline);
        }

        timeline.add(
            segment.getInterval(),
            segment.getVersion(),
            segment.getShardSpec().createChunk(selector));
        selectors.put(segmentId, selector);
      }

      QueryableDruidServer queryableDruidServer = clients.get(server.getName());
      if (queryableDruidServer == null) {
        queryableDruidServer = addServer(baseView.getInventoryValue(server.getName()));
      }
      selector.addServerAndUpdateSegment(queryableDruidServer, segment);
    }
  }
Example #6
  @Override
  public void kill(DataSegment segment) throws SegmentLoadingException {
    final File path = getPath(segment);
    log.info("killing segment[%s] mapped to path[%s]", segment.getIdentifier(), path);

    try {
      if (path.getName().endsWith(".zip")) {

        // path format --> .../dataSource/interval/version/partitionNum/xxx.zip
        File partitionNumDir = path.getParentFile();
        FileUtils.deleteDirectory(partitionNumDir);

        // try to delete other directories if possible
        File versionDir = partitionNumDir.getParentFile();
        if (versionDir.delete()) {
          File intervalDir = versionDir.getParentFile();
          if (intervalDir.delete()) {
            File dataSourceDir = intervalDir.getParentFile();
            dataSourceDir.delete();
          }
        }
      } else {
        throw new SegmentLoadingException("Unknown file type[%s]", path);
      }
    } catch (IOException e) {
      throw new SegmentLoadingException(e, "Unable to kill segment");
    }
  }
Example #7
  protected void addSingleInventory(final DruidServer container, final DataSegment inventory) {
    log.info("Server[%s] added segment[%s]", container.getName(), inventory.getIdentifier());

    if (container.getSegment(inventory.getIdentifier()) != null) {
      log.warn(
          "Not adding or running callbacks for existing segment[%s] on server[%s]",
          inventory.getIdentifier(), container.getName());

      return;
    }

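    // New segment: register it with the server and notify the segmentAdded callbacks.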
    container.addDataSegment(inventory.getIdentifier(), inventory);

    runSegmentCallbacks(
        new Function<SegmentCallback, CallbackAction>() {
          @Override
          public CallbackAction apply(SegmentCallback input) {
            return input.segmentAdded(container.getMetadata(), inventory);
          }
        });
  }
Example #8
  @Test
  public void testIdentifierWithNonzeroPartition() {
    final DataSegment segment =
        DataSegment.builder()
            .dataSource("foo")
            .interval(new Interval("2012-01-01/2012-01-02"))
            .version(new DateTime("2012-01-01T11:22:33.444Z").toString())
            .shardSpec(new SingleDimensionShardSpec("bar", "abc", "def", 1))
            .build();

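    // The nonzero partitionNum (1) appends a trailing _1 to the identifier.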
    Assert.assertEquals(
        "foo_2012-01-01T00:00:00.000Z_2012-01-02T00:00:00.000Z_2012-01-01T11:22:33.444Z_1",
        segment.getIdentifier());
  }
Example #9
  @Test
  public void testIdentifier() {
    final DataSegment segment =
        DataSegment.builder()
            .dataSource("foo")
            .interval(new Interval("2012-01-01/2012-01-02"))
            .version(new DateTime("2012-01-01T11:22:33.444Z").toString())
            .shardSpec(NoneShardSpec.instance())
            .build();

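    // NoneShardSpec occupies partition 0, so the identifier carries no partition suffix.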
    Assert.assertEquals(
        "foo_2012-01-01T00:00:00.000Z_2012-01-02T00:00:00.000Z_2012-01-01T11:22:33.444Z",
        segment.getIdentifier());
  }
Example #10
  @Override
  public void removeSegment(DataSegment segment) {
    try {
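      // Drop the segment, delete its cached info file, then unannounce it.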
      serverManager.dropSegment(segment);

      File segmentInfoCacheFile = new File(config.getInfoDir(), segment.getIdentifier());
      if (!segmentInfoCacheFile.delete()) {
        log.warn("Unable to delete segmentInfoCacheFile[%s]", segmentInfoCacheFile);
      }

      announcer.unannounceSegment(segment);
    } catch (Exception e) {
      log.makeAlert(e, "Failed to remove segment").addData("segment", segment).emit();
    }
  }
Example #11
    @Override
    public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
      DatasourceWhitelist whitelist = whitelistRef.get();

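      // Request an upgrade for any whitelisted segment whose binary version is older than the current one.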
      for (DataSegment dataSegment : params.getAvailableSegments()) {
        if (whitelist == null || whitelist.contains(dataSegment.getDataSource())) {
          final Integer binaryVersion = dataSegment.getBinaryVersion();

          if (binaryVersion == null || binaryVersion < IndexIO.CURRENT_VERSION_ID) {
            log.info("Upgrading version on segment[%s]", dataSegment.getIdentifier());
            indexingServiceClient.upgradeSegment(dataSegment);
          }
        }
      }

      return params;
    }
Example #12
  private void serverRemovedSegment(DruidServerMetadata server, DataSegment segment) {

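    // Detach the server from the segment's selector; drop the timeline entry once no server remains.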
    String segmentId = segment.getIdentifier();
    final ServerSelector selector;

    synchronized (lock) {
      log.debug("Removing segment[%s] from server[%s].", segmentId, server);

      selector = selectors.get(segmentId);
      if (selector == null) {
        log.warn("Told to remove non-existant segment[%s]", segmentId);
        return;
      }

      QueryableDruidServer queryableDruidServer = clients.get(server.getName());
      if (!selector.removeServer(queryableDruidServer)) {
        log.warn(
            "Asked to disassociate non-existent association between server[%s] and segment[%s]",
            server, segmentId);
      }

      if (selector.isEmpty()) {
        VersionedIntervalTimeline<String, ServerSelector> timeline =
            timelines.get(segment.getDataSource());
        selectors.remove(segmentId);

        final PartitionChunk<ServerSelector> removedPartition =
            timeline.remove(
                segment.getInterval(),
                segment.getVersion(),
                segment.getShardSpec().createChunk(selector));

        if (removedPartition == null) {
          log.warn(
              "Asked to remove timeline entry[interval: %s, version: %s] that doesn't exist",
              segment.getInterval(), segment.getVersion());
        }
      }
    }
  }
Example #13
  @Override
  public void getSegmentFiles(final DataSegment segment, final File outDir)
      throws SegmentLoadingException {
    final S3Coords s3Coords = new S3Coords(segment);

    log.info("Pulling index at path[%s] to outDir[%s]", s3Coords, outDir);

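    // Fail fast if the object is missing; ensure outDir exists, then download with retries.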
    if (!isObjectInBucket(s3Coords)) {
      throw new SegmentLoadingException("IndexFile[%s] does not exist.", s3Coords);
    }

    if (!outDir.exists()) {
      outDir.mkdirs();
    }

    if (!outDir.isDirectory()) {
      throw new ISE("outDir[%s] must be a directory.", outDir);
    }

    try {
      S3Utils.retryS3Operation(
          new Callable<Void>() {
            @Override
            public Void call() throws Exception {
              long startTime = System.currentTimeMillis();
              S3Object s3Obj = null;

              try {
                s3Obj = s3Client.getObject(s3Coords.bucket, s3Coords.path);

                try (InputStream in = s3Obj.getDataInputStream()) {
                  final String key = s3Obj.getKey();
                  if (key.endsWith(".zip")) {
                    CompressionUtils.unzip(in, outDir);
                  } else if (key.endsWith(".gz")) {
                    final File outFile = new File(outDir, toFilename(key, ".gz"));
                    ByteStreams.copy(
                        new GZIPInputStream(in), Files.newOutputStreamSupplier(outFile));
                  } else {
                    ByteStreams.copy(
                        in, Files.newOutputStreamSupplier(new File(outDir, toFilename(key, ""))));
                  }
                  log.info(
                      "Pull of file[%s] completed in %,d millis",
                      s3Obj, System.currentTimeMillis() - startTime);
                  return null;
                } catch (IOException e) {
                  throw new IOException(
                      String.format("Problem decompressing object[%s]", s3Obj), e);
                }
              } finally {
                S3Utils.closeStreamsQuietly(s3Obj);
              }
            }
          });
    } catch (Exception e) {
      try {
        FileUtils.deleteDirectory(outDir);
      } catch (IOException ioe) {
        log.warn(
            ioe,
            "Failed to remove output directory for segment[%s] after exception: %s",
            segment.getIdentifier(),
            outDir);
      }
      throw new SegmentLoadingException(e, e.getMessage());
    }
  }
Example #14
  public void removeSegment(DataSegment segment) {
    log.info("Removing Segment[%s]", segment);
    databaseSegmentManager.removeSegment(segment.getDataSource(), segment.getIdentifier());
  }
Example #15
  public boolean isServingSegment(DataSegment segment) {
    return (server.getSegment(segment.getIdentifier()) != null);
  }