@Override
  public void configure(Binder binder) {
    // Instantiate eagerly so that we get everything registered and put into the Lifecycle
    // This makes the shutdown run pretty darn near last.

    try {
      // Probe for Log4j2 via reflection so that non-Log4j2 setups can still run.
      // This acts as a gateway that stops errors in the next few lines; the loaded
      // Class itself is intentionally unused.
      final Class<?> logManagerClazz = Class.forName("org.apache.logging.log4j.LogManager");

      final LoggerContextFactory contextFactory = LogManager.getFactory();
      if (!(contextFactory instanceof Log4jContextFactory)) {
        log.warn(
            "Expected [%s] found [%s]. Unknown class for context factory. Not logging shutdown",
            Log4jContextFactory.class.getCanonicalName(),
            contextFactory.getClass().getCanonicalName());
        return;
      }
      final ShutdownCallbackRegistry registry =
          ((Log4jContextFactory) contextFactory).getShutdownCallbackRegistry();
      if (!(registry instanceof Log4jShutdown)) {
        log.warn(
            "Shutdown callback registry expected class [%s] found [%s]. Skipping shutdown registry",
            Log4jShutdown.class.getCanonicalName(), registry.getClass().getCanonicalName());
        return;
      }
      binder.bind(Log4jShutdown.class).toInstance((Log4jShutdown) registry);
      binder
          .bind(Key.get(Log4jShutterDowner.class, Names.named("ForTheEagerness")))
          .to(Log4jShutterDowner.class)
          .asEagerSingleton();
    } catch (ClassNotFoundException | ClassCastException | LinkageError e) {
      log.warn(e, "Not registering log4j shutdown hooks. Not using log4j?");
    }
  }
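
The eager binding above relies on Guice constructing eager singletons at injector-creation time. A minimal sketch of that effect; the module name and wiring are hypothetical, not part of the snippet:

  // Hypothetical usage: eager singletons are built when the injector is created,
  // so Log4jShutterDowner registers with the Lifecycle immediately.
  Injector injector = Guice.createInjector(new Log4jShutterDownerModule());
  // No explicit injector.getInstance(...) call is needed for the eager binding.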
  // Returns true if the namespace was actually scheduled (or rescheduled).
  public boolean scheduleOrUpdate(final String id, ExtractionNamespace namespace) {
    final NamespaceImplData implDatum = implData.get(id);
    if (implDatum == null) {
      // New, probably
      schedule(id, namespace);
      return true;
    }
    if (!implDatum.enabled.get()) {
      // Race condition. Someone else disabled it first, go ahead and reschedule
      schedule(id, namespace);
      return true;
    }

    // Live one. Check whether it needs to be updated.
    if (implDatum.namespace.equals(namespace)) {
      // skip if no update
      return false;
    }
    if (log.isDebugEnabled()) {
      log.debug("Namespace [%s] needs updated to [%s]", implDatum.namespace, namespace);
    }
    removeNamespaceLocalMetadata(implDatum);
    schedule(id, namespace);
    return true;
  }
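
A hedged caller sketch for the boolean contract above; the manager and namespace variables are hypothetical:

  // Hypothetical caller: branch on whether a (re)schedule actually happened.
  if (manager.scheduleOrUpdate("wikipedia", namespace)) {
    log.info("Namespace [%s] was scheduled or updated", "wikipedia");
  } else {
    log.debug("Namespace [%s] was already running and up to date", "wikipedia");
  }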
 @Override
 public byte[] get(NamedKey key) {
   try (ResourceHolder<MemcachedClientIF> clientHolder = client.get()) {
     Future<Object> future;
     try {
       future = clientHolder.get().asyncGet(computeKeyHash(memcachedPrefix, key));
     } catch (IllegalStateException e) {
       // operation did not get queued in time (queue is full)
       errorCount.incrementAndGet();
       log.warn(e, "Unable to queue cache operation");
       return null;
     }
     try {
       byte[] bytes = (byte[]) future.get(timeout, TimeUnit.MILLISECONDS);
       if (bytes != null) {
         hitCount.incrementAndGet();
       } else {
         missCount.incrementAndGet();
       }
       return bytes == null ? null : deserializeValue(key, bytes);
     } catch (TimeoutException e) {
       timeoutCount.incrementAndGet();
       future.cancel(false);
       return null;
     } catch (InterruptedException e) {
       Thread.currentThread().interrupt();
       throw Throwables.propagate(e);
     } catch (ExecutionException e) {
       errorCount.incrementAndGet();
       log.warn(e, "Exception pulling item from cache");
       return null;
     }
   }
 }
  public FileUtils.FileCopyResult getSegmentFiles(
      String region, String container, String path, File outDir) throws SegmentLoadingException {
    CloudFilesObjectApiProxy objectApi =
        new CloudFilesObjectApiProxy(cloudFilesApi, region, container);
    final CloudFilesByteSource byteSource = new CloudFilesByteSource(objectApi, path);

    try {
      final FileUtils.FileCopyResult result =
          CompressionUtils.unzip(byteSource, outDir, CloudFilesUtils.CLOUDFILESRETRY, true);
      log.info("Loaded %d bytes from [%s] to [%s]", result.size(), path, outDir.getAbsolutePath());
      return result;
    } catch (Exception e) {
      try {
        org.apache.commons.io.FileUtils.deleteDirectory(outDir);
      } catch (IOException ioe) {
        log.warn(
            ioe,
            "Failed to remove output directory [%s] for segment pulled from [%s]",
            outDir.getAbsolutePath(),
            path);
      }
      throw new SegmentLoadingException(e, e.getMessage());
    } finally {
      try {
        byteSource.closeStream();
      } catch (IOException ioe) {
        log.warn(ioe, "Failed to close payload for segment pulled from [%s]", path);
      }
    }
  }
  public void testQueriesFromFile(String filePath, int timesToRun) throws Exception {
    LOG.info("Starting query tests for [%s]", filePath);
    List<QueryWithResults> queries =
        jsonMapper.readValue(
            FromFileTestQueryHelper.class.getResourceAsStream(filePath),
            new TypeReference<List<QueryWithResults>>() {});
    for (int i = 0; i < timesToRun; i++) {
      LOG.info("Starting Iteration " + i);

      boolean failed = false;
      for (QueryWithResults queryWithResult : queries) {
        LOG.info("Running Query " + queryWithResult.getQuery().getType());
        List<Map<String, Object>> result = queryClient.query(queryWithResult.getQuery());
        if (!QueryResultVerifier.compareResults(result, queryWithResult.getExpectedResults())) {
          LOG.error(
              "Failed while executing %s actualResults : %s",
              queryWithResult, jsonMapper.writeValueAsString(result));
          failed = true;
        } else {
          LOG.info("Results Verified for Query " + queryWithResult.getQuery().getType());
        }
      }

      if (failed) {
        throw new ISE("one or more Twitter queries failed");
      }
    }
  }
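
A minimal call sketch for the helper above, assuming a constructed helper instance and a hypothetical classpath resource:

  // Hypothetical: run the whole query suite twice against one resource file.
  helper.testQueriesFromFile("/queries/sample_queries.json", 2);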
  public boolean scheduleAndWait(
      final String id, ExtractionNamespace namespace, long waitForFirstRun) {
    if (scheduleOrUpdate(id, namespace)) {
      log.debug("Scheduled new namespace [%s]: %s", id, namespace);
    } else {
      log.debug("Namespace [%s] already running: %s", id, namespace);
    }

    final NamespaceImplData namespaceImplData = implData.get(id);
    if (namespaceImplData == null) {
      log.warn("NamespaceLookupExtractorFactory[%s] - deleted during start", id);
      return false;
    }

    boolean success = false;
    try {
      success = namespaceImplData.firstRun.await(waitForFirstRun, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
      log.error(e, "NamespaceLookupExtractorFactory[%s] - interrupted during start", id);
    }
    if (!success) {
      delete(id);
    }
    return success;
  }
Example #7
  public static boolean runJobs(List<Jobby> jobs, HadoopDruidIndexerConfig config) {
    String failedMessage = null;
    for (Jobby job : jobs) {
      if (failedMessage == null) {
        if (!job.run()) {
          failedMessage = String.format("Job[%s] failed!", job.getClass());
        }
      }
    }

    if (!config.getSchema().getTuningConfig().isLeaveIntermediate()) {
      if (failedMessage == null || config.getSchema().getTuningConfig().isCleanupOnFailure()) {
        Path workingPath = config.makeIntermediatePath();
        log.info("Deleting path[%s]", workingPath);
        try {
          workingPath
              .getFileSystem(injectSystemProperties(new Configuration()))
              .delete(workingPath, true);
        } catch (IOException e) {
          log.error(e, "Failed to cleanup path[%s]", workingPath);
        }
      }
    }

    if (failedMessage != null) {
      throw new ISE(failedMessage);
    }

    return true;
  }
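
For context, a minimal Jobby sketch; the interface is essentially a single run() method returning success, and this implementation is hypothetical:

  // Hypothetical job: runJobs records the first failure and skips the rest,
  // so each job just reports success or failure via its return value.
  public class NoopJob implements Jobby {
    @Override
    public boolean run() {
      return true;
    }
  }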
Example #8
 @Override
 public void log(Request request, Response response) {
   if (logger.isDebugEnabled()) {
     logger.debug(
         "%s %s %s",
         request.getMethod(), request.getUri().toString(), request.getProtocol().toString());
   }
 }
Example #9
 @Override
 public void stop() {
   log.info("Stopping object[%s]", o);
   try {
     stopMethod.invoke(o);
   } catch (Exception e) {
     log.error(e, "Unable to invoke stopMethod() on %s", o.getClass());
   }
 }
 @LifecycleStop
 public void stop() {
   if (log4jShutdown != null) {
     log.debug("Shutting down log4j");
     log4jShutdown.stop();
   } else {
     log.warn("Log4j shutdown was registered in lifecycle but no shutdown object exists!");
   }
 }
Example #11
 public synchronized void close() {
   if (!isOpen.getAndSet(false)) {
     LOGGER.info("Closing already closed lookup");
     return;
   }
   LOGGER.info("Closing loading cache [%s]", id);
   loadingCache.close();
   reverseLoadingCache.close();
 }
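
The getAndSet guard makes close() safe to call more than once; a hedged illustration with a hypothetical lookup instance:

  lookup.close(); // first call closes both caches
  lookup.close(); // second call only logs "Closing already closed lookup"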
  @Override
  public Map<NamedKey, byte[]> getBulk(Iterable<NamedKey> keys) {
    try (ResourceHolder<MemcachedClientIF> clientHolder = client.get()) {
      Map<String, NamedKey> keyLookup =
          Maps.uniqueIndex(
              keys,
              new Function<NamedKey, String>() {
                @Override
                public String apply(@Nullable NamedKey input) {
                  return computeKeyHash(memcachedPrefix, input);
                }
              });

      Map<NamedKey, byte[]> results = Maps.newHashMap();

      BulkFuture<Map<String, Object>> future;
      try {
        future = clientHolder.get().asyncGetBulk(keyLookup.keySet());
      } catch (IllegalStateException e) {
        // operation did not get queued in time (queue is full)
        errorCount.incrementAndGet();
        log.warn(e, "Unable to queue cache operation");
        return results;
      }

      try {
        Map<String, Object> some = future.getSome(timeout, TimeUnit.MILLISECONDS);

        if (future.isTimeout()) {
          future.cancel(false);
          timeoutCount.incrementAndGet();
        }
        missCount.addAndGet(keyLookup.size() - some.size());
        hitCount.addAndGet(some.size());

        for (Map.Entry<String, Object> entry : some.entrySet()) {
          final NamedKey key = keyLookup.get(entry.getKey());
          final byte[] value = (byte[]) entry.getValue();
          if (value != null) {
            results.put(key, deserializeValue(key, value));
          }
        }

        return results;
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw Throwables.propagate(e);
      } catch (ExecutionException e) {
        errorCount.incrementAndGet();
        log.warn(e, "Exception pulling item from cache");
        return results;
      }
    }
  }
  @Override
  public void getSegmentFiles(DataSegment segment, File outDir) throws SegmentLoadingException {
    S3Coords s3Coords = new S3Coords(segment);

    log.info("Pulling index at path[%s] to outDir[%s]", s3Coords, outDir);

    if (!isObjectInBucket(s3Coords)) {
      throw new SegmentLoadingException("IndexFile[%s] does not exist.", s3Coords);
    }

    if (!outDir.exists()) {
      outDir.mkdirs();
    }

    if (!outDir.isDirectory()) {
      throw new ISE("outDir[%s] must be a directory.", outDir);
    }

    long startTime = System.currentTimeMillis();
    S3Object s3Obj = null;

    try {
      s3Obj = s3Client.getObject(new S3Bucket(s3Coords.bucket), s3Coords.path);

      InputStream in = null;
      try {
        in = s3Obj.getDataInputStream();
        final String key = s3Obj.getKey();
        if (key.endsWith(".zip")) {
          CompressionUtils.unzip(in, outDir);
        } else if (key.endsWith(".gz")) {
          final File outFile = new File(outDir, toFilename(key, ".gz"));
          ByteStreams.copy(new GZIPInputStream(in), Files.newOutputStreamSupplier(outFile));
        } else {
          ByteStreams.copy(
              in, Files.newOutputStreamSupplier(new File(outDir, toFilename(key, ""))));
        }
        log.info(
            "Pull of file[%s] completed in %,d millis",
            s3Obj, System.currentTimeMillis() - startTime);
      } catch (IOException e) {
        FileUtils.deleteDirectory(outDir);
        throw new SegmentLoadingException(e, "Problem decompressing object[%s]", s3Obj);
      } finally {
        Closeables.closeQuietly(in);
      }
    } catch (Exception e) {
      throw new SegmentLoadingException(e, e.getMessage());
    } finally {
      S3Utils.closeStreamsQuietly(s3Obj);
    }
  }
 /**
  * Clears out resources used by the namespace, such as threads. Implementations may override this
  * and call super.delete(...) if they have resources of their own which need to be cleared.
  *
  * <p>This particular method is NOT thread safe, and any implementation which is intended to be
  * thread safe should guard calls to this method.
  *
  * @param ns The namespace to be deleted
  * @return True if a deletion occurred, false if no deletion occurred.
  * @throws ISE if there is an error cancelling the namespace's future task
  */
 public boolean delete(final String ns) {
   final NamespaceImplData implDatum = implData.get(ns);
   final boolean deleted = removeNamespaceLocalMetadata(implDatum);
   // At this point we have won leader election on canceling this implDatum
   if (deleted) {
     log.info("Deleting namespace [%s]", ns);
     lastVersion.remove(implDatum.name);
     return true;
   } else {
     log.debug("Did not delete namespace [%s]", ns);
     return false;
   }
 }
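
Because delete(...) is documented as not thread safe, callers are expected to guard it. A minimal sketch; the lock object and wrapper method are hypothetical:

  // Hypothetical guard: serialize delete calls as the Javadoc requires.
  private final Object deleteLock = new Object();

  public boolean guardedDelete(final String ns) {
    synchronized (deleteLock) {
      return delete(ns);
    }
  }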
 public static boolean isInstanceReady(ServerDiscoverySelector serviceProvider) {
   try {
     Server instance = serviceProvider.pick();
     if (instance == null) {
       LOG.warn("Unable to find a host");
       return false;
     }
   } catch (Exception e) {
     LOG.error(e, "Caught exception waiting for host");
     return false;
   }
   return true;
 }
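
A hedged polling sketch built on isInstanceReady; the retry bound and poll interval are hypothetical, and the enclosing method must handle InterruptedException:

  // Hypothetical readiness poll: retry until the selector yields a host.
  int tries = 0;
  while (!isInstanceReady(serviceProvider) && ++tries < 30) {
    Thread.sleep(1000); // hypothetical one-second poll interval
  }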
Example #16
 @Override
 public void stop() {
   for (Method method : o.getClass().getMethods()) {
     if (method.getAnnotation(LifecycleStop.class) != null) {
       log.info("Invoking stop method[%s] on object[%s].", method, o);
       try {
         method.invoke(o);
       } catch (Exception e) {
         log.error(e, "Exception when stopping method[%s] on object[%s]", method, o);
       }
     }
   }
 }
Example #17
  @Override
  public Job addInputPaths(HadoopDruidIndexerConfig config, Job job) throws IOException {
    final Set<Interval> intervals = Sets.newTreeSet(Comparators.intervals());
    Optional<Set<Interval>> optionalIntervals = config.getSegmentGranularIntervals();
    if (optionalIntervals.isPresent()) {
      for (Interval segmentInterval : optionalIntervals.get()) {
        for (Interval dataInterval : dataGranularity.getIterable(segmentInterval)) {
          intervals.add(dataInterval);
        }
      }
    }

    Path betaInput = new Path(inputPath);
    FileSystem fs = betaInput.getFileSystem(job.getConfiguration());
    Set<String> paths = Sets.newTreeSet();
    Pattern fileMatcher = Pattern.compile(filePattern);

    DateTimeFormatter customFormatter = null;
    if (pathFormat != null) {
      customFormatter = DateTimeFormat.forPattern(pathFormat);
    }

    for (Interval interval : intervals) {
      DateTime t = interval.getStart();
      String intervalPath = null;
      if (customFormatter != null) {
        intervalPath = customFormatter.print(t);
      } else {
        intervalPath = dataGranularity.toPath(t);
      }

      Path granularPath = new Path(betaInput, intervalPath);
      log.info("Checking path[%s]", granularPath);
      for (FileStatus status : FSSpideringIterator.spiderIterable(fs, granularPath)) {
        final Path filePath = status.getPath();
        if (fileMatcher.matcher(filePath.toString()).matches()) {
          paths.add(filePath.toString());
        }
      }
    }

    for (String path : paths) {
      log.info("Appending path[%s]", path);
      FileInputFormat.addInputPath(job, new Path(path));
    }

    return job;
  }
Example #18
  public void dropSegment(DataSegment segment, LoadPeonCallback callback) {
    synchronized (lock) {
      if ((currentlyLoading != null)
          && currentlyLoading.getSegmentIdentifier().equals(segment.getIdentifier())) {
        if (callback != null) {
          currentlyLoading.addCallback(callback);
        }
        return;
      }
    }

    SegmentHolder holder = new SegmentHolder(segment, DROP, Arrays.asList(callback));

    synchronized (lock) {
      if (segmentsToDrop.contains(holder)) {
        if (callback != null) {
          // Attach the callback to the already-queued drop request; currentlyLoading
          // may be null or a different segment at this point.
          for (SegmentHolder queued : segmentsToDrop) {
            if (queued.equals(holder)) {
              queued.addCallback(callback);
              break;
            }
          }
        }
        return;
      }
    }

    log.info("Asking server peon[%s] to drop segment[%s]", basePath, segment);
    segmentsToDrop.add(holder);
    doNext();
  }
Example #19
  @Override
  protected void map(Object key, Object value, Context context)
      throws IOException, InterruptedException {
    try {
      final InputRow inputRow;
      try {
        inputRow = parseInputRow(value, parser);
      } catch (Exception e) {
        if (config.isIgnoreInvalidRows()) {
          log.debug(e, "Ignoring invalid row [%s] due to parsing error", value.toString());
          context
              .getCounter(HadoopDruidIndexerConfig.IndexJobCounters.INVALID_ROW_COUNTER)
              .increment(1);
          return; // we're ignoring this invalid row
        } else {
          throw e;
        }
      }

      if (!granularitySpec.bucketIntervals().isPresent()
          || granularitySpec
              .bucketInterval(new DateTime(inputRow.getTimestampFromEpoch()))
              .isPresent()) {
        innerMap(inputRow, value, context);
      }
    } catch (RuntimeException e) {
      throw new RE(e, "Failure on row[%s]", value);
    }
  }
  public void shutdown() throws IOException {
    final long truncatedNow = segmentGranularity.truncate(new DateTime()).getMillis();
    final long end = segmentGranularity.increment(truncatedNow) + windowMillis;
    final Duration timeUntilShutdown = new Duration(System.currentTimeMillis(), end);

    log.info("Shutdown at approx. %s (in %s)", new DateTime(end), timeUntilShutdown);

    ScheduledExecutors.scheduleWithFixedDelay(
        scheduledExecutor,
        timeUntilShutdown,
        new Callable<ScheduledExecutors.Signal>() {
          @Override
          public ScheduledExecutors.Signal call() throws Exception {
            try {
              valveOn.set(false);
            } catch (Exception e) {
              throw Throwables.propagate(e);
            }

            return ScheduledExecutors.Signal.STOP;
          }
        });

    beginRejectionPolicy = true;
  }
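
A worked example of the shutdown-time arithmetic above, assuming hourly segment granularity and a ten-minute window (all values hypothetical):

  // now           = 14:20
  // truncatedNow  = 14:00  (hourly truncation of "now")
  // end           = 15:00 + 10-minute window = 15:10
  // so the valve closes roughly 50 minutes after the shutdown request.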
Example #21
  private Map<String, Object> buildStringKeyMap(ByteBuffer input) {
    Map<String, Object> theMap = Maps.newHashMap();

    try {
      DynamicMessage message = DynamicMessage.parseFrom(descriptor, ByteString.copyFrom(input));
      Map<Descriptors.FieldDescriptor, Object> allFields = message.getAllFields();

      for (Map.Entry<Descriptors.FieldDescriptor, Object> entry : allFields.entrySet()) {
        String name = entry.getKey().getName();
        if (theMap.containsKey(name)) {
          continue;
          // Perhaps throw an exception here?
          // throw new RuntimeException("duplicate key " + name + " in " + message);
        }
        Object value = entry.getValue();
        if (value instanceof Descriptors.EnumValueDescriptor) {
          Descriptors.EnumValueDescriptor desc = (Descriptors.EnumValueDescriptor) value;
          value = desc.getName();
        }

        theMap.put(name, value);
      }

    } catch (InvalidProtocolBufferException e) {
      log.warn(e, "Problem with protobuf something");
    }
    return theMap;
  }
Example #22
 @Override
 public void newEntry(String name, Map properties) {
   synchronized (lock) {
     if (currentlyLoading == null) {
       log.warn(
           "Server[%s] a new entry[%s] appeared, even though nothing is currently loading[%s]",
           basePath, name, currentlyLoading);
     } else {
       if (!name.equals(currentlyLoading.getSegmentIdentifier())) {
         log.warn(
             "Server[%s] a new entry[%s] appeared that is not the currently loading entry[%s]",
             basePath, name, currentlyLoading);
       } else {
         log.info("Server[%s]'s currently loading entry[%s] appeared.", basePath, name);
       }
     }
   }
 }
Example #23
 @Override
 public void start() throws Exception {
   for (Method method : o.getClass().getMethods()) {
     if (method.getAnnotation(LifecycleStart.class) != null) {
       log.info("Invoking start method[%s] on object[%s].", method, o);
       method.invoke(o);
     }
   }
 }
Example #24
  public IncrementalIndexAdapter(
      Interval dataInterval, IncrementalIndex<?> index, BitmapFactory bitmapFactory) {
    this.dataInterval = dataInterval;
    this.index = index;

    /* Sometimes it's hard to tell whether a dimension contains a null value or not.
     * If a dimension has explicitly shown a null or empty value, then yes, it contains
     * a null value. But if all of a dimension's observed values are non-null, it is
     * still too early to say the dimension contains no null values. Consider a two-row
     * case: the first row has "dimA=1" and "dimB=2", while the second row has only
     * "dimA=3". dimB's only observed value is "2", and it never explicitly showed a
     * null or empty value; but once the two rows are combined, dimB is null in row 2.
     * So we have to iterate over all rows to determine whether a dimension contains
     * a null value.
     */
    this.hasNullValueDimensions = Sets.newHashSet();

    final List<IncrementalIndex.DimensionDesc> dimensions = index.getDimensions();

    indexers = Maps.newHashMapWithExpectedSize(dimensions.size());
    for (IncrementalIndex.DimensionDesc dimension : dimensions) {
      indexers.put(dimension.getName(), new DimensionIndexer(dimension));
    }

    int rowNum = 0;
    for (IncrementalIndex.TimeAndDims timeAndDims : index.getFacts().keySet()) {
      final int[][] dims = timeAndDims.getDims();

      for (IncrementalIndex.DimensionDesc dimension : dimensions) {
        final int dimIndex = dimension.getIndex();
        DimensionIndexer indexer = indexers.get(dimension.getName());
        if (dimIndex >= dims.length || dims[dimIndex] == null) {
          hasNullValueDimensions.add(dimension.getName());
          continue;
        }
        final IncrementalIndex.DimDim values = dimension.getValues();
        if (hasNullValue(values, dims[dimIndex])) {
          hasNullValueDimensions.add(dimension.getName());
        }

        final MutableBitmap[] bitmapIndexes = indexer.invertedIndexes;

        for (Comparable dimIdxComparable : dims[dimIndex]) {
          Integer dimIdx = (Integer) dimIdxComparable;
          if (bitmapIndexes[dimIdx] == null) {
            bitmapIndexes[dimIdx] = bitmapFactory.makeEmptyMutableBitmap();
          }
          try {
            bitmapIndexes[dimIdx].add(rowNum);
          } catch (Exception e) {
            log.warn(e, "Unable to add row [%d] to bitmap index", rowNum);
          }
        }
      }

      ++rowNum;
    }
  }
 // Returns true if the namespace was actually deleted.
 public boolean checkedDelete(String namespaceName) {
   final NamespaceImplData implDatum = implData.get(namespaceName);
   if (implDatum == null) {
     // Delete but we don't have it?
     log.wtf("Asked to delete something I just lost [%s]", namespaceName);
     return false;
   }
   return delete(namespaceName);
 }
Example #26
 @Override
 public void pushTaskLog(final String taskid, File file) throws IOException {
   if (!config.getDirectory().exists()) {
     config.getDirectory().mkdir();
   }
   final File outputFile = fileForTask(taskid);
   Files.copy(file, outputFile);
   log.info("Wrote task log to: %s", outputFile);
 }
Example #27
 private static void awaitNextRetry(final Throwable e, final int nTry)
     throws InterruptedException {
   final long baseSleepMillis = 1000;
   final long maxSleepMillis = 60000;
   final double fuzzyMultiplier = Math.min(Math.max(1 + 0.2 * new Random().nextGaussian(), 0), 2);
   final long sleepMillis =
       (long) (Math.min(maxSleepMillis, baseSleepMillis * Math.pow(2, nTry)) * fuzzyMultiplier);
   log.warn(e, "Failed on try %d, retrying in %,dms.", nTry, sleepMillis);
   Thread.sleep(sleepMillis);
 }
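
A minimal retry-loop sketch built on awaitNextRetry above; maxTries and doWork() are hypothetical:

  // Hypothetical retry loop: back off (with jitter) between transient failures.
  int nTry = 0;
  final int maxTries = 5;
  while (true) {
    try {
      doWork(); // hypothetical operation that may fail transiently
      break;
    } catch (Exception e) {
      if (++nTry >= maxTries) {
        throw e;
      }
      awaitNextRetry(e, nTry);
    }
  }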
Example #28
  public static void addNextRow(
      final Supplier<Committer> committerSupplier,
      final Firehose firehose,
      final Plumber plumber,
      final boolean reportParseExceptions,
      final FireDepartmentMetrics metrics) {
    try {
      final InputRow inputRow = firehose.nextRow();

      if (inputRow == null) {
        if (reportParseExceptions) {
          throw new ParseException("null input row");
        } else {
          log.debug("Discarded null input row, considering unparseable.");
          metrics.incrementUnparseable();
          return;
        }
      }

      // Included in ParseException try/catch, as additional parsing can be done during indexing.
      int numRows = plumber.add(inputRow, committerSupplier);

      if (numRows == -1) {
        metrics.incrementThrownAway();
        log.debug("Discarded row[%s], considering thrownAway.", inputRow);
        return;
      }

      metrics.incrementProcessed();
    } catch (ParseException e) {
      if (reportParseExceptions) {
        throw e;
      } else {
        log.debug(e, "Discarded row due to exception, considering unparseable.");
        metrics.incrementUnparseable();
      }
    } catch (IndexSizeExceededException e) {
      // Shouldn't happen if this is only being called by a single thread.
      // plumber.add should be swapping out indexes before they fill up.
      throw new ISE(e, "WTF?! Index size exceeded, this shouldn't happen. Bad Plumber!");
    }
  }
Example #29
 @JsonCreator
 public RandomFirehoseFactory(
     @JsonProperty("sleepUsec") Long sleepUsec,
     @JsonProperty("maxGeneratedRows") Long maxGeneratedRows,
     @JsonProperty("seed") Long seed,
     @JsonProperty("nTokens") Integer nTokens,
     @JsonProperty("nPerSleep") Integer nPerSleep) {
   long nsec = (sleepUsec != null && sleepUsec > 0) ? sleepUsec * 1000L : 0;
   long msec = nsec / 1000000L;
   this.delayMsec = msec;
   this.delayNsec = (int) (nsec - (msec * 1000000L));
   this.maxGeneratedRows = maxGeneratedRows;
   this.seed = seed;
   // Validate before assigning, so the corrected values are the ones actually stored.
   if (nTokens == null || nTokens <= 0) {
     log.warn("nTokens parameter " + nTokens + " ignored; must be greater than or equal to 1");
     nTokens = 1;
   }
   if (nPerSleep == null || nPerSleep <= 0) {
     log.warn("nPerSleep parameter " + nPerSleep + " ignored; must be greater than or equal to 1");
     nPerSleep = 1;
   }
   this.nTokens = nTokens;
   this.nPerSleep = nPerSleep;
   log.info("maxGeneratedRows=" + maxGeneratedRows);
   log.info("seed=" + ((seed == null || seed == 0L) ? "random value" : seed));
   log.info("nTokens=" + nTokens);
   log.info("nPerSleep=" + nPerSleep);
   double dmsec = (double) delayMsec + ((double) this.delayNsec) / 1000000.;
   if (dmsec > 0.0) {
     log.info("sleep period=" + dmsec + "msec");
     log.info(
         "approximate max rate of record generation="
             + (nPerSleep * 1000. / dmsec)
             + "/sec"
             + "  or  "
             + (60. * nPerSleep * 1000. / dmsec)
             + "/minute");
   } else {
     log.info("sleep period= NONE");
     log.info("approximate max rate of record generation= as fast as possible");
   }
 }
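
A hedged construction sketch exercising the @JsonCreator binding above; the JSON field values are illustrative and the mapper wiring is assumed:

  // Hypothetical: build the factory from JSON via the Jackson annotations above.
  ObjectMapper mapper = new ObjectMapper();
  RandomFirehoseFactory factory = mapper.readValue(
      "{\"sleepUsec\":1000,\"maxGeneratedRows\":5000000,\"seed\":0,\"nTokens\":255,\"nPerSleep\":3}",
      RandomFirehoseFactory.class);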
Example #30
  @Override
  public void entryRemoved(String name) {
    synchronized (lock) {
      if (currentlyLoading == null) {
        log.warn(
            "Server[%s] an entry[%s] was removed even though it wasn't loading!?", basePath, name);
        return;
      }
      if (!name.equals(currentlyLoading.getSegmentIdentifier())) {
        log.warn(
            "Server[%s] entry [%s] was removed even though it's not what is currently loading[%s]",
            basePath, name, currentlyLoading);
        return;
      }
      actionCompleted();
      log.info("Server[%s] done processing [%s]", basePath, name);
    }

    doNext();
  }