public static void aggregateOpenGammaClusterSensors(DynamicFabric webFabric) {
    // At the fabric level, take the total for ViewProcesses and Reqs/Sec,
    // and take the average for reqLatency (note: a simple average, assuming all regions are weighted equally)
   webFabric.addEnricher(
       CustomAggregatingEnricher.newSummingEnricher(
           MutableMap.of("allMembers", true),
           OpenGammaMonitoringAggregation.VIEW_PROCESSES_COUNT,
           OpenGammaMonitoringAggregation.VIEW_PROCESSES_COUNT,
           0,
           null));
   webFabric.addEnricher(
       CustomAggregatingEnricher.newSummingEnricher(
           MutableMap.of("allMembers", true),
           DynamicWebAppCluster.REQUESTS_PER_SECOND_IN_WINDOW,
           DynamicWebAppCluster.REQUESTS_PER_SECOND_IN_WINDOW,
           null,
           null));
   webFabric.addEnricher(
       CustomAggregatingEnricher.newSummingEnricher(
           MutableMap.of("allMembers", true),
           OpenGammaMonitoringAggregation.OG_SERVER_COUNT,
           OpenGammaMonitoringAggregation.OG_SERVER_COUNT,
           null,
           null));
   webFabric.addEnricher(
       CustomAggregatingEnricher.newAveragingEnricher(
           MutableMap.of("allMembers", true),
           HttpLatencyDetector.REQUEST_LATENCY_IN_SECONDS_IN_WINDOW,
           HttpLatencyDetector.REQUEST_LATENCY_IN_SECONDS_IN_WINDOW,
           null,
           null));
 }
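
For comparison, the averaging enricher above can also be expressed with the Enrichers fluent builder that appears later in this file (in aggregateOpenGammaServerSensors). This is a minimal sketch, not part of the original method; it assumes the builder accepts the latency sensor as both the aggregated source and the published target, as the builder-style examples below do for their per-node sensors.

    // Sketch only: fluent-builder equivalent of the latency-averaging enricher above.
    webFabric.addEnricher(
        Enrichers.builder()
            .aggregating(HttpLatencyDetector.REQUEST_LATENCY_IN_SECONDS_IN_WINDOW)
            .fromMembers()
            .publishing(HttpLatencyDetector.REQUEST_LATENCY_IN_SECONDS_IN_WINDOW)
            .computingAverage()
            .defaultValueForUnreportedSensors(null)
            .build());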
Example n. 2
  private SshPollValue exec(String command, Map<String, String> env) throws IOException {
    if (log.isTraceEnabled())
      log.trace(
          "Ssh polling for {}, executing {} with env {}", new Object[] {machine, command, env});
    ByteArrayOutputStream stdout = new ByteArrayOutputStream();
    ByteArrayOutputStream stderr = new ByteArrayOutputStream();

    int exitStatus;
    if (execAsCommand) {
      exitStatus =
          machine.execCommands(
              MutableMap.<String, Object>of("out", stdout, "err", stderr),
              "ssh-feed",
              ImmutableList.of(command),
              env);
    } else {
      exitStatus =
          machine.execScript(
              MutableMap.<String, Object>of("out", stdout, "err", stderr),
              "ssh-feed",
              ImmutableList.of(command),
              env);
    }

    return new SshPollValue(
        machine, exitStatus, new String(stdout.toByteArray()), new String(stderr.toByteArray()));
  }
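
A hedged sketch of how a caller inside the same feed class might consume the returned value. The SshPollValue accessor names used here (getExitStatus, getStdout, getStderr) are assumptions about that class and may differ; the command string is purely illustrative.

    // Illustration only; would run inside a poll job that handles the IOException from exec().
    SshPollValue result = exec("echo hello", ImmutableMap.<String, String>of());
    if (result.getExitStatus() != 0) {
      log.warn(
          "Ssh poll on {} returned {}: {}",
          new Object[] {machine, result.getExitStatus(), result.getStderr()});
    } else {
      String out = result.getStdout(); // parse into a sensor value here
    }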
  @Override
  public void launch() {
    // TODO if can't be root, and ports > 1024 are in the allowed port range,
    // prefer that; could do this on SshMachineLocation which implements PortSupplier,
    // invoked from PortAttrSensorAndConfigKey, which is invoked from
    // MachineLifecycleTasks.preStartCustom
    Networking.checkPortsValid(MutableMap.of("httpPort", getHttpPort()));

    // We wait for evidence of the process running because, using
    // brooklyn.ssh.config.tool.class=brooklyn.util.internal.ssh.cli.SshCliTool,
    // we saw the ssh session return before the launched process was fully running,
    // so the process failed to start.
    newScript(MutableMap.of("usePidFile", false), LAUNCHING)
        .body
        .append(
            format("cd %s", getRunDir()),
            BashCommands.requireExecutable("./sbin/nginx"),
            sudoBashCIfPrivilegedPort(
                getHttpPort(),
                format(
                    "nohup ./sbin/nginx -p %s/ -c conf/server.conf > %s 2>&1 &",
                    getRunDir(), getLogFileLocation())),
            format(
                "for i in {1..10}\n"
                    + "do\n"
                    + "    test -f %1$s && ps -p `cat %1$s` && exit\n"
                    + "    sleep 1\n"
                    + "done\n"
                    + "echo \"No explicit error launching nginx but couldn't find process by pid; continuing but may subsequently fail\"\n"
                    + "cat %2$s | tee /dev/stderr",
                getPidFile(), getLogFileLocation()))
        .execute();
  }
Example n. 4
  @Override
  public void launch() {
    Map ports =
        MutableMap.of(
            "httpPort", getHttpPort(), "jmxPort", getJmxPort(), "shutdownPort", getShutdownPort());

    NetworkUtils.checkPortsValid(ports);
    Map flags = MutableMap.of("usePidFile", false);

    // We wait for evidence of tomcat running because, using
    // brooklyn.ssh.config.tool.class=brooklyn.util.internal.ssh.cli.SshCliTool,
    // we saw the ssh session return before the tomcat process was fully running
    // so the process failed to start.
    newScript(flags, LAUNCHING)
        .body
        .append(
            format("%s/bin/startup.sh >>$RUN/console 2>&1 </dev/null", getExpandedInstallDir()),
            "for i in {1..10}\n"
                + "do\n"
                + "    if [ -s "
                + getLogFileLocation()
                + " ]; then exit; fi\n"
                + "    sleep 1\n"
                + "done\n"
                + "echo \"Couldn't determine if tomcat-server is running (logs/catalina.out is still empty); continuing but may subsequently fail\"")
        .execute();
  }
 @BeforeMethod(alwaysRun = true)
 public void setUp() {
   em = new BasicExecutionManager("mycontext");
   ec = new BasicExecutionContext(em);
   cancellations = new Semaphore(0);
   messages = new ArrayList<String>();
   monitorableJobSemaphoreMap = MutableMap.of();
   monitorableTasksMap = MutableMap.of();
   stopwatch = Stopwatch.createStarted();
 }
 @Override
 public Map<String, Object> getProvisioningFlags(Collection<String> tags) {
   if (tags.size() > 0) {
     LOG.warn("Location {}, ignoring provisioning tags {}", this, tags);
   }
   return MutableMap.<String, Object>of();
 }
 protected void doStartPolling() {
   if (scheduledTask == null || scheduledTask.isDone()) {
     ScheduledTask task =
         new ScheduledTask(MutableMap.of("period", getConfig(POLL_PERIOD)), pollingTaskFactory);
     scheduledTask = ((EntityInternal) entity).getExecutionContext().submit(task);
   }
 }
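
For context, a minimal sketch of the kind of pollingTaskFactory the method above expects. The real field is defined elsewhere in the class, so this body is illustrative only; it reuses the Callable<Task<?>> and BasicTask(Runnable) pattern that appears in the poller example later in this file.

  // Illustration only: a factory that produces a fresh one-shot task for each scheduled run.
  private final Callable<Task<?>> pollingTaskFactory =
      new Callable<Task<?>>() {
        public Task<?> call() {
          return new BasicTask<Void>(
              new Runnable() {
                public void run() {
                  // read the source and emit the sensor value here
                }
              });
        }
      };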
public class JavaSoftwareProcessSshDriverIntegrationTest {

  private static final long TIMEOUT_MS = 10 * 1000;

  private MachineProvisioningLocation localhost =
      new LocalhostMachineProvisioningLocation(MutableMap.of("name", "localhost"));
  private TestApplication app;

  @BeforeMethod(alwaysRun = true)
  public void setup() {
    app = ApplicationBuilder.newManagedApp(TestApplication.class);
  }

  @AfterMethod(alwaysRun = true)
  public void shutdown() {
    if (app != null) Entities.destroyAll(app);
  }

  @Test(groups = "Integration")
  public void testJavaStartStopSshDriverStartsAndStopsApp() {
    final MyEntity entity = app.createAndManageChild(EntitySpecs.spec(MyEntity.class));
    app.start(ImmutableList.of(localhost));
    Asserts.succeedsEventually(
        MutableMap.of("timeout", TIMEOUT_MS),
        new Runnable() {
          public void run() {
            assertTrue(entity.getAttribute(SoftwareProcess.SERVICE_UP));
          }
        });

    entity.stop();
    assertFalse(entity.getAttribute(SoftwareProcess.SERVICE_UP));
  }
}
  @Test
  public void testPolicyUpdatesModel() {
    final MockContainerEntity containerA = newContainer(app, "A", 10, 20);
    final MockContainerEntity containerB = newContainer(app, "B", 11, 21);
    final MockItemEntity item1 = newItem(app, containerA, "1", 12);
    final MockItemEntity item2 = newItem(app, containerB, "2", 13);

    Asserts.succeedsEventually(
        MutableMap.of("timeout", TIMEOUT_MS),
        new Runnable() {
          public void run() {
            assertEquals(model.getPoolSize(), 2);
            assertEquals(model.getPoolContents(), ImmutableSet.of(containerA, containerB));
            assertEquals(model.getItemWorkrate(item1), 12d);
            assertEquals(model.getItemWorkrate(item2), 13d);

            assertEquals(model.getParentContainer(item1), containerA);
            assertEquals(model.getParentContainer(item2), containerB);
            assertEquals(
                model.getContainerWorkrates(), ImmutableMap.of(containerA, 12d, containerB, 13d));

            assertEquals(model.getPoolLowThreshold(), 10 + 11d);
            assertEquals(model.getPoolHighThreshold(), 20 + 21d);
            assertEquals(model.getCurrentPoolWorkrate(), 12 + 13d);
            assertFalse(model.isHot());
            assertFalse(model.isCold());
          }
        });
  }
  @Override
  public void redeployAll() {
    Map<String, String> wars = MutableMap.copyOf(getConfig(WARS_BY_CONTEXT));
    String redeployPrefix = "Redeploy all WARs (count " + wars.size() + ")";

    log.debug("Redeplying all WARs across cluster " + this + ": " + getConfig(WARS_BY_CONTEXT));

    Iterable<CanDeployAndUndeploy> targetEntities =
        Iterables.filter(getChildren(), CanDeployAndUndeploy.class);
    TaskBuilder<Void> tb =
        Tasks.<Void>builder()
            .parallel(true)
            .name(redeployPrefix + " across cluster (size " + Iterables.size(targetEntities) + ")");
    for (Entity targetEntity : targetEntities) {
      TaskBuilder<Void> redeployAllToTarget =
          Tasks.<Void>builder()
              .name(redeployPrefix + " at " + targetEntity + " (after ready check)");
      for (String warContextPath : wars.keySet()) {
        redeployAllToTarget.add(
            Effectors.invocation(
                targetEntity,
                DEPLOY,
                MutableMap.of("url", wars.get(warContextPath), "targetName", warContextPath)));
      }
      tb.add(
          whenServiceUp(
              targetEntity,
              redeployAllToTarget.build(),
              redeployPrefix + " at " + targetEntity + " when ready"));
    }
    DynamicTasks.queueIfPossible(tb.build()).orSubmitAsync(this).asTask().getUnchecked();
  }
 public Task<?> expunge(final Entity entity, final boolean release) {
   if (mgmt.getEntitlementManager()
       .isEntitled(
           Entitlements.getEntitlementContext(),
           Entitlements.INVOKE_EFFECTOR,
           Entitlements.EntityAndItem.of(entity, "expunge"))) {
     return mgmt.getExecutionManager()
         .submit(
             MutableMap.of(
                 "displayName",
                 "expunging " + entity,
                 "description",
                 "REST call to expunge entity " + entity.getDisplayName() + " (" + entity + ")"),
             new Runnable() {
               @Override
               public void run() {
                 if (release) Entities.destroyCatching(entity);
                 else mgmt.getEntityManager().unmanage(entity);
               }
             });
   }
   throw WebResourceUtils.unauthorized(
       "User '%s' is not authorized to expunge entity %s",
       Entitlements.getEntitlementContext().user(), entity);
 }
  protected Map<?, ?> getRenderingConfigurationFor(String catalogId) {
    MutableMap<Object, Object> result = MutableMap.of();
    CatalogItem<?> item = mgmt.getCatalog().getCatalogItem(catalogId);
    if (item == null) return result;

    result.addIfNotNull("iconUrl", item.getIconUrl());
    return result;
  }
  @Test
  public void testCanSetConfig() {
    final ExampleJavaEnricher enricher =
        new ExampleJavaEnricher(MutableMap.of("displayName", "myName", "myConfig1", "myVal1"));
    entity.addEnricher(enricher);

    assertEquals(enricher.getName(), "myName");
    assertEquals(enricher.myConfig1, "myVal1");
  }
Example n. 14
  @SuppressWarnings({"unchecked"})
  public void start() {
    // TODO The previous incarnation of this logged polledSensors.keySet(), but we no longer
    // know that here. Is that ok, or can we do better?

    if (log.isDebugEnabled())
      log.debug("Starting poll for {} (using {})", new Object[] {entity, this});
    if (running) {
      throw new IllegalStateException(
          String.format(
              "Attempt to start poller %s of entity %s when already running", this, entity));
    }

    running = true;

    for (final Callable<?> oneOffJob : oneOffJobs) {
      Task<?> task =
          Tasks.builder()
              .dynamic(false)
              .body((Callable<Object>) oneOffJob)
              .name("Poll")
              .description("One-time poll job " + oneOffJob)
              .build();
      oneOffTasks.add(((EntityInternal) entity).getExecutionContext().submit(task));
    }

    for (final PollJob<V> pollJob : pollJobs) {
      final String scheduleName = pollJob.handler.getDescription();
      if (pollJob.pollPeriod.compareTo(Duration.ZERO) > 0) {
        Callable<Task<?>> pollingTaskFactory =
            new Callable<Task<?>>() {
              public Task<?> call() {
                DynamicSequentialTask<Void> task =
                    new DynamicSequentialTask<Void>(
                        MutableMap.of("displayName", scheduleName, "entity", entity),
                        new Callable<Void>() {
                          public Void call() {
                            pollJob.wrappedJob.run();
                            return null;
                          }
                        });
                BrooklynTaskTags.setTransient(task);
                return task;
              }
            };
        ScheduledTask task =
            new ScheduledTask(MutableMap.of("period", pollJob.pollPeriod), pollingTaskFactory);
        tasks.add((ScheduledTask) Entities.submit(entity, task));
      } else {
        if (log.isDebugEnabled())
          log.debug(
              "Not scheduling poll for {} (using {}), as its period is {}",
              new Object[] {entity, this, pollJob.pollPeriod});
      }
    }
  }
 // Note: 'actual' is evaluated once at the call site, so the repeated assertion only helps
 // when 'actual' is a mutable object whose contents change over time.
 private <T> void assertEqualsEventually(final T actual, final T expected) {
   Asserts.succeedsEventually(
       MutableMap.of("timeout", TIMEOUT_MS),
       new Runnable() {
         @Override
         public void run() {
           assertEquals(actual, expected, "actual=" + actual);
         }
       });
 }
  @Test
  public void testModelIncludesItemsAndContainersStartedBeforePolicyCreated() {
    pool.removePolicy(policy);
    policy.destroy();

    // Set up containers and items.
    final MockContainerEntity containerA = newContainer(app, "A", 10, 100);
    MockItemEntity item1 = newItem(app, containerA, "1", 10);

    policy = new LoadBalancingPolicy(MutableMap.of(), TEST_METRIC, model);
    pool.addPolicy(policy);

    Asserts.succeedsEventually(
        MutableMap.of("timeout", TIMEOUT_MS),
        new Runnable() {
          public void run() {
            assertEquals(model.getContainerWorkrates(), ImmutableMap.of(containerA, 10d));
          }
        });
  }
 public Map<String, Object> customSshConfigKeys() throws UnknownHostException {
   return MutableMap.<String, Object>of(
       "address",
       Networking.getLocalHost(),
       SshTool.PROP_SESSION_TIMEOUT.getName(),
       20000,
       SshTool.PROP_CONNECT_TIMEOUT.getName(),
       50000,
       SshTool.PROP_SCRIPT_HEADER.getName(),
       "#!/bin/bash");
 }
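
A usage sketch under stated assumptions: the returned map has the shape accepted by a map-based SshMachineLocation constructor, but that class and constructor choice are assumptions here, not something shown in the original snippet.

   // Illustration only (assumes a Map-based SshMachineLocation constructor);
   // the caller must handle the UnknownHostException declared by customSshConfigKeys().
   SshMachineLocation localhostSsh = new SshMachineLocation(customSshConfigKeys());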
 public WebAppMonitor waitForAtLeastOneAttempt(Duration timeout) {
   Asserts.succeedsEventually(
       MutableMap.of("timeout", timeout),
       new Runnable() {
         @Override
         public void run() {
           Assert.assertTrue(getAttempts() >= 1);
         }
       });
   return this;
 }
  public static void aggregateOpenGammaServerSensors(Entity cluster) {

    List<? extends List<? extends AttributeSensor<? extends Number>>> summingEnricherSetup =
        ImmutableList.of(
            ImmutableList.of(PROCESSING_TIME_PER_SECOND_LAST, PROCESSING_TIME_PER_SECOND_LAST),
            ImmutableList.of(
                PROCESSING_TIME_PER_SECOND_IN_WINDOW, PROCESSING_TIME_PER_SECOND_IN_WINDOW),
            ImmutableList.of(VIEW_PROCESSES_COUNT, VIEW_PROCESSES_COUNT),
            ImmutableList.of(
                PROCESS_CPU_TIME_FRACTION_IN_WINDOW, PROCESS_CPU_TIME_FRACTION_IN_WINDOW));

    for (List<? extends AttributeSensor<? extends Number>> es : summingEnricherSetup) {
      AttributeSensor<? extends Number> t = es.get(0);
      AttributeSensor<? extends Number> total = es.get(1);
      CustomAggregatingEnricher<?, ?> totaller =
          CustomAggregatingEnricher.newSummingEnricher(
              MutableMap.of("allMembers", true), t, total, null, null);
      cluster.addEnricher(totaller);
    }

    cluster.addEnricher(
        Enrichers.builder()
            .aggregating(PROCESSING_TIME_PER_SECOND_LAST)
            .fromMembers()
            .publishing(PROCESSING_TIME_PER_SECOND_LAST_PER_NODE)
            .computingAverage()
            .defaultValueForUnreportedSensors(null)
            .build());
    cluster.addEnricher(
        Enrichers.builder()
            .aggregating(PROCESSING_TIME_PER_SECOND_IN_WINDOW)
            .fromMembers()
            .publishing(PROCESSING_TIME_PER_SECOND_IN_WINDOW_PER_NODE)
            .computingAverage()
            .defaultValueForUnreportedSensors(null)
            .build());
    cluster.addEnricher(
        Enrichers.builder()
            .aggregating(PROCESS_CPU_TIME_FRACTION_IN_WINDOW)
            .fromMembers()
            .publishing(PROCESS_CPU_TIME_FRACTION_IN_WINDOW_PER_NODE)
            .computingAverage()
            .defaultValueForUnreportedSensors(null)
            .build());
    cluster.addEnricher(
        Enrichers.builder()
            .aggregating(VIEW_PROCESSES_COUNT)
            .fromMembers()
            .publishing(VIEW_PROCESSES_COUNT_PER_NODE)
            .computingAverage()
            .defaultValueForUnreportedSensors(0)
            .build());
  }
Example n. 20
 @Override
 public void stop() {
   // Don't `kill -9`, as that doesn't stop the worker processes
   newScript(MutableMap.of("usePidFile", false), STOPPING)
       .body
       .append(
           format("cd %s", getRunDir()),
           format("export PID=`cat %s`", getPidFile()),
           "test -n \"$PID\" || exit 0",
           sudoIfPrivilegedPort(getHttpPort(), "kill $PID"))
       .execute();
 }
  @Override
  public void undeploy(String targetName) {
    checkNotNull(targetName, "targetName");
    targetName = FILENAME_TO_WEB_CONTEXT_MAPPER.convertDeploymentTargetNameToContext(targetName);

    // set it up so future nodes get the right wars
    if (!removeFromWarsByContext(this, targetName)) {
      DynamicTasks.submit(
          Tasks.warning(
              "Context "
                  + targetName
                  + " not known at "
                  + this
                  + "; attempting to undeploy regardless",
              null),
          this);
    }

    log.debug(
        "Undeploying "
            + targetName
            + " across cluster "
            + this
            + "; WARs now "
            + getConfig(WARS_BY_CONTEXT));

    Iterable<CanDeployAndUndeploy> targets =
        Iterables.filter(getChildren(), CanDeployAndUndeploy.class);
    TaskBuilder<Void> tb =
        Tasks.<Void>builder()
            .parallel(true)
            .name(
                "Undeploy "
                    + targetName
                    + " across cluster (size "
                    + Iterables.size(targets)
                    + ")");
    for (Entity target : targets) {
      tb.add(
          whenServiceUp(
              target,
              Effectors.invocation(target, UNDEPLOY, MutableMap.of("targetName", targetName)),
              "Undeploy " + targetName + " at " + target + " when ready"));
    }
    DynamicTasks.queueIfPossible(tb.build()).orSubmitAsync(this).asTask().getUnchecked();

    // Update attribute
    Set<String> deployedWars = MutableSet.copyOf(getAttribute(DEPLOYED_WARS));
    deployedWars.remove(
        FILENAME_TO_WEB_CONTEXT_MAPPER.convertDeploymentTargetNameToContext(targetName));
    setAttribute(DEPLOYED_WARS, deployedWars);
  }
  /** @deprecated since 0.7; support for rebinding old-style entities is deprecated */
  @Test
  public void testHandlesOldStyleEntity() throws Exception {
    MyOldStyleEntity origE = new MyOldStyleEntity(MutableMap.of("confName", "myval"), origApp);
    Entities.manage(origE);

    newApp = rebind();

    MyOldStyleEntity newE =
        (MyOldStyleEntity)
            Iterables.find(newApp.getChildren(), EntityPredicates.idEqualTo(origE.getId()));

    assertEquals(newE.getConfig(MyOldStyleEntity.CONF_NAME), "myval");
  }
 private void assertHasEventEventually(
     final Sensor<?> sensor,
     final Predicate<Object> componentPredicate,
     final Predicate<? super CharSequence> descriptionPredicate) {
   Asserts.succeedsEventually(
       MutableMap.of("timeout", TIMEOUT_MS),
       new Runnable() {
         @Override
         public void run() {
           assertHasEvent(sensor, componentPredicate, descriptionPredicate);
         }
       });
 }
 @Override
 public Map<String, ?> getFlags() {
   return MutableMap.of(
       "locationId",
       LOCATION_ID,
       "imageId",
       IMAGE_ID,
       "uri",
       URI + IMAGE_ID,
       "groupId",
       "storm-test",
       "stopIptables",
       "true");
 }
  @Test(groups = "Integration")
  public void testJavaStartStopSshDriverStartsAndStopsApp() {
    final MyEntity entity = app.createAndManageChild(EntitySpecs.spec(MyEntity.class));
    app.start(ImmutableList.of(localhost));
    Asserts.succeedsEventually(
        MutableMap.of("timeout", TIMEOUT_MS),
        new Runnable() {
          public void run() {
            assertTrue(entity.getAttribute(SoftwareProcess.SERVICE_UP));
          }
        });

    entity.stop();
    assertFalse(entity.getAttribute(SoftwareProcess.SERVICE_UP));
  }
  @Test(groups = "Integration")
  public void testWaitsForServiceUp() throws Exception {
    entity.setAttribute(TestEntity.SERVICE_UP, false);

    entity.addEnricher(
        HttpLatencyDetector.builder().url(baseUrl).period(100, TimeUnit.MILLISECONDS).build());

    // nothing until url is set
    EntityTestUtils.assertAttributeEqualsContinually(
        MutableMap.of("timeout", 200),
        entity,
        HttpLatencyDetector.REQUEST_LATENCY_IN_SECONDS_MOST_RECENT,
        null);

    // gets value after url is set, and gets rolling average
    entity.setAttribute(TestEntity.SERVICE_UP, true);
    assertLatencyAttributesNonNull(entity);
  }
  @Override
  public void deploy(String url, String targetName) {
    checkNotNull(url, "url");
    checkNotNull(targetName, "targetName");
    targetName = FILENAME_TO_WEB_CONTEXT_MAPPER.convertDeploymentTargetNameToContext(targetName);

    // set it up so future nodes get the right wars
    addToWarsByContext(this, url, targetName);

    log.debug(
        "Deploying "
            + targetName
            + "->"
            + url
            + " across cluster "
            + this
            + "; WARs now "
            + getConfig(WARS_BY_CONTEXT));

    Iterable<CanDeployAndUndeploy> targets =
        Iterables.filter(getChildren(), CanDeployAndUndeploy.class);
    TaskBuilder<Void> tb =
        Tasks.<Void>builder()
            .parallel(true)
            .name("Deploy " + targetName + " to cluster (size " + Iterables.size(targets) + ")");
    for (Entity target : targets) {
      tb.add(
          whenServiceUp(
              target,
              Effectors.invocation(
                  target, DEPLOY, MutableMap.of("url", url, "targetName", targetName)),
              "Deploy " + targetName + " to " + target + " when ready"));
    }
    DynamicTasks.queueIfPossible(tb.build()).orSubmitAsync(this).asTask().getUnchecked();

    // Update attribute
    // TODO support for atomic sensor update (should be part of standard tooling; NB there is some
    // work towards this, according to @aledsage)
    Set<String> deployedWars = MutableSet.copyOf(getAttribute(DEPLOYED_WARS));
    deployedWars.add(targetName);
    setAttribute(DEPLOYED_WARS, deployedWars);
  }
 public Task<?> destroy(final Application application) {
   return mgmt.getExecutionManager()
       .submit(
           MutableMap.of(
               "displayName",
               "destroying " + application,
               "description",
               "REST call to destroy application "
                   + application.getDisplayName()
                   + " ("
                   + application
                   + ")"),
           new Runnable() {
             @Override
             public void run() {
               ((EntityInternal) application).destroy();
               mgmt.getEntityManager().unmanage(application);
             }
           });
 }
Example n. 29
  @SuppressWarnings({"unchecked", "rawtypes"})
  protected void recomputeAfterDelay(long delay) {
    if (isRunning() && executorQueued.compareAndSet(false, true)) {
      long now = System.currentTimeMillis();
      // Throttle: wait at least MIN_PERIOD_BETWEEN_EXECS_MILLIS after the previous execution.
      delay = Math.max(0, Math.max(delay, (executorTime + MIN_PERIOD_BETWEEN_EXECS_MILLIS) - now));
      if (LOG.isTraceEnabled()) LOG.trace("{} scheduling publish in {}ms", this, delay);

      Runnable job =
          new Runnable() {
            @Override
            public void run() {
              try {
                executorTime = System.currentTimeMillis();
                executorQueued.set(false);

                onEvent(null);

              } catch (Exception e) {
                if (isRunning()) {
                  LOG.error("Error in enricher " + this + ": " + e, e);
                } else {
                  if (LOG.isDebugEnabled())
                    LOG.debug("Error in enricher " + this + " (but no longer running): " + e, e);
                }
              } catch (Throwable t) {
                LOG.error("Error in enricher " + this + ": " + t, t);
                throw Exceptions.propagate(t);
              }
            }
          };

      ScheduledTask task =
          new ScheduledTask(
              MutableMap.of("delay", Duration.of(delay, TimeUnit.MILLISECONDS)),
              new BasicTask(job));
      ((EntityInternal) entity).getExecutionContext().submit(task);
    }
  }
  public static Map<ConfigKey<String>, String> finalAndOriginalSpecs(
      String finalSpec, Object... sourcesForOriginalSpec) {
    // yuck!: TODO should clean up how these things get passed around
    Map<ConfigKey<String>, String> result = MutableMap.of();
    if (finalSpec != null) result.put(LocationInternal.FINAL_SPEC, finalSpec);

    String originalSpec = null;
    for (Object source : sourcesForOriginalSpec) {
      if (source instanceof CharSequence) originalSpec = source.toString();
      else if (source instanceof Map) {
        if (originalSpec == null)
          originalSpec = Strings.toString(((Map<?, ?>) source).get(LocationInternal.ORIGINAL_SPEC));
        if (originalSpec == null)
          originalSpec =
              Strings.toString(((Map<?, ?>) source).get(LocationInternal.ORIGINAL_SPEC.getName()));
      }
      if (originalSpec != null) break;
    }
    if (originalSpec == null) originalSpec = finalSpec;
    if (originalSpec != null) result.put(LocationInternal.ORIGINAL_SPEC, originalSpec);

    return result;
  }
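
A brief usage sketch with made-up spec strings, showing the fallback behaviour implemented above: a CharSequence source supplies the original spec, and when no source yields one the final spec is reused.

  // Illustration only, with hypothetical spec strings.
  Map<ConfigKey<String>, String> withSource = finalAndOriginalSpecs("resolved:spec", "original:spec");
  // -> {FINAL_SPEC=resolved:spec, ORIGINAL_SPEC=original:spec}
  Map<ConfigKey<String>, String> noSource = finalAndOriginalSpecs("resolved:spec");
  // -> {FINAL_SPEC=resolved:spec, ORIGINAL_SPEC=resolved:spec}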