Example #1
  @Test(
      description = "POST /vApp/{id}/action/deploy",
      dependsOnMethods = {"testGetVm"})
  public void testDeployVm() {
    DeployVAppParams params =
        DeployVAppParams.builder()
            .deploymentLeaseSeconds((int) TimeUnit.SECONDS.convert(1L, TimeUnit.HOURS))
            .notForceCustomization()
            .notPowerOn()
            .build();

    // The method under test
    Task deployVm = vmApi.deploy(vmUrn, params);
    assertTrue(
        retryTaskSuccessLong.apply(deployVm), String.format(TASK_COMPLETE_TIMELY, "deployVm"));

    // Get the edited Vm
    vm = vmApi.get(vmUrn);

    // Check the required fields are set
    assertTrue(
        vm.isDeployed(),
        String.format(OBJ_FIELD_EQ, VM, "deployed", "TRUE", vm.isDeployed().toString()));

    // Check status
    assertVmStatus(vmUrn, Status.POWERED_OFF);
  }
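A minimal standalone sketch (not part of the jclouds test) of the conversion used for the lease length above; TimeUnit.HOURS.toSeconds(1) is an equivalent shorthand:

import java.util.concurrent.TimeUnit;

class LeaseSecondsSketch {
  public static void main(String[] args) {
    // 1 hour expressed in seconds, as passed to deploymentLeaseSeconds above
    long leaseSeconds = TimeUnit.SECONDS.convert(1L, TimeUnit.HOURS);
    System.out.println(leaseSeconds);                // 3600
    System.out.println(TimeUnit.HOURS.toSeconds(1)); // 3600
  }
}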
Example #2
  /** @see VmApi#remove(String) */
  @Test(description = "DELETE /vApp/{id}")
  public void testRemoveVm() {
    // Create a temporary VApp to remove
    VApp remove = instantiateVApp();
    DeployVAppParams params =
        DeployVAppParams.builder()
            .deploymentLeaseSeconds((int) TimeUnit.SECONDS.convert(1L, TimeUnit.HOURS))
            .notForceCustomization()
            .powerOn()
            .build();
    Task deployVApp = vAppApi.deploy(remove.getId(), params);
    assertTaskSucceedsLong(deployVApp);

    // Get the edited VApp and the Vm
    remove = vAppApi.get(remove.getId());
    List<Vm> vms = remove.getChildren().getVms();
    Vm temp = Iterables.get(vms, 0);

    // If this is the vApp's only VM, undeploy the whole vApp first: otherwise it
    // would be impossible to stop a running vApp left with no VMs after the removal.
    if (vms.size() == 1) {
      UndeployVAppParams undeployParams = UndeployVAppParams.builder().build();
      Task shutdownVapp = vAppApi.undeploy(remove.getId(), undeployParams);
      assertTaskSucceedsLong(shutdownVapp);
    } else {
      powerOffVm(temp.getId());
    }
    // The method under test
    Task removeVm = vmApi.remove(temp.getId());
    assertTrue(retryTaskSuccess.apply(removeVm), String.format(TASK_COMPLETE_TIMELY, "removeVm"));

    Vm removed = vmApi.get(temp.getId());
    assertNull(removed, "The Vm " + temp.getName() + " should have been removed");
  }
Example #3
 @Override
 protected Map<String, String> getHeaders(RemoteBehavior remoteBehavior) {
   Map<String, String> headers = super.getHeaders(remoteBehavior);
   headers.put(
       ExtendedHttpHeader.X_TTL.toString(),
       String.valueOf(TimeUnit.SECONDS.convert(ttl, timeUnit)));
   return headers;
 }
Example #4
 private long getLocatorTimeout() {
   if (StepEventBus.getEventBus().aStepInTheCurrentTestHasFailed()
       || (MethodTiming.forThisThread().isInQuickMethod())) {
     return 0;
   } else {
     return TimeUnit.SECONDS.convert(implicitTimeoutInMilliseconds, TimeUnit.MILLISECONDS);
   }
 }
Example #5
 private static void addRuntimeHiveRegistrationProperties(State state) {
   // Use seconds instead of milliseconds to be consistent with other times stored in hive
   state.appendToListProp(
       HiveRegProps.HIVE_TABLE_PARTITION_PROPS,
       String.format(
           "%s:%d",
           DATA_PUBLISH_TIME,
           TimeUnit.SECONDS.convert(System.currentTimeMillis(), TimeUnit.MILLISECONDS)));
 }
Example #6
    public ShardedDistributedMessageQueue build() throws MessageQueueException {
      Preconditions.checkArgument(
          TimeUnit.SECONDS.convert(lockTimeout, TimeUnit.MICROSECONDS) < lockTtl,
          "Timeout "
              + TimeUnit.SECONDS.convert(lockTimeout, TimeUnit.MICROSECONDS)
              + " seconds must be less than TTL "
              + lockTtl
              + " seconds");
      Preconditions.checkNotNull(keyspace, "Must specify keyspace");

      if (shardReaderPolicyFactory == null)
        shardReaderPolicyFactory = TimePartitionedShardReaderPolicy.Factory.builder().build();

      if (modShardPolicy == null) modShardPolicy = TimeModShardPolicy.getInstance();

      if (stats == null) stats = new CountingQueueStats();

      return new ShardedDistributedMessageQueue(this);
    }
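A hedged usage sketch for this Builder (the class appears in full in Example #21 below); import paths assume the Astyanax recipes package, and the Keyspace must be supplied by the caller:

import java.util.concurrent.TimeUnit;

import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.recipes.queue.MessageQueueException;
import com.netflix.astyanax.recipes.queue.ShardedDistributedMessageQueue;

class QueueBuilderSketch {
  static ShardedDistributedMessageQueue create(Keyspace keyspace) throws MessageQueueException {
    return new ShardedDistributedMessageQueue.Builder()
        .withKeyspace(keyspace)                  // required; enforced by checkNotNull above
        .withQueueName("tasks")
        .withLockTimeout(30L, TimeUnit.SECONDS)  // stored internally in microseconds
        .withLockTtl(2L, TimeUnit.MINUTES)       // stored internally in seconds
        .build();                                // rejects lock timeouts >= the lock TTL
  }
}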
Example #7
  public static String getSpeed(final long bytes, final long timeElapsed) {
    final String ret;

    // Guard on whole elapsed seconds: for 0 < timeElapsed < 1000 the conversion
    // below truncates to 0 and the division would produce Infinity.
    final long elapsedSeconds = TimeUnit.SECONDS.convert(timeElapsed, TimeUnit.MILLISECONDS);

    if (elapsedSeconds > 0) {
      final long bytesPerSec = Math.round(bytes / (double) elapsedSeconds);

      ret = BackupUtil.humanReadableByteCount(bytesPerSec) + "/s";
    } else {
      ret = "?";
    }

    return ret;
  }
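A hedged alternative sketch that avoids the whole-second truncation entirely by dividing milliseconds directly (the helper is hypothetical, not part of BackupUtil):

class SpeedSketch {
  // Bytes-per-second without first truncating the elapsed time to whole seconds.
  static long bytesPerSecond(long bytes, long elapsedMillis) {
    if (elapsedMillis <= 0) {
      throw new IllegalArgumentException("elapsedMillis must be positive");
    }
    return Math.round(bytes * 1000.0 / elapsedMillis);
  }

  public static void main(String[] args) {
    System.out.println(bytesPerSecond(512, 500)); // 1024, not Infinity
  }
}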
Example #8
 /**
  * Instantiates a new Auction.
  *
  * @param client the client
  * @param auctionTitle the auction title
  * @param auctionItem the auction item
  * @param duration the duration
  * @param timeUnit the time unit
  * @param auctionType the auction type
  */
 public Auction(
     Client client,
     String auctionTitle,
     AuctionItem auctionItem,
     long duration,
     TimeUnit timeUnit,
     AuctionType auctionType) {
   this.client = client;
   this.isRunning = true;
   this.auctionTitle = auctionTitle;
   this.auctionItem = auctionItem;
   this.duration = TimeUnit.SECONDS.convert(duration, timeUnit);
   this.auctionID = surrogateAuctionID.incrementAndGet();
   this.auctionType = auctionType;
 }
Example #9
 public String toString() {
   Socket tmp_sock = sock;
   if (tmp_sock == null) return "<null socket>";
   InetAddress local = tmp_sock.getLocalAddress(), remote = tmp_sock.getInetAddress();
   String local_str = local != null ? Util.shortName(local) : "<null>";
   String remote_str = remote != null ? Util.shortName(remote) : "<null>";
   return String.format(
       "%s:%s --> %s:%s (%d secs old) [%s] [recv_buf=%d]",
       local_str,
       tmp_sock.getLocalPort(),
       remote_str,
       tmp_sock.getPort(),
       TimeUnit.SECONDS.convert(getTimestamp() - last_access, TimeUnit.NANOSECONDS),
       status(),
       receiver != null ? receiver.bufferSize() : 0);
 }
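A minimal standalone sketch of the age computation above: a nanosecond delta rendered as whole seconds for logging:

import java.util.concurrent.TimeUnit;

class SocketAgeSketch {
  public static void main(String[] args) throws InterruptedException {
    long lastAccess = System.nanoTime();
    Thread.sleep(1200);
    // Same conversion as in toString() above: nanosecond delta -> whole seconds.
    long ageSecs = TimeUnit.SECONDS.convert(System.nanoTime() - lastAccess, TimeUnit.NANOSECONDS);
    System.out.println("(" + ageSecs + " secs old)"); // (1 secs old)
  }
}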
Example #10
 @Test
 public void testMillisToHMS() {
   long lHours = TimeUnit.HOURS.convert(12783000, TimeUnit.MILLISECONDS);
   TestCase.assertEquals(3, lHours);
   long lMinutes =
       TimeUnit.MINUTES.convert(
           12783000 - TimeUnit.MILLISECONDS.convert(lHours, TimeUnit.HOURS),
           TimeUnit.MILLISECONDS);
   TestCase.assertEquals(33, lMinutes);
   long lSeconds =
       TimeUnit.SECONDS.convert(
           12783000
               - (TimeUnit.MILLISECONDS.convert(lHours, TimeUnit.HOURS)
                   + TimeUnit.MILLISECONDS.convert(lMinutes, TimeUnit.MINUTES)),
           TimeUnit.MILLISECONDS);
   TestCase.assertEquals(3, lSeconds);
 }
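The same H:M:S decomposition can be written more compactly with modulo arithmetic; a minimal sketch, not from the original test:

import java.util.concurrent.TimeUnit;

class HmsSketch {
  public static void main(String[] args) {
    long millis = 12783000L; // the value asserted above: 3h 33m 3s
    long totalSeconds = TimeUnit.MILLISECONDS.toSeconds(millis);
    long hours = totalSeconds / 3600;          // 3
    long minutes = (totalSeconds % 3600) / 60; // 33
    long seconds = totalSeconds % 60;          // 3
    System.out.printf("%d:%02d:%02d%n", hours, minutes, seconds); // 3:33:03
  }
}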
Example #11
    @Override
    public void onPostExecute(NotificationEvent _event) {
      if (_event == null || _event.getTravelTime() == FAILURE) {
        Log.w(TAG, "Some problem determining whether late or not");
        // TODO User notification
        return;
      }

      Time t = new Time();
      t.setToNow();
      long nowSec = TimeUnit.SECONDS.convert(t.toMillis(false), TimeUnit.MILLISECONDS);

      boolean isLate = isLate(nowSec, _event.getTravelTime(), _event.getCalendarEvent().getWhen());
      if (isLate) {
        Log.i(TAG, "Event *is* late: " + mCalendarEvent.getDebugString());
        buildLateNotification(_event, mCalendarEvent);
      } else {
        Log.i(TAG, "Event *is not* late: " + mCalendarEvent.getDebugString());
      }
    }
Example #12
  public static Map testForResponseElement(
      RestTestHarness harness,
      String testServerBaseUrl,
      String uri,
      CloudSolrClient cloudSolrClient,
      List<String> jsonPath,
      Object expected,
      long maxTimeoutSeconds)
      throws Exception {

    boolean success = false;
    long startTime = System.nanoTime();
    Map m = null;

    while (TimeUnit.SECONDS.convert(System.nanoTime() - startTime, TimeUnit.NANOSECONDS)
        < maxTimeoutSeconds) {
      try {
        m =
            testServerBaseUrl == null
                ? getRespMap(uri, harness)
                : TestSolrConfigHandlerConcurrent.getAsMap(
                    testServerBaseUrl + uri, cloudSolrClient);
      } catch (Exception e) {
        Thread.sleep(100);
        continue;
      }
      if (Objects.equals(expected, Utils.getObjectByPath(m, false, jsonPath))) {
        success = true;
        break;
      }
      Thread.sleep(100);
    }
    assertTrue(
        StrUtils.formatString(
            "Could not get expected value  ''{0}'' for path ''{1}'' full output: {2},  from server:  {3}",
            expected, StrUtils.join(jsonPath, '/'), getAsString(m), testServerBaseUrl),
        success);

    return m;
  }
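The polling idiom above generalizes to any condition; a minimal hedged sketch with the same nanoTime-based deadline and 100 ms back-off:

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

class PollSketch {
  // Retry a condition until a deadline measured with System.nanoTime(),
  // sleeping 100 ms between attempts, as the test helper above does.
  static boolean pollUntil(BooleanSupplier condition, long maxSeconds) throws InterruptedException {
    long start = System.nanoTime();
    while (TimeUnit.SECONDS.convert(System.nanoTime() - start, TimeUnit.NANOSECONDS) < maxSeconds) {
      if (condition.getAsBoolean()) {
        return true;
      }
      Thread.sleep(100);
    }
    return false;
  }
}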
Example #13
  public void addChangeColorEventListener(VideoTag type, WebPage testClient, String name) {
    final long timeoutSeconds = TimeUnit.SECONDS.convert(timeout, timeoutTimeUnit);

    if (type.getVideoTagType() == VideoTagType.LOCAL) {
      localChangeColor = new ChangeColorObservable();
      localChangeColor.addListener(this);
      localColorTrigger =
          new Thread(new ColorTrigger(type, testClient, localChangeColor, timeoutSeconds));
      if (name != null) {
        localColorTrigger.setName(name);
      }
      localColorTrigger.start();
    } else {
      remoteChangeColor = new ChangeColorObservable();
      remoteChangeColor.addListener(this);
      remoteColorTrigger =
          new Thread(new ColorTrigger(type, testClient, remoteChangeColor, timeoutSeconds));
      if (name != null) {
        remoteColorTrigger.setName(name);
      }
      remoteColorTrigger.start();
    }
  }
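The two branches above differ only in which fields receive the observable and the thread; a hedged sketch of the shared setup they could delegate to (the helper is hypothetical, not part of the original class):

class TriggerThreads {
  // Hypothetical helper extracting the duplicated build/name/start sequence.
  static Thread startNamedThread(Runnable trigger, String name) {
    Thread t = new Thread(trigger);
    if (name != null) {
      t.setName(name);
    }
    t.start();
    return t;
  }
}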
Example #14
  void shutdownResources() {
    final TCLogger logger = DSO_LOGGER;

    if (this.counterManager != null) {
      try {
        this.counterManager.shutdown();
      } catch (final Throwable t) {
        logger.error("error shutting down counter manager", t);
      } finally {
        this.counterManager = null;
      }
    }

    if (this.tcMemManager != null) {
      try {
        this.tcMemManager.shutdown();
      } catch (final Throwable t) {
        logger.error("Error stopping memory manager", t);
      } finally {
        this.tcMemManager = null;
      }
    }

    if (this.lockManager != null) {
      try {
        this.lockManager.shutdown(false);
      } catch (final Throwable t) {
        logger.error("Error stopping lock manager", t);
      } finally {
        this.lockManager = null;
      }
    }

    try {
      this.communicationStageManager.stopAll();
    } catch (final Throwable t) {
      logger.error("Error stopping stage manager", t);
    }

    if (this.channel != null) {
      try {
        this.channel.close();
      } catch (final Throwable t) {
        logger.error("Error closing channel", t);
      } finally {
        this.channel = null;
      }
    }

    if (this.communicationsManager != null) {
      try {
        this.communicationsManager.shutdown();
      } catch (final Throwable t) {
        logger.error("Error shutting down communications manager", t);
      } finally {
        this.communicationsManager = null;
      }
    }

    if (taskRunner != null) {
      logger.info("Shutting down TaskRunner");
      taskRunner.shutdown();
    }

    CommonShutDownHook.shutdown();
    this.cluster.shutdown();

    if (this.threadGroup != null) {
      boolean interrupted = false;

      try {
        final long end =
            System.currentTimeMillis()
                + TCPropertiesImpl.getProperties()
                    .getLong(TCPropertiesConsts.L1_SHUTDOWN_THREADGROUP_GRACETIME);

        int threadCount = this.threadGroup.activeCount();
        Thread[] t = new Thread[threadCount];
        threadCount = this.threadGroup.enumerate(t);
        final long time = System.currentTimeMillis();
        for (int x = 0; x < threadCount; x++) {
          long start = System.currentTimeMillis();
          while (System.currentTimeMillis() < end && t[x].isAlive()) {
            try {
              t[x].join(1000);
            } catch (final InterruptedException e) {
              interrupted = true; // remembered and restored in the finally block below
            }
          }
          logger.info(
              "Destroyed thread "
                  + t[x].getName()
                  + " time to destroy:"
                  + (System.currentTimeMillis() - start)
                  + " millis");
        }
        logger.info(
            "time to destroy thread group:"
                + TimeUnit.SECONDS.convert(System.currentTimeMillis() - time, TimeUnit.MILLISECONDS)
                + " seconds");

        if (this.threadGroup.activeCount() > 0) {
          logger.warn(
              "Timed out waiting for TC thread group threads to die - probable shutdown memory leak\n"
                  + "Live threads: "
                  + getLiveThreads(this.threadGroup));

          Thread threadGroupCleanerThread =
              new Thread(
                  this.threadGroup.getParent(),
                  new TCThreadGroupCleanerRunnable(threadGroup),
                  "TCThreadGroup last chance cleaner thread");
          threadGroupCleanerThread.setDaemon(true);
          threadGroupCleanerThread.start();
          logger.warn("Spawning TCThreadGroup last chance cleaner thread");
        } else {
          logger.info("Destroying TC thread group");
          this.threadGroup.destroy();
        }
      } catch (final Throwable t) {
        logger.error("Error destroying TC thread group", t);
      } finally {
        if (interrupted) {
          Thread.currentThread().interrupt();
        }
      }
    }

    if (TCPropertiesImpl.getProperties()
        .getBoolean(TCPropertiesConsts.L1_SHUTDOWN_FORCE_FINALIZATION)) System.runFinalization();
  }
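The thread-group teardown above joins each thread against a shared deadline; a minimal standalone sketch of that grace-period pattern:

import java.util.ArrayList;
import java.util.List;

class GracefulJoinSketch {
  // Join each thread for whatever time remains until the shared deadline,
  // then report the ones that are still alive.
  static List<Thread> joinUntil(Thread[] threads, long deadlineMillis) throws InterruptedException {
    List<Thread> stillAlive = new ArrayList<>();
    for (Thread t : threads) {
      long remaining = deadlineMillis - System.currentTimeMillis();
      if (remaining > 0) {
        t.join(remaining);
      }
      if (t.isAlive()) {
        stillAlive.add(t);
      }
    }
    return stillAlive;
  }
}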
Example #15
 public Authorization(int expires, TimeUnit expiresUnit) {
   this.expires.add(Calendar.SECOND, (int) TimeUnit.SECONDS.convert(expires, expiresUnit));
 }
Example #16
 public QueryForDirectionsTask(CalendarEvent _event) {
   this.mCalendarEvent = _event;
   this.mEventLocation = _event.getWhere();
   this.mEventTimeSec = TimeUnit.SECONDS.convert(_event.getWhen(), TimeUnit.MILLISECONDS);
 }
Example #17
/**
 * A cache decorator which implements read ahead refreshing. Read ahead occurs when a cache entry is
 * accessed prior to its expiration, and triggers a reload of the value in the background.
 *
 * <p>A significant attempt is made to ensure only one node of the cache works on a specific key at
 * a time. There is no guarantee that every triggered refresh ahead case will be processed: once
 * the maximum number of backlog entries is reached, refresh ahead requests are dropped silently.
 *
 * <p>Provided the number of threads per node * number of nodes < the maximum backlog, only one
 * node in the cluster will refresh a given key at a time.
 *
 * @author cschanck
 */
public class RefreshAheadCache extends EhcacheDecoratorAdapter {

  private static final Object REFRESH_VALUE = Boolean.TRUE;
  private static final int DEFAULT_SUPPORT_TTL_SECONDS =
      (int) TimeUnit.SECONDS.convert(10, TimeUnit.MINUTES);
  private final AtomicLong refreshSuccessCount = new AtomicLong();
  private final RefreshAheadCacheConfiguration refreshAheadConfig;
  private CacheConfiguration supportConfig;

  private volatile Ehcache supportCache;
  private volatile ThreadedWorkQueue<Object> refreshWorkQueue;

  /**
   * Create a Refresh Ahead Cache Adaptor with the specified configuration. An auxiliary EhCache
   * Cache will be created for the purposes of synchronization, so only one node in a clustered
   * environment will refresh a key at a given time.
   *
   * @param adaptedCache the underlying cache to decorate
   * @param refreshConfig the refresh ahead configuration
   */
  public RefreshAheadCache(Ehcache adaptedCache, RefreshAheadCacheConfiguration refreshConfig) {

    super(adaptedCache);
    this.refreshAheadConfig = refreshConfig;

    // XA transactions cannot actually refresh sensibly. At least not
    // reasonably. GAE doesn't support threads. Other conditions around?
    boolean refreshAllowed = !underlyingCache.getCacheConfiguration().isXaStrictTransactional();
    refreshAllowed = refreshAllowed && !underlyingCache.getCacheConfiguration().isXaTransactional();
    refreshAllowed =
        refreshAllowed && !underlyingCache.getCacheConfiguration().isLocalTransactional();
    refreshAllowed = refreshAllowed && !VmUtils.isInGoogleAppEngine();

    if (refreshAllowed) {
      initSupportCache();
      initWorkQueue();
    } else {
      throw new UnsupportedOperationException(
          "refresh-ahead not supported under transactions or with GAE");
    }
  }

  private void initSupportCache() {
    // create the support cache
    // make this cache clustered in the same way as the underlying cache,
    this.supportConfig = new CacheConfiguration();
    supportConfig.name(
        underlyingCache.getName() + "_" + getClass().getName() + "_refreshAheadSupport");
    supportConfig =
        supportConfig.persistence(new PersistenceConfiguration().strategy(Strategy.NONE));
    int activeSize =
        2 * refreshAheadConfig.getBatchSize() * refreshAheadConfig.getNumberOfThreads();
    supportConfig = supportConfig.maxEntriesLocalHeap(activeSize);
    supportConfig = supportConfig.memoryStoreEvictionPolicy(MemoryStoreEvictionPolicy.LRU);
    supportConfig = supportConfig.timeToLiveSeconds(DEFAULT_SUPPORT_TTL_SECONDS);

    // TC stuff
    if (underlyingCache.getCacheConfiguration().isTerracottaClustered()) {
      supportConfig =
          supportConfig.persistence(new PersistenceConfiguration().strategy(Strategy.DISTRIBUTED));

      TerracottaConfiguration newTerracottaConfig = new TerracottaConfiguration().clustered(true);

      newTerracottaConfig.consistency(Consistency.STRONG);

      supportConfig.addTerracotta(newTerracottaConfig);
    } else {
      supportConfig.setMaxElementsOnDisk(activeSize);
    }

    // here we try to create the support cache.
    this.supportCache = new Cache(supportConfig);

    Ehcache prior = underlyingCache.getCacheManager().addCacheIfAbsent(supportCache);
    if (prior != supportCache) {
      throw new IllegalStateException(
          "Unable to add refresh ahead support cache due to name collision: "
              + refreshAheadConfig.getName());
    }

    // wipe it on startup. might wobble in a clustered case, but clears out orphans.
    prior.removeAll();

    // catch the dispose. not sure this is the best way to do it at all.
    // we could register a listener alternatively
    underlyingCache.registerCacheExtension(
        new CacheExtension() {

          @Override
          public void init() {}

          @Override
          public Status getStatus() {
            return underlyingCache.getStatus();
          }

          @Override
          public void dispose() throws CacheException {
            RefreshAheadCache.this.localDispose();
          }

          @Override
          public CacheExtension clone(Ehcache cache) throws CloneNotSupportedException {
            throw new CloneNotSupportedException();
          }
        });
  }

  private void initWorkQueue() {
    BatchWorker<Object> batchWorker =
        new BatchWorker<Object>() {

          @Override
          public void process(Collection<? extends Object> collection) {

            // only fetch this once for each process() call
            long accessTime = System.currentTimeMillis();

            HashSet<Object> keysToProcess = new HashSet<Object>();
            for (Object key : collection) {

              // check if it was loaded by someone else in the meantime -- does it still qualify for
              // refresh ahead?
              Element quickTest = underlyingCache.getQuiet(key);
              if (quickTest == null
                  || checkForRefresh(
                      quickTest, accessTime, refreshAheadConfig.getTimeToRefreshMillis())) {
                final Element ersatz = new Element(key, REFRESH_VALUE);

                if (supportCache.putIfAbsent(ersatz) == null) {
                  // work, work, work
                  keysToProcess.add(key);
                }
              }
            }
            try {
              // iterate through the loaders
              for (CacheLoader loader : underlyingCache.getRegisteredCacheLoaders()) {
                // if we are out of keys, punt
                if (keysToProcess.isEmpty()) {
                  break;
                }

                // try and load them all
                Map<? extends Object, ? extends Object> values = loader.loadAll(keysToProcess);
                // subtract the ones that were loaded
                keysToProcess.removeAll(values.keySet());
                try {
                  for (Map.Entry<? extends Object, ? extends Object> entry : values.entrySet()) {
                    Element newElement = new Element(entry.getKey(), entry.getValue());
                    underlyingCache.put(newElement);
                    refreshSuccessCount.incrementAndGet();
                  }
                } finally {
                  // subtract from the support cache
                  supportCache.removeAll(values.keySet());
                }
              }
              // assume we got here ok, now evict any that don't evict
              if (refreshAheadConfig.isEvictOnLoadMiss() && !keysToProcess.isEmpty()) {
                underlyingCache.removeAll(keysToProcess);
              }
            } finally {
              // this is utterly paranoid. but still.
              supportCache.removeAll(keysToProcess);
            }
          }
        };

    this.refreshWorkQueue =
        new ThreadedWorkQueue<Object>(
            batchWorker,
            refreshAheadConfig.getNumberOfThreads(),
            new ThreadFactory() {

              @Override
              public Thread newThread(Runnable r) {
                Thread t = new Thread(r);
                t.setDaemon(true);
                return t;
              }
            },
            refreshAheadConfig.getMaximumRefreshBacklogItems(),
            refreshAheadConfig.getBatchSize());
  }

  private boolean checkForRefresh(Element elem, long accessTime, long timeToRefreshMillis) {
    if (elem == null) {
      return false;
    }

    long minAccessForRefreshTime = elem.getCreationTime() + timeToRefreshMillis;

    return (accessTime >= minAccessForRefreshTime);
  }

  private void possiblyTriggerRefresh(Element elem, long timeToRefreshMillis) {
    if (checkForRefresh(elem, System.currentTimeMillis(), timeToRefreshMillis)) {
      // now add the key to the queue. smallest overhead we could get.
      refreshWorkQueue.offer(elem.getObjectKey());
    }
  }

  @Override
  public Element get(Object key) throws IllegalStateException, CacheException {
    Element elem = super.get(key);
    possiblyTriggerRefresh(elem, refreshAheadConfig.getTimeToRefreshMillis());
    return elem;
  }

  @Override
  public Element get(Serializable key) throws IllegalStateException, CacheException {
    Element elem = super.get(key);
    possiblyTriggerRefresh(elem, refreshAheadConfig.getTimeToRefreshMillis());
    return elem;
  }

  /** number of refreshes processed locally. */
  @org.terracotta.statistics.Statistic(name = "refreshed", tags = "refreshahead")
  public long getRefreshSuccessCount() {
    return refreshSuccessCount.get();
  }

  private void localDispose() throws IllegalStateException {
    synchronized (this) {
      if (refreshWorkQueue != null) {
        refreshWorkQueue.shutdown();
        refreshWorkQueue = null;
      }
      if (supportCache != null) {
        try {
          supportCache.getCacheManager().removeCache(getName());
        } catch (Throwable t) {
          // best-effort cleanup during dispose; ignore failures
        }
        supportCache = null;
      }
    }
  }

  @Override
  public String getName() {
    if (refreshAheadConfig.getName() != null) {
      return refreshAheadConfig.getName();
    }
    return super.getName();
  }

  /**
   * Gets offer count.
   *
   * @return the offer count
   */
  @org.terracotta.statistics.Statistic(name = "offered", tags = "refreshahead")
  public long getOfferCount() {
    return refreshWorkQueue.getOfferedCount();
  }

  /**
   * Gets dropped count.
   *
   * @return the dropped count
   */
  @org.terracotta.statistics.Statistic(name = "dropped", tags = "refreshahead")
  public long getDroppedCount() {
    return refreshWorkQueue.getDroppedCount();
  }

  /**
   * Gets processed count.
   *
   * @return the processed count
   */
  @org.terracotta.statistics.Statistic(name = "processed", tags = "refreshahead")
  public long getProcessedCount() {
    return refreshWorkQueue.getProcessedCount();
  }

  /**
   * Gets backlog count.
   *
   * @return the backlog count
   */
  @org.terracotta.statistics.Statistic(name = "backlog", tags = "refreshahead")
  public long getBacklogCount() {
    return refreshWorkQueue.getBacklogCount();
  }

  /**
   * Find refreshed counter statistic.
   *
   * @param cache the cache this statistic is attached to.
   * @return the set
   */
  public static Set<ExtendedStatistics.Statistic<Number>> findRefreshedStatistic(Ehcache cache) {
    return cache
        .getStatistics()
        .getExtended()
        .passthru("refreshed", Collections.singletonMap("refreshahead", null).keySet());
  }

  /**
   * Find offer statistic.
   *
   * @param cache the cache this statistic is attached to.
   * @return the set
   */
  public static Set<ExtendedStatistics.Statistic<Number>> findOfferStatistic(Ehcache cache) {
    return cache
        .getStatistics()
        .getExtended()
        .passthru("offered", Collections.singletonMap("refreshahead", null).keySet());
  }

  /**
   * Find dropped statistic.
   *
   * @param cache the cache
   * @return the set
   */
  public static Set<ExtendedStatistics.Statistic<Number>> findDroppedStatistic(Ehcache cache) {
    return cache
        .getStatistics()
        .getExtended()
        .passthru("dropped", Collections.singletonMap("refreshahead", null).keySet());
  }

  /**
   * Find processed statistic.
   *
   * @param cache the cache
   * @return the set
   */
  public static Set<ExtendedStatistics.Statistic<Number>> findProcessedStatistic(Ehcache cache) {
    return cache
        .getStatistics()
        .getExtended()
        .passthru("processed", Collections.singletonMap("refreshahead", null).keySet());
  }

  /**
   * Find backlog statistic.
   *
   * @param cache the cache
   * @return the set
   */
  public static Set<ExtendedStatistics.Statistic<Number>> findBacklogStatistic(Ehcache cache) {
    return cache
        .getStatistics()
        .getExtended()
        .passthru("backlog", Collections.singletonMap("refreshahead", null).keySet());
  }
}
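The refresh trigger in this class reduces to simple arithmetic on the element's creation time; a standalone sketch mirroring checkForRefresh (it does not call the Ehcache API):

class RefreshCheckSketch {
  // An entry qualifies for refresh-ahead once its age exceeds timeToRefreshMillis,
  // exactly the comparison checkForRefresh above performs.
  static boolean checkForRefresh(long creationTime, long accessTime, long timeToRefreshMillis) {
    return accessTime >= creationTime + timeToRefreshMillis;
  }

  public static void main(String[] args) {
    long created = System.currentTimeMillis() - 15_000; // created 15 seconds ago
    System.out.println(checkForRefresh(created, System.currentTimeMillis(), 10_000)); // true
  }
}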
Example #18
 public double getInSeconds() {
   // Note: TimeUnit.convert returns a long, so the result is truncated toward
   // zero; durations under one second come back as 0.0 despite the double type.
   return TimeUnit.SECONDS.convert(end - begin, TimeUnit.MILLISECONDS);
 }
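A hedged alternative sketch that actually preserves fractional seconds, which the long-based conversion above cannot:

class DurationSketch {
  // Dividing milliseconds by 1000.0 keeps sub-second precision that the
  // long-returning TimeUnit conversion discards.
  static double inSeconds(long beginMillis, long endMillis) {
    return (endMillis - beginMillis) / 1000.0;
  }

  public static void main(String[] args) {
    System.out.println(inSeconds(0, 1500)); // 1.5
  }
}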
Example #19
  public static void reqhandlertests(
      RestTestHarness writeHarness, String testServerBaseUrl, CloudSolrClient cloudSolrClient)
      throws Exception {
    String payload =
        "{\n"
            + "'create-requesthandler' : { 'name' : '/x', 'class': 'org.apache.solr.handler.DumpRequestHandler' , 'startup' : 'lazy'}\n"
            + "}";
    runConfigCommand(writeHarness, "/config?wt=json", payload);

    testForResponseElement(
        writeHarness,
        testServerBaseUrl,
        "/config/overlay?wt=json",
        cloudSolrClient,
        Arrays.asList("overlay", "requestHandler", "/x", "startup"),
        "lazy",
        10);

    payload =
        "{\n"
            + "'update-requesthandler' : { 'name' : '/x', 'class': 'org.apache.solr.handler.DumpRequestHandler' , 'startup' : 'lazy' , 'a':'b' , 'defaults': {'def_a':'def A val', 'multival':['a','b','c']}}\n"
            + "}";
    runConfigCommand(writeHarness, "/config?wt=json", payload);

    testForResponseElement(
        writeHarness,
        testServerBaseUrl,
        "/config/overlay?wt=json",
        cloudSolrClient,
        Arrays.asList("overlay", "requestHandler", "/x", "a"),
        "b",
        10);

    payload =
        "{\n"
            + "'update-requesthandler' : { 'name' : '/dump', "
            + "'initParams': 'a',"
            + "'class': 'org.apache.solr.handler.DumpRequestHandler' ,"
            + " 'defaults': {'a':'A','b':'B','c':'C'}}\n"
            + "}";

    runConfigCommand(writeHarness, "/config?wt=json", payload);
    testForResponseElement(
        writeHarness,
        testServerBaseUrl,
        "/config/overlay?wt=json",
        cloudSolrClient,
        Arrays.asList("overlay", "requestHandler", "/dump", "defaults", "c"),
        "C",
        10);

    testForResponseElement(
        writeHarness,
        testServerBaseUrl,
        "/x?wt=json&getdefaults=true&json.nl=map",
        cloudSolrClient,
        Arrays.asList("getdefaults", "def_a"),
        "def A val",
        10);

    testForResponseElement(
        writeHarness,
        testServerBaseUrl,
        "/x?wt=json&param=multival&json.nl=map",
        cloudSolrClient,
        Arrays.asList("params", "multival"),
        Arrays.asList("a", "b", "c"),
        10);

    payload = "{\n" + "'delete-requesthandler' : '/x'" + "}";
    runConfigCommand(writeHarness, "/config?wt=json", payload);
    boolean success = false;
    long startTime = System.nanoTime();
    int maxTimeoutSeconds = 10;
    while (TimeUnit.SECONDS.convert(System.nanoTime() - startTime, TimeUnit.NANOSECONDS)
        < maxTimeoutSeconds) {
      String uri = "/config/overlay?wt=json";
      Map m =
          testServerBaseUrl == null
              ? getRespMap(uri, writeHarness)
              : TestSolrConfigHandlerConcurrent.getAsMap(testServerBaseUrl + uri, cloudSolrClient);
      if (null
          == Utils.getObjectByPath(
              m, true, Arrays.asList("overlay", "requestHandler", "/x", "a"))) {
        success = true;
        break;
      }
      Thread.sleep(100);
    }
    assertTrue("Could not delete requestHandler  ", success);

    payload =
        "{\n"
            + "'create-queryconverter' : { 'name' : 'qc', 'class': 'org.apache.solr.spelling.SpellingQueryConverter'}\n"
            + "}";
    runConfigCommand(writeHarness, "/config?wt=json", payload);
    testForResponseElement(
        writeHarness,
        testServerBaseUrl,
        "/config?wt=json",
        cloudSolrClient,
        Arrays.asList("config", "queryConverter", "qc", "class"),
        "org.apache.solr.spelling.SpellingQueryConverter",
        10);
    payload =
        "{\n"
            + "'update-queryconverter' : { 'name' : 'qc', 'class': 'org.apache.solr.spelling.SuggestQueryConverter'}\n"
            + "}";
    runConfigCommand(writeHarness, "/config?wt=json", payload);
    testForResponseElement(
        writeHarness,
        testServerBaseUrl,
        "/config?wt=json",
        cloudSolrClient,
        Arrays.asList("config", "queryConverter", "qc", "class"),
        "org.apache.solr.spelling.SuggestQueryConverter",
        10);

    payload = "{\n" + "'delete-queryconverter' : 'qc'" + "}";
    runConfigCommand(writeHarness, "/config?wt=json", payload);
    testForResponseElement(
        writeHarness,
        testServerBaseUrl,
        "/config?wt=json",
        cloudSolrClient,
        Arrays.asList("config", "queryConverter", "qc"),
        null,
        10);

    payload =
        "{\n"
            + "'create-searchcomponent' : { 'name' : 'tc', 'class': 'org.apache.solr.handler.component.TermsComponent'}\n"
            + "}";
    runConfigCommand(writeHarness, "/config?wt=json", payload);
    testForResponseElement(
        writeHarness,
        testServerBaseUrl,
        "/config?wt=json",
        cloudSolrClient,
        Arrays.asList("config", "searchComponent", "tc", "class"),
        "org.apache.solr.handler.component.TermsComponent",
        10);
    payload =
        "{\n"
            + "'update-searchcomponent' : { 'name' : 'tc', 'class': 'org.apache.solr.handler.component.TermVectorComponent' }\n"
            + "}";
    runConfigCommand(writeHarness, "/config?wt=json", payload);
    testForResponseElement(
        writeHarness,
        testServerBaseUrl,
        "/config?wt=json",
        cloudSolrClient,
        Arrays.asList("config", "searchComponent", "tc", "class"),
        "org.apache.solr.handler.component.TermVectorComponent",
        10);

    payload = "{\n" + "'delete-searchcomponent' : 'tc'" + "}";
    runConfigCommand(writeHarness, "/config?wt=json", payload);
    testForResponseElement(
        writeHarness,
        testServerBaseUrl,
        "/config?wt=json",
        cloudSolrClient,
        Arrays.asList("config", "searchComponent", "tc"),
        null,
        10);
    // <valueSourceParser name="countUsage"
    // class="org.apache.solr.core.CountUsageValueSourceParser"/>
    payload =
        "{\n"
            + "'create-valuesourceparser' : { 'name' : 'cu', 'class': 'org.apache.solr.core.CountUsageValueSourceParser'}\n"
            + "}";
    runConfigCommand(writeHarness, "/config?wt=json", payload);
    testForResponseElement(
        writeHarness,
        testServerBaseUrl,
        "/config?wt=json",
        cloudSolrClient,
        Arrays.asList("config", "valueSourceParser", "cu", "class"),
        "org.apache.solr.core.CountUsageValueSourceParser",
        10);
    //  <valueSourceParser name="nvl" class="org.apache.solr.search.function.NvlValueSourceParser">
    //    <float name="nvlFloatValue">0.0</float>
    //    </valueSourceParser>
    payload =
        "{\n"
            + "'update-valuesourceparser' : { 'name' : 'cu', 'class': 'org.apache.solr.search.function.NvlValueSourceParser'}\n"
            + "}";
    runConfigCommand(writeHarness, "/config?wt=json", payload);
    testForResponseElement(
        writeHarness,
        testServerBaseUrl,
        "/config?wt=json",
        cloudSolrClient,
        Arrays.asList("config", "valueSourceParser", "cu", "class"),
        "org.apache.solr.search.function.NvlValueSourceParser",
        10);

    payload = "{\n" + "'delete-valuesourceparser' : 'cu'" + "}";
    runConfigCommand(writeHarness, "/config?wt=json", payload);
    testForResponseElement(
        writeHarness,
        testServerBaseUrl,
        "/config?wt=json",
        cloudSolrClient,
        Arrays.asList("config", "valueSourceParser", "cu"),
        null,
        10);
    //    <transformer name="mytrans2"
    // class="org.apache.solr.response.transform.ValueAugmenterFactory" >
    //    <int name="value">5</int>
    //    </transformer>
    payload =
        "{\n"
            + "'create-transformer' : { 'name' : 'mytrans', 'class': 'org.apache.solr.response.transform.ValueAugmenterFactory', 'value':'5'}\n"
            + "}";
    runConfigCommand(writeHarness, "/config?wt=json", payload);
    testForResponseElement(
        writeHarness,
        testServerBaseUrl,
        "/config?wt=json",
        cloudSolrClient,
        Arrays.asList("config", "transformer", "mytrans", "class"),
        "org.apache.solr.response.transform.ValueAugmenterFactory",
        10);

    payload =
        "{\n"
            + "'update-transformer' : { 'name' : 'mytrans', 'class': 'org.apache.solr.response.transform.ValueAugmenterFactory', 'value':'6'}\n"
            + "}";
    runConfigCommand(writeHarness, "/config?wt=json", payload);
    testForResponseElement(
        writeHarness,
        testServerBaseUrl,
        "/config?wt=json",
        cloudSolrClient,
        Arrays.asList("config", "transformer", "mytrans", "value"),
        "6",
        10);

    payload =
        "{\n"
            + "'delete-transformer' : 'mytrans',"
            + "'create-initparams' : { 'name' : 'hello', 'key':'val'}\n"
            + "}";
    runConfigCommand(writeHarness, "/config?wt=json", payload);
    Map map =
        testForResponseElement(
            writeHarness,
            testServerBaseUrl,
            "/config?wt=json",
            cloudSolrClient,
            Arrays.asList("config", "transformer", "mytrans"),
            null,
            10);

    List l = (List) Utils.getObjectByPath(map, false, Arrays.asList("config", "initParams"));
    assertNotNull("no object /config/initParams : " + TestBlobHandler.getAsString(map), l);
    assertEquals(1, l.size());
    assertEquals("val", ((Map) l.get(0)).get("key"));

    payload =
        "{\n"
            + "    'add-searchcomponent': {\n"
            + "        'name': 'myspellcheck',\n"
            + "        'class': 'solr.SpellCheckComponent',\n"
            + "        'queryAnalyzerFieldType': 'text_general',\n"
            + "        'spellchecker': {\n"
            + "            'name': 'default',\n"
            + "            'field': '_text_',\n"
            + "            'class': 'solr.DirectSolrSpellChecker'\n"
            + "        }\n"
            + "    }\n"
            + "}";
    runConfigCommand(writeHarness, "/config?wt=json", payload);
    map =
        testForResponseElement(
            writeHarness,
            testServerBaseUrl,
            "/config?wt=json",
            cloudSolrClient,
            Arrays.asList("config", "searchComponent", "myspellcheck", "spellchecker", "class"),
            "solr.DirectSolrSpellChecker",
            10);

    payload =
        "{\n"
            + "    'add-requesthandler': {\n"
            + "        name : '/dump100',\n"
            + "        class : 'org.apache.solr.handler.DumpRequestHandler',"
            + "        suggester: [{name: s1,lookupImpl: FuzzyLookupFactory, dictionaryImpl : DocumentDictionaryFactory},"
            + "                    {name: s2,lookupImpl: FuzzyLookupFactory , dictionaryImpl : DocumentExpressionDictionaryFactory}]"
            + "    }\n"
            + "}";
    runConfigCommand(writeHarness, "/config?wt=json", payload);
    map =
        testForResponseElement(
            writeHarness,
            testServerBaseUrl,
            "/config?wt=json",
            cloudSolrClient,
            Arrays.asList("config", "requestHandler", "/dump100", "class"),
            "org.apache.solr.handler.DumpRequestHandler",
            10);

    map = getRespMap("/dump100?wt=json&json.nl=arrmap&initArgs=true", writeHarness);
    List initArgs = (List) map.get("initArgs");
    assertEquals(2, initArgs.size());
    assertTrue(((Map) initArgs.get(0)).containsKey("suggester"));
    assertTrue(((Map) initArgs.get(1)).containsKey("suggester"));
    System.out.println(map);
  }
Example #20
 public long getMediaFileDurationInSec() {
   return TimeUnit.SECONDS.convert(mVideoDuration, TimeUnit.MICROSECONDS);
 }
Example #21
/**
 * ShardedDistributedMessageQueue is a Cassandra backed, client driven message queue.
 *
 * <p>Key features:
 *
 * <p>1. Time partition circular row key set used to time bound how much a wide row can grow. This,
 * along with an aggressive gc_grace_seconds, will give Cassandra a chance to clear out the row
 * before the clients cycle back to the time partition. Only one partition is active at any given
 * time.
 *
 * <p>2. Mod sharding per partition based on message time. This solves the problem of lock
 * contention on the active time partition.
 *
 * <p>3. Smart processing of partitions and shards to read mostly from the current time shard while
 * still allowing some cycles for processing older shards.
 *
 * <p>4. Read-ack model of removing elements from the queue. As part of removing an element from
 * the queue the client inserts a timeout message. Once the message has been processed the timeout
 * message is removed from the queue. Otherwise it will be processed when its time arrives, if it
 * is still in the queue.
 *
 * <p>5. Batch read of events.
 *
 * <p>6. Batch insert of events.
 *
 * <p>Algorithm:
 *
 * <p>Messages are stored as columns in an index where the columns are stored in time order. The
 * time can be the current time for immediate execution or a future time for recurring or scheduled
 * messages. Jobs will be processed in time order.
 *
 * <p>To achieve higher scalability the job queue (implemented as a row) is sharded by a user
 * provided shard. Rows also implement a rolling time window which is used to alleviate tombstone
 * pressure.
 *
 * <p>Enqueue:
 *
 * <p>Dequeue:
 *
 * <p>1. Lock + read top N columns.
 *
 * <p>2. Select M jobs to process: select jobs in <state> = scheduled; if any jobs are marked as
 * processing then delete and update their state.
 *
 * <p>3. Release the lock with a mutation that has a delete for the columns being processed and an
 * insert of the same data but with <state> = processing.
 *
 * <p>4. Process the jobs.
 *
 * <p>5. If the processing thread is about to enter a section which is not repeatable then update
 * the column by changing the state to NotRepeatable.
 *
 * <p>6. Issue a delete for each processed job.
 *
 * <p>Schema: RowKey: TimeBucket + Shard. Column: <type><priority><timeuuid><state>. Value: Job
 * Data.
 *
 * <p><type>: 0 - Lock meta, 1 - Queue item. <state>: 0 - Lock columns (special columns used to
 * lock the row), 1 - Scheduled, 2 - Processing (timeuuid = timeout), 3 - NotRepeatable (a special
 * indicator telling the queue that the job is not replayable since there could be a persisted side
 * effect).
 *
 * <p>Recurring Messages:
 *
 * <p>Column families: Queue, KeyLookup, History.
 *
 * @author elandau
 */
public class ShardedDistributedMessageQueue implements MessageQueue {
  private static final Logger LOG = LoggerFactory.getLogger(ShardedDistributedMessageQueue.class);

  public static final char COMPOSITE_ID_DELIMITER = ':';
  public static final char COMPOSITE_KEY_DELIMITER = '$';
  public static final String DEFAULT_COLUMN_FAMILY_NAME = "Queues";
  public static final ConsistencyLevel DEFAULT_CONSISTENCY_LEVEL = ConsistencyLevel.CL_LOCAL_QUORUM;
  public static final RetryPolicy DEFAULT_RETRY_POLICY = RunOnce.get();
  public static final long DEFAULT_LOCK_TIMEOUT =
      TimeUnit.MICROSECONDS.convert(30, TimeUnit.SECONDS);
  public static final Integer DEFAULT_LOCK_TTL =
      (int) TimeUnit.SECONDS.convert(2, TimeUnit.MINUTES);
  public static final Integer DEFAULT_METADATA_DELETE_TTL =
      (int) TimeUnit.SECONDS.convert(2, TimeUnit.SECONDS); // a no-op conversion: 2 seconds
  public static final Boolean DEFAULT_POISON_QUEUE_ENABLED = false;
  public static final String DEFAULT_QUEUE_SUFFIX = "_queue";
  public static final String DEFAULT_METADATA_SUFFIX = "_metadata";
  public static final String DEFAULT_HISTORY_SUFFIX = "_history";
  public static final long SCHEMA_CHANGE_DELAY = 3000;
  public static final ImmutableMap<String, Object> DEFAULT_COLUMN_FAMILY_SETTINGS =
      ImmutableMap.<String, Object>builder()
          .put("read_repair_chance", 1.0)
          .put("gc_grace_seconds", 5) // TODO: Calculate gc_grace_seconds
          .put("compaction_strategy", "SizeTieredCompactionStrategy")
          .build();

  static final AnnotatedCompositeSerializer<MessageQueueEntry> entrySerializer =
      new AnnotatedCompositeSerializer<MessageQueueEntry>(MessageQueueEntry.class);
  static final AnnotatedCompositeSerializer<MessageMetadataEntry> metadataSerializer =
      new AnnotatedCompositeSerializer<MessageMetadataEntry>(MessageMetadataEntry.class);

  static final ObjectMapper mapper = new ObjectMapper();

  // Configure the shared static mapper once; as an instance initializer this
  // would needlessly rerun for every queue instance.
  static {
    mapper.getSerializationConfig().setSerializationInclusion(JsonSerialize.Inclusion.NON_NULL);
    mapper.enableDefaultTyping();
  }

  /** @author elandau */
  public static class Builder {
    private String columnFamilyName = DEFAULT_COLUMN_FAMILY_NAME;
    private ShardLockManager lockManager;

    private Keyspace keyspace;
    private ConsistencyLevel consistencyLevel = DEFAULT_CONSISTENCY_LEVEL;
    private long lockTimeout = DEFAULT_LOCK_TIMEOUT;
    private int lockTtl = DEFAULT_LOCK_TTL;
    private String queueName = MessageQueueMetadata.DEFAULT_QUEUE_NAME;
    private int metadataDeleteTTL = DEFAULT_METADATA_DELETE_TTL;
    private Collection<MessageQueueHooks> hooks = Lists.newArrayList();
    private MessageQueueMetadata metadata = new MessageQueueMetadata();
    private MessageQueueStats stats;
    private Boolean bPoisonQueueEnabled = DEFAULT_POISON_QUEUE_ENABLED;
    private Map<String, Object> columnFamilySettings = DEFAULT_COLUMN_FAMILY_SETTINGS;
    private ShardReaderPolicy.Factory shardReaderPolicyFactory;
    private ModShardPolicy modShardPolicy;

    public Builder() {
      metadata.setQueueName(queueName);
    }

    public Builder withColumnFamily(String columnFamilyName) {
      this.columnFamilyName = columnFamilyName;
      return this;
    }

    public Builder withMetadata(MessageQueueMetadata metadata) {
      this.metadata = metadata;
      return this;
    }

    public Builder withShardCount(int count) {
      this.metadata.setShardCount(count);
      return this;
    }

    public Builder withTimeBuckets(int bucketCount, int bucketDuration, TimeUnit units) {
      this.metadata.setPartitionDuration(TimeUnit.MICROSECONDS.convert(bucketDuration, units));
      this.metadata.setPartitionCount(bucketCount);
      return this;
    }

    /** @deprecated Use withTimeBuckets instead */
    @Deprecated
    public Builder withBuckets(int bucketCount, int bucketDuration, TimeUnit units) {
      return withTimeBuckets(bucketCount, bucketDuration, units);
    }

    public Builder withRetentionTimeout(Long timeout, TimeUnit units) {
      this.metadata.setRetentionTimeout(timeout, units);
      return this;
    }

    public Builder withLockTimeout(Long timeout, TimeUnit units) {
      this.lockTimeout = TimeUnit.MICROSECONDS.convert(timeout, units);
      return this;
    }

    public Builder withLockTtl(Long ttl, TimeUnit units) {
      this.lockTtl = (int) TimeUnit.SECONDS.convert(ttl, units);
      return this;
    }

    /**
     * Define this on the ShardReaderPolicy instead
     *
     * @param interval poll interval
     * @param units time unit of the interval
     * @return this builder
     */
    @Deprecated
    public Builder withPollInterval(Long interval, TimeUnit units) {
      this.metadata.setPollInterval(TimeUnit.MILLISECONDS.convert(interval, units));
      return this;
    }

    public Builder withQueueName(String queueName) {
      this.metadata.setQueueName(queueName);
      return this;
    }

    public Builder withConsistencyLevel(ConsistencyLevel level) {
      this.consistencyLevel = level;
      return this;
    }

    public Builder withColumnFamilySettings(Map<String, Object> settings) {
      this.columnFamilySettings = settings;
      return this;
    }

    public Builder withKeyspace(Keyspace keyspace) {
      this.keyspace = keyspace;
      return this;
    }

    public Builder withStats(MessageQueueStats stats) {
      this.stats = stats;
      return this;
    }

    public Builder withHook(MessageQueueHooks hooks) {
      this.hooks.add(hooks);
      return this;
    }

    public Builder withHooks(Collection<MessageQueueHooks> hooks) {
      this.hooks.addAll(hooks);
      return this;
    }

    public Builder withPoisonQueue(Boolean enabled) {
      this.bPoisonQueueEnabled = enabled;
      return this;
    }

    public Builder withModShardPolicy(ModShardPolicy policy) {
      this.modShardPolicy = policy;
      return this;
    }

    public Builder withShardReaderPolicy(final ShardReaderPolicy shardReaderPolicy) {
      this.shardReaderPolicyFactory =
          new ShardReaderPolicy.Factory() {
            @Override
            public ShardReaderPolicy create(MessageQueueMetadata metadata) {
              return shardReaderPolicy;
            }
          };
      return this;
    }

    public Builder withShardReaderPolicy(ShardReaderPolicy.Factory shardReaderPolicyFactory) {
      this.shardReaderPolicyFactory = shardReaderPolicyFactory;
      return this;
    }

    public Builder withShardLockManager(ShardLockManager mgr) {
      this.lockManager = mgr;
      return this;
    }

    public ShardedDistributedMessageQueue build() throws MessageQueueException {
      Preconditions.checkArgument(
          TimeUnit.SECONDS.convert(lockTimeout, TimeUnit.MICROSECONDS) < lockTtl,
          "Timeout "
              + TimeUnit.SECONDS.convert(lockTimeout, TimeUnit.MICROSECONDS)
              + " seconds must be less than TTL "
              + lockTtl
              + " seconds");
      Preconditions.checkNotNull(keyspace, "Must specify keyspace");

      if (shardReaderPolicyFactory == null)
        shardReaderPolicyFactory = TimePartitionedShardReaderPolicy.Factory.builder().build();

      if (modShardPolicy == null) modShardPolicy = TimeModShardPolicy.getInstance();

      if (stats == null) stats = new CountingQueueStats();

      return new ShardedDistributedMessageQueue(this);
    }
  }

  // Immutable after configuration
  final ShardLockManager lockManager;
  final ColumnFamily<String, MessageQueueEntry> queueColumnFamily;
  final ColumnFamily<String, MessageMetadataEntry> keyIndexColumnFamily;
  final ColumnFamily<String, UUID> historyColumnFamily;

  final Keyspace keyspace;
  final ConsistencyLevel consistencyLevel;
  final long lockTimeout;
  final int lockTtl;
  final int metadataDeleteTTL;
  final Collection<MessageQueueHooks> hooks;
  final MessageQueueMetadata metadata;
  final Boolean bPoisonQueueEnabled;
  final Map<String, Object> columnFamilySettings;
  final ShardReaderPolicy shardReaderPolicy;
  final ModShardPolicy modShardPolicy;
  final Function<String, Message> invalidMessageHandler =
      new Function<String, Message>() {
        @Override
        public Message apply(String input) {
          LOG.warn("Invalid message: " + input);
          return null;
        }
      };

  final MessageQueueStats stats;
  final AtomicLong counter = new AtomicLong(new Random().nextInt(1000));

  private ShardedDistributedMessageQueue(Builder builder) throws MessageQueueException {
    this.queueColumnFamily =
        ColumnFamily.newColumnFamily(
            builder.columnFamilyName + DEFAULT_QUEUE_SUFFIX,
            StringSerializer.get(),
            entrySerializer);
    this.keyIndexColumnFamily =
        ColumnFamily.newColumnFamily(
            builder.columnFamilyName + DEFAULT_METADATA_SUFFIX,
            StringSerializer.get(),
            metadataSerializer);
    this.historyColumnFamily =
        ColumnFamily.newColumnFamily(
            builder.columnFamilyName + DEFAULT_HISTORY_SUFFIX,
            StringSerializer.get(),
            TimeUUIDSerializer.get());

    this.consistencyLevel = builder.consistencyLevel;
    this.keyspace = builder.keyspace;
    this.hooks = builder.hooks;
    this.modShardPolicy = builder.modShardPolicy;
    this.lockManager = builder.lockManager;
    this.lockTimeout = builder.lockTimeout;
    this.lockTtl = builder.lockTtl;
    this.bPoisonQueueEnabled = builder.bPoisonQueueEnabled;
    this.metadata = builder.metadata;
    this.columnFamilySettings = builder.columnFamilySettings;
    this.metadataDeleteTTL = builder.metadataDeleteTTL;
    this.stats = builder.stats;

    this.shardReaderPolicy = builder.shardReaderPolicyFactory.create(metadata);

    //        try {
    //            Column<MessageQueueEntry> column = keyspace.prepareQuery(queueColumnFamily)
    //                    .setConsistencyLevel(consistencyLevel)
    //                    .getRow(getName())
    //                    .getColumn(MessageQueueEntry.newMetadataEntry())
    //                    .execute()
    //                    .getResult();
    //
    //            ByteArrayInputStream bais = new ByteArrayInputStream(column.getByteArrayValue());
    //            MessageQueueSettings existingSettings = mapper.readValue(bais, MessageQueueSettings.class);
    //
    //            // TODO: Override some internal settings with those persisted in the queue metadata
    //        }
    //        catch (NotFoundException e) {
    //            LOG.info("Message queue metadata not found.  Queue does not exist in CF and will be created now.");
    //        }
    //        catch (BadRequestException e) {
    //            if (e.isUnconfiguredColumnFamilyError()) {
    //                LOG.info("Column family does not exist.  Call createStorage() to create column family.");
    //            }
    //            else {
    //                throw new MessageQueueException("Error getting message queue metadata", e);
    //            }
    //        }
    //        catch (Exception e) {
    //            throw new MessageQueueException("Error getting message queue metadata", e);
    //        }

  /**
   * Return the shard for this message
   *
   * @param message the message being routed to a shard
   * @return the shard row key
   */
  String getShardKey(Message message) {
    return getShardKey(
        message.getTokenTime(), this.modShardPolicy.getMessageShard(message, metadata));
  }

  /**
   * Return the shard for this timestamp
   *
   * @param messageTime message time used to select the time partition
   * @param modShard the mod shard index
   * @return the shard row key
   */
  private String getShardKey(long messageTime, int modShard) {
    long timePartition;
    if (metadata.getPartitionDuration() != null)
      timePartition =
          (messageTime / metadata.getPartitionDuration()) % metadata.getPartitionCount();
    else timePartition = 0;
    return getName() + ":" + timePartition + ":" + modShard;
  }

  String getCompositeKey(String name, String key) {
    return name + COMPOSITE_KEY_DELIMITER + key;
  }

  private static String[] splitCompositeKey(String key) throws MessageQueueException {
    String[] parts = StringUtils.split(key, COMPOSITE_KEY_DELIMITER);

    if (parts.length != 2) {
      throw new MessageQueueException(
          "Invalid key '" + key + "'.  Expected format <queue|shard>$<name>. ");
    }

    return parts;
  }

  <T> String serializeToString(T trigger)
      throws JsonGenerationException, JsonMappingException, IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    mapper.writeValue(baos, trigger);
    baos.flush();
    return baos.toString();
  }

  private <T> T deserializeString(String data, Class<T> clazz)
      throws JsonParseException, JsonMappingException, IOException {
    return (T) mapper.readValue(new ByteArrayInputStream(data.getBytes()), clazz);
  }

  @SuppressWarnings({"unused", "unchecked"})
  private <T> T deserializeString(String data, String className)
      throws JsonParseException, JsonMappingException, IOException, ClassNotFoundException {
    return (T)
        mapper.readValue(new ByteArrayInputStream(data.getBytes()), Class.forName(className));
  }

  @Override
  public String getName() {
    return metadata.getQueueName();
  }

  @Override
  public long getMessageCount() throws MessageQueueException {
    Map<String, Integer> counts = getShardCounts();
    long count = 0;
    for (Integer value : counts.values()) {
      count += value;
    }
    return count;
  }

  @Override
  public Map<String, Integer> getShardCounts() throws MessageQueueException {
    try {
      List<String> keys = Lists.newArrayList();
      for (int i = 0; i < metadata.getPartitionCount(); i++) {
        for (int j = 0; j < metadata.getShardCount(); j++) {
          keys.add(getName() + ":" + i + ":" + j);
        }
      }

      Map<String, Integer> result = Maps.newTreeMap();
      result.putAll(
          keyspace
              .prepareQuery(queueColumnFamily)
              .getKeySlice(keys)
              .getColumnCounts()
              .execute()
              .getResult());
      return result;
    } catch (ConnectionException e) {
      throw new MessageQueueException("Failed to get counts", e);
    }
  }

  @Override
  public void clearMessages() throws MessageQueueException {
    LOG.info("Clearing messages from '" + getName() + "'");
    MutationBatch mb = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);

    for (MessageQueueShard partition : shardReaderPolicy.listShards()) {
      mb.withRow(queueColumnFamily, partition.getName()).delete();
    }

    try {
      mb.execute();
    } catch (ConnectionException e) {
      throw new MessageQueueException("Failed to clear messages from queue " + getName(), e);
    }
  }

  @Override
  public void deleteQueue() throws MessageQueueException {
    LOG.info("Deleting queue '" + getName() + "'");
    MutationBatch mb = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);

    for (MessageQueueShard partition : shardReaderPolicy.listShards()) {
      mb.withRow(queueColumnFamily, partition.getName()).delete();
    }

    mb.withRow(queueColumnFamily, getName()).delete();

    try {
      mb.execute();
    } catch (ConnectionException e) {
      throw new MessageQueueException("Failed to clear messages from queue " + getName(), e);
    }
  }

  @Override
  public Message peekMessage(String messageId) throws MessageQueueException {
    String[] parts = splitCompositeKey(messageId);

    String shardKey = parts[0];
    MessageQueueEntry entry = new MessageQueueEntry(parts[1]);

    try {
      Column<MessageQueueEntry> column =
          keyspace
              .prepareQuery(queueColumnFamily)
              .setConsistencyLevel(consistencyLevel)
              .getKey(shardKey)
              .getColumn(entry)
              .execute()
              .getResult();
      try {
        ByteArrayInputStream bais = new ByteArrayInputStream(column.getByteArrayValue());
        return mapper.readValue(bais, Message.class);
      } catch (Exception e) {
        LOG.warn("Error parsing message", e);
        // Error parsing the message so we pass it on to the invalid message handler.
        try {
          return invalidMessageHandler.apply(column.getStringValue());
        } catch (Exception e2) {
          LOG.warn("Error handling invalid message message", e2);
          throw new MessageQueueException("Error parsing message " + messageId);
        }
      }
    } catch (NotFoundException e) {
      return null;
    } catch (ConnectionException e) {
      throw new MessageQueueException("Error getting message " + messageId, e);
    }
  }

  @Override
  public List<Message> peekMessagesByKey(String key) throws MessageQueueException {
    String groupRowKey = getCompositeKey(getName(), key);
    List<Message> messages = Lists.newArrayList();
    try {
      ColumnList<MessageMetadataEntry> columns =
          keyspace
              .prepareQuery(keyIndexColumnFamily)
              .getRow(groupRowKey)
              .withColumnRange(
                  metadataSerializer
                      .buildRange()
                      .greaterThanEquals((byte) MessageMetadataEntryType.MessageId.ordinal())
                      .lessThanEquals((byte) MessageMetadataEntryType.MessageId.ordinal())
                      .build())
              .execute()
              .getResult();

      for (Column<MessageMetadataEntry> entry : columns) {
        // Skip lock columns, which are written with a TTL
        if (entry.getTtl() != 0) continue;

        Message message = peekMessage(entry.getName().getName());
        if (message != null) {
          messages.add(message);
        } else {
          LOG.warn("No queue item for " + entry.getName());
        }
      }
    } catch (NotFoundException e) {
      // No index row for this key; fall through and return the empty list
    } catch (ConnectionException e) {
      throw new MessageQueueException("Error fetching row " + groupRowKey, e);
    }
    return messages;
  }

  @Override
  public Message peekMessageByKey(String key) throws MessageQueueException {
    String groupRowKey = getCompositeKey(getName(), key);
    try {
      ColumnList<MessageMetadataEntry> columns =
          keyspace
              .prepareQuery(keyIndexColumnFamily)
              .setConsistencyLevel(consistencyLevel)
              .getRow(groupRowKey)
              .withColumnRange(
                  metadataSerializer
                      .buildRange()
                      .greaterThanEquals((byte) MessageMetadataEntryType.MessageId.ordinal())
                      .lessThanEquals((byte) MessageMetadataEntryType.MessageId.ordinal())
                      .build())
              .execute()
              .getResult();

      for (Column<MessageMetadataEntry> entry : columns) {
        if (entry.getTtl() != 0) continue;
        // Return the first one we get.  Hmmm... maybe we want to do some validation checks here
        return peekMessage(entry.getName().getName());
      }
      return null;
    } catch (NotFoundException e) {
      return null;
    } catch (ConnectionException e) {
      throw new MessageQueueException("Error fetching row " + groupRowKey, e);
    }
  }

  @Override
  public boolean deleteMessageByKey(String key) throws MessageQueueException {
    MutationBatch mb = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);

    String groupRowKey = getCompositeKey(getName(), key);
    try {
      ColumnList<MessageMetadataEntry> columns =
          keyspace
              .prepareQuery(keyIndexColumnFamily)
              .setConsistencyLevel(consistencyLevel)
              .getRow(groupRowKey)
              .withColumnRange(
                  metadataSerializer
                      .buildRange()
                      .greaterThanEquals((byte) MessageMetadataEntryType.MessageId.ordinal())
                      .lessThanEquals((byte) MessageMetadataEntryType.MessageId.ordinal())
                      .build())
              .execute()
              .getResult();

      for (Column<MessageMetadataEntry> entry : columns) {
        String[] parts = splitCompositeKey(entry.getName().getName());

        String shardKey = parts[0];
        MessageQueueEntry queueEntry = new MessageQueueEntry(parts[1]);

        mb.withRow(queueColumnFamily, shardKey).deleteColumn(queueEntry);
      }

      mb.withRow(keyIndexColumnFamily, groupRowKey).delete();
    } catch (NotFoundException e) {
      return false;
    } catch (ConnectionException e) {
      throw new MessageQueueException("Error fetching row " + groupRowKey, e);
    }

    try {
      mb.execute();
    } catch (ConnectionException e) {
      throw new MessageQueueException("Error deleting queue item " + groupRowKey, e);
    }

    return true;
  }

  @Override
  public void deleteMessage(String messageId) throws MessageQueueException {
    String[] parts = splitCompositeKey(messageId);

    String shardKey = parts[0];
    MessageQueueEntry entry = new MessageQueueEntry(parts[1]);

    try {
      keyspace
          .prepareColumnMutation(queueColumnFamily, shardKey, entry)
          .setConsistencyLevel(consistencyLevel)
          .deleteColumn()
          .execute();
    } catch (ConnectionException e) {
      throw new MessageQueueException("Error deleting message " + messageId, e);
    }
  }

  @Override
  public void deleteMessages(Collection<String> messageIds) throws MessageQueueException {
    MutationBatch mb = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);

    for (String messageId : messageIds) {
      String[] parts = splitCompositeKey(messageId);
      String shardKey = parts[0];
      MessageQueueEntry entry = new MessageQueueEntry(parts[1]);

      mb.withRow(queueColumnFamily, shardKey).deleteColumn(entry);
    }

    try {
      mb.execute();
    } catch (ConnectionException e) {
      throw new MessageQueueException("Error deleting messages " + messageIds, e);
    }
  }

  private void changeSchema(Callable<Void> callable) throws MessageQueueException {
    for (int i = 0; i < 3; i++) {
      try {
        callable.call();
        try {
          Thread.sleep(SCHEMA_CHANGE_DELAY);
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          throw new MessageQueueException(
              "Interrupted while trying to create column family for queue " + getName(), ie);
        }
        return;
      } catch (SchemaDisagreementException e) {
        try {
          Thread.sleep(SCHEMA_CHANGE_DELAY);
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          throw new MessageQueueException(
              "Interrupted while trying to create column family for queue " + getName(), ie);
        }
      } catch (Exception e) {
        if (e.getMessage() != null && e.getMessage().contains("already exist")) return;
        throw new MessageQueueException(
            "Failed to change schema for queue " + getName(), e);
      }
    }
  }

  @Override
  public void createStorage() throws MessageQueueException {
    changeSchema(
        new Callable<Void>() {
          @Override
          public Void call() throws Exception {
            keyspace.createColumnFamily(
                queueColumnFamily,
                ImmutableMap.<String, Object>builder()
                    .put("key_validation_class", "UTF8Type")
                    .put(
                        "comparator_type",
                        "CompositeType(BytesType, BytesType(reversed=true), TimeUUIDType, TimeUUIDType, BytesType)")
                    .putAll(columnFamilySettings)
                    .build());
            return null;
          }
        });

    changeSchema(
        new Callable<Void>() {
          @Override
          public Void call() throws Exception {
            keyspace.createColumnFamily(
                keyIndexColumnFamily,
                ImmutableMap.<String, Object>builder()
                    .put("key_validation_class", "UTF8Type")
                    .put("comparator_type", "CompositeType(BytesType, UTF8Type)")
                    .putAll(columnFamilySettings)
                    .build());
            return null;
          }
        });

    changeSchema(
        new Callable<Void>() {
          @Override
          public Void call() throws Exception {
            keyspace.createColumnFamily(
                historyColumnFamily,
                ImmutableMap.<String, Object>builder()
                    .put("default_validation_class", "UTF8Type")
                    .putAll(columnFamilySettings)
                    .build());
            return null;
          }
        });
  }

  @Override
  public void dropStorage() throws MessageQueueException {
    try {
      keyspace.dropColumnFamily(this.queueColumnFamily);
      try {
        Thread.sleep(SCHEMA_CHANGE_DELAY);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    } catch (ConnectionException e) {
      if (!e.getMessage().contains("already exist"))
        throw new MessageQueueException(
            "Failed to drop column family " + queueColumnFamily.getName(), e);
    }

    try {
      keyspace.dropColumnFamily(this.keyIndexColumnFamily);
      try {
        Thread.sleep(SCHEMA_CHANGE_DELAY);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    } catch (ConnectionException e) {
      if (!e.getMessage().contains("already exist"))
        throw new MessageQueueException(
            "Failed to drop column family " + keyIndexColumnFamily.getName(), e);
    }
  }

  @Override
  public void createQueue() throws MessageQueueException {
    try {
      // Convert the message object to JSON
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      mapper.writeValue(baos, metadata);
      baos.flush();
      keyspace
          .prepareColumnMutation(queueColumnFamily, getName(), MessageQueueEntry.newMetadataEntry())
          .putValue(baos.toByteArray(), null)
          .execute();
    } catch (ConnectionException e) {
      throw new MessageQueueException(
          "Failed to write metadata for queue " + getName(), e);
    } catch (Exception e) {
      throw new MessageQueueException(
          "Error serializing queue settings " + queueColumnFamily.getName(), e);
    }
  }

  @Override
  public MessageConsumer createConsumer() {
    return new MessageConsumerImpl(this);
  }

  @Override
  public MessageProducer createProducer() {
    return new MessageProducer() {
      @Override
      public String sendMessage(Message message) throws MessageQueueException {
        SendMessageResponse response = sendMessages(Lists.newArrayList(message));
        if (!response.getNotUnique().isEmpty())
          throw new KeyExistsException("Key already exists ." + message.getKey());
        return Iterables.getFirst(response.getMessages().entrySet(), null).getKey();
      }

      @Override
      public SendMessageResponse sendMessages(Collection<Message> messages)
          throws MessageQueueException {
        Map<String, Message> uniqueKeys = Maps.newHashMap();
        Set<String> notUniqueKeys = Sets.newHashSet();
        List<Message> notUniqueMessages = Lists.newArrayList();

        MutationBatch mb = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
        MessageMetadataEntry lockColumn = MessageMetadataEntry.newUnique();

        // Get list of keys that must be unique and prepare the mutation for phase 1
        for (Message message : messages) {
          if (message.hasUniqueKey()) {
            String groupKey = getCompositeKey(getName(), message.getKey());
            uniqueKeys.put(groupKey, message);
            mb.withRow(keyIndexColumnFamily, groupKey)
                .putEmptyColumn(lockColumn, (Integer) lockTtl);
          }
        }

        // We have some keys that need to be unique
        if (!uniqueKeys.isEmpty()) {
          // Submit phase 1: Create a unique column for ALL of the unique keys
          try {
            mb.execute();
          } catch (ConnectionException e) {
            throw new MessageQueueException(
                "Failed to check keys for uniqueness (1): " + uniqueKeys, e);
          }

          // Phase 2: Read back ALL the lock columns
          mb = keyspace.prepareMutationBatch().setConsistencyLevel(consistencyLevel);
          Rows<String, MessageMetadataEntry> result;
          try {
            result =
                keyspace
                    .prepareQuery(keyIndexColumnFamily)
                    .setConsistencyLevel(consistencyLevel)
                    .getRowSlice(uniqueKeys.keySet())
                    .withColumnRange(
                        metadataSerializer
                            .buildRange()
                            .greaterThanEquals((byte) MessageMetadataEntryType.Unique.ordinal())
                            .lessThanEquals((byte) MessageMetadataEntryType.Unique.ordinal())
                            .build())
                    .execute()
                    .getResult();
          } catch (ConnectionException e) {
            throw new MessageQueueException(
                "Failed to check keys for uniqueness (2): " + uniqueKeys, e);
          }

          for (Row<String, MessageMetadataEntry> row : result) {
            // This key is already taken, roll back the check
            if (row.getColumns().size() != 1) {
              String messageKey = splitCompositeKey(row.getKey())[1];

              notUniqueKeys.add(messageKey);
              // uniqueKeys is indexed by the full composite row key, not the bare message key
              notUniqueMessages.add(uniqueKeys.get(row.getKey()));
              mb.withRow(keyIndexColumnFamily, row.getKey()).deleteColumn(lockColumn);
            }
            // This key is now unique
            else {
              mb.withRow(keyIndexColumnFamily, row.getKey()).putEmptyColumn(lockColumn);
            }
          }
        }

        // Commit the messages
        Map<String, Message> success = Maps.newLinkedHashMap();
        for (Message message : messages) {
          if (message.hasKey() && notUniqueKeys.contains(message.getKey())) continue;

          String messageId = fillMessageMutation(mb, message);
          success.put(messageId, message);
        }

        try {
          mb.execute();
        } catch (ConnectionException e) {
          throw new MessageQueueException("Failed to insert messages into queue.", e);
        }

        return new SendMessageResponse(success, notUniqueMessages);
      }
    };
  }
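
  // Illustrative usage sketch (not part of the original class): send a message with a
  // unique key through the producer above. Assumes Message exposes a fluent setKey(String)
  // mutator to match the hasKey()/getKey() accessors used elsewhere in this class.
  @SuppressWarnings("unused")
  private void sendUniqueMessageExample() throws MessageQueueException {
    MessageProducer producer = createProducer();
    try {
      String messageId = producer.sendMessage(new Message().setKey("job-42"));
      LOG.info("Enqueued message " + messageId);
    } catch (KeyExistsException e) {
      LOG.info("A message with key 'job-42' is already queued");
    }
  }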

  String fillMessageMutation(MutationBatch mb, Message message) throws MessageQueueException {
    // Get the execution time from the message or set to current time so it runs immediately
    long curTimeMicros;
    if (!message.hasTrigger()) {
      curTimeMicros =
          TimeUnit.MICROSECONDS.convert(System.currentTimeMillis(), TimeUnit.MILLISECONDS);
    } else {
      curTimeMicros =
          TimeUnit.MICROSECONDS.convert(
              message.getTrigger().getTriggerTime(), TimeUnit.MILLISECONDS);
    }
    curTimeMicros += (counter.incrementAndGet() % 1000);

    // Update the message for the new token
    message.setToken(TimeUUIDUtils.getMicrosTimeUUID(curTimeMicros));

    // Set up the queue entry
    MessageQueueEntry entry =
        MessageQueueEntry.newMessageEntry(
            message.getPriority(), message.getToken(), MessageQueueEntryState.Waiting);

    // Convert the message object to JSON
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    try {
      mapper.writeValue(baos, message);
      baos.flush();
    } catch (Exception e) {
      throw new MessageQueueException("Failed to serialize message data: " + message, e);
    }

    // Write the queue entry
    String shardKey = getShardKey(message);
    mb.withRow(queueColumnFamily, shardKey)
        .putColumn(entry, new String(baos.toByteArray()), metadata.getRetentionTimeout());

    // Write the lookup from queue key to queue entry
    if (message.hasKey()) {
      mb.withRow(keyIndexColumnFamily, getCompositeKey(getName(), message.getKey()))
          .putEmptyColumn(
              MessageMetadataEntry.newMessageId(getCompositeKey(shardKey, entry.getMessageId())),
              metadata.getRetentionTimeout());
    }

    // Allow hook processing
    for (MessageQueueHooks hook : hooks) {
      hook.beforeSendMessage(message, mb);
    }

    // Update stats and return the composite message id
    stats.incSendMessageCount();
    return getCompositeKey(shardKey, entry.getMessageId());
  }
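
  // Sketch of the token trick used above (illustrative, not part of the original class):
  // converting wall-clock millis to micros leaves the low three digits at zero, so adding
  // a rotating counter offset keeps tokens distinct for messages created within the same
  // millisecond.
  @SuppressWarnings("unused")
  private UUID uniqueTokenExample() {
    long timeMicros =
        TimeUnit.MICROSECONDS.convert(System.currentTimeMillis(), TimeUnit.MILLISECONDS);
    timeMicros += (counter.incrementAndGet() % 1000);
    return TimeUUIDUtils.getMicrosTimeUUID(timeMicros);
  }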

  /**
   * Return history for a single key for the specified time range
   *
   * <p>TODO: honor the time range :)
   */
  @Override
  public List<MessageHistory> getKeyHistory(String key, Long startTime, Long endTime, int count)
      throws MessageQueueException {
    List<MessageHistory> list = Lists.newArrayList();
    ColumnList<UUID> columns;
    try {
      columns =
          keyspace
              .prepareQuery(historyColumnFamily)
              .setConsistencyLevel(consistencyLevel)
              .getRow(key)
              .execute()
              .getResult();
    } catch (ConnectionException e) {
      throw new MessageQueueException("Failed to load history for " + key, e);
    }

    for (Column<UUID> column : columns) {
      try {
        list.add(deserializeString(column.getStringValue(), MessageHistory.class));
      } catch (Exception e) {
        LOG.info("Error deserializing history entry", e);
      }
    }
    return list;
  }
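
  // Illustrative usage sketch (not part of the original class): dump the stored history
  // for a key. Null bounds are fine while the time range is not yet honored (see TODO).
  @SuppressWarnings("unused")
  private void printKeyHistoryExample(String key) throws MessageQueueException {
    for (MessageHistory history : getKeyHistory(key, null, null, 10)) {
      LOG.info("History for '" + key + "': " + history);
    }
  }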

  /**
   * Iterate through shards attempting to extract itemsToPeek items. Will return once itemsToPeek
   * items have been read or all shards have been checked.
   *
   * <p>Note that this call does not take into account the message trigger time and will likely
   * return messages that aren't due to be executed yet.
   *
   * @return List of items
   */
  @Override
  public List<Message> peekMessages(int itemsToPeek) throws MessageQueueException {
    List<Message> messages = Lists.newArrayList();

    for (MessageQueueShard shard : shardReaderPolicy.listShards()) {
      messages.addAll(peekMessages(shard.getName(), itemsToPeek - messages.size()));

      if (messages.size() == itemsToPeek) return messages;
    }

    return messages;
  }
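
  // Illustrative usage sketch (not part of the original class): peek without consuming.
  // Remember that trigger times are ignored, so some returned messages may not be due yet.
  @SuppressWarnings("unused")
  private void peekExample() throws MessageQueueException {
    for (Message message : peekMessages(10)) {
      LOG.info("Pending (possibly future) message with key " + message.getKey());
    }
  }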

  /**
   * Peek into messages contained in the shard. This call does not take trigger time into account
   * and will return messages that are not yet due to be executed.
   *
   * @param shardName shard row to read
   * @param itemsToPeek maximum number of messages to return
   * @return messages currently stored in the shard
   * @throws MessageQueueException if the shard cannot be read
   */
  private Collection<Message> peekMessages(String shardName, int itemsToPeek)
      throws MessageQueueException {
    try {
      ColumnList<MessageQueueEntry> result =
          keyspace
              .prepareQuery(queueColumnFamily)
              .setConsistencyLevel(consistencyLevel)
              .getKey(shardName)
              .withColumnRange(
                  new RangeBuilder()
                      .setLimit(itemsToPeek)
                      .setStart(
                          entrySerializer
                              .makeEndpoint(
                                  (byte) MessageQueueEntryType.Message.ordinal(),
                                  Equality.GREATER_THAN_EQUALS)
                              .toBytes())
                      .setEnd(
                          entrySerializer
                              .makeEndpoint(
                                  (byte) MessageQueueEntryType.Message.ordinal(),
                                  Equality.LESS_THAN_EQUALS)
                              .toBytes())
                      .build())
              .execute()
              .getResult();

      List<Message> messages = Lists.newArrayListWithCapacity(result.size());
      for (Column<MessageQueueEntry> column : result) {
        Message message = extractMessageFromColumn(column);
        if (message != null) messages.add(message);
      }
      return messages;
    } catch (ConnectionException e) {
      throw new MessageQueueException("Error peeking for messages from shard " + shardName, e);
    }
  }

  /**
   * Extract a message body from a column
   *
   * @param column column whose value holds the serialized message body
   * @return the parsed message, or null if it could not be recovered
   */
  Message extractMessageFromColumn(Column<MessageQueueEntry> column) {
    // Parse the message body, falling back to the invalid message handler on failure
    Message message = null;
    try {
      ByteArrayInputStream bais = new ByteArrayInputStream(column.getByteArrayValue());
      message = mapper.readValue(bais, Message.class);
    } catch (Exception e) {
      LOG.warn("Error processing message ", e);
      try {
        message = invalidMessageHandler.apply(column.getStringValue());
      } catch (Exception e2) {
        LOG.warn("Error processing invalid message", e2);
      }
    }
    return message;
  }

  /**
   * Fast check to see if a shard has messages to process
   *
   * @param shardName shard row to check
   * @return true if the shard contains at least one message that is due
   * @throws MessageQueueException if the shard cannot be read
   */
  private boolean hasMessages(String shardName) throws MessageQueueException {
    UUID currentTime = TimeUUIDUtils.getUniqueTimeUUIDinMicros();

    try {
      ColumnList<MessageQueueEntry> result =
          keyspace
              .prepareQuery(queueColumnFamily)
              .setConsistencyLevel(consistencyLevel)
              .getKey(shardName)
              .withColumnRange(
                  new RangeBuilder()
                      .setLimit(1) // One matching column is enough to answer the check
                      .setStart(
                          entrySerializer
                              .makeEndpoint(
                                  (byte) MessageQueueEntryType.Message.ordinal(), Equality.EQUAL)
                              .toBytes())
                      .setEnd(
                          entrySerializer
                              .makeEndpoint(
                                  (byte) MessageQueueEntryType.Message.ordinal(), Equality.EQUAL)
                              .append((byte) 0, Equality.EQUAL)
                              .append(currentTime, Equality.LESS_THAN_EQUALS)
                              .toBytes())
                      .build())
              .execute()
              .getResult();
      return !result.isEmpty();
    } catch (ConnectionException e) {
      throw new MessageQueueException("Error checking shard for messages. " + shardName, e);
    }
  }

  @Override
  public Map<String, MessageQueueShardStats> getShardStats() {
    return shardReaderPolicy.getShardStats();
  }

  public ShardReaderPolicy getShardReaderPolicy() {
    return shardReaderPolicy;
  }

  public ColumnFamily<String, MessageQueueEntry> getQueueColumnFamily() {
    return this.queueColumnFamily;
  }

  public ColumnFamily<String, MessageMetadataEntry> getKeyIndexColumnFamily() {
    return this.keyIndexColumnFamily;
  }

  public ColumnFamily<String, UUID> getHistoryColumnFamily() {
    return this.historyColumnFamily;
  }
}
Exemplo n.º 22
0
 public long elapsedTimeSeconds() {
   return TimeUnit.SECONDS.convert((end - start), TimeUnit.NANOSECONDS);
 }
 protected long getTimestamp(Bundle result) {
   return TimeUnit.SECONDS.convert(result.getLong(getDateColumnName()), getDateColumnTimeUnit());
 }
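A minimal driver sketch for the elapsed-time helper above, assuming hypothetical start/end fields captured with System.nanoTime():

 long start = System.nanoTime();
 runWorkload(); // hypothetical workload
 long end = System.nanoTime();
 // Same conversion as elapsedTimeSeconds(): whole seconds, truncated toward zero
 long seconds = TimeUnit.SECONDS.convert(end - start, TimeUnit.NANOSECONDS);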
Exemplo n.º 24
0
  @Override
  public void run() {
    try {
      logger.debug("INSIDE THE RUN METHOD OF: {} ", this.getClass().getName());
      logger.debug("The test id is:" + testSuiteId);
      if (testSuiteStatusDao == null) {
        logger.debug("The test doa is null");
      }
      TestSuiteStatus testSuiteStatus = testSuiteStatusDao.getByTestSuiteId(testSuiteId);
      TestSuite testSuite = testSuiteStatus.getTestSuite();
      Set<TestSuiteTest> tests = testSuite.getTestSuiteTests();
      List<Long> testIds = new ArrayList<Long>();
      for (TestSuiteTest testSuiteTest : tests) {
        Test aTest = testSuiteTest.getTest();
        testIds.add(aTest.getId());
        testMonitor.scheduleATest(new TestExecutor(aTest.getId()));
      }
      logger.debug("Total number of tests in test suite: " + testIds.size());
      Thread.sleep(15000);

      TestSuiteResult result = new TestSuiteResult();
      result.setTestSuiteName(testSuite.getTestSuiteName());
      result.setTestSuiteDescription(testSuite.getTestSuiteDescription());
      result.setTotalTests(testIds.size());
      result.setTotalFailed(0);
      result.setTotalPassed(0);
      List<FailedTestResult> failedTests = new ArrayList<FailedTestResult>();
      FailedTestResult failedResult;
      // get the updates now
      long start = System.nanoTime();
      for (Long testId : testIds) {
        TestStatus testStatus = testStatusDao.getByTestId(testId);
        long timeTaken = TimeUnit.SECONDS.convert(System.nanoTime() - start, TimeUnit.NANOSECONDS);
        while (timeTaken < 120 && testStatus.getTestStatus().equals(TestStatus.RUNNING)) {
          Thread.sleep(5000);
          // Refresh the status and the elapsed time, otherwise this loop can never exit
          testStatus = testStatusDao.getByTestId(testId);
          timeTaken = TimeUnit.SECONDS.convert(System.nanoTime() - start, TimeUnit.NANOSECONDS);
        }
        if (testStatus.getTestStatus().equals(TestStatus.RUNNING)) {
          result.setTotalFailed(result.getTotalFailed() + 1);
          failedResult = new FailedTestResult();
          failedResult.setTestName(testStatus.getTest().getTestName());
          failedResult.setFailMessage("The Test is taking too long to run");
          failedTests.add(failedResult);
        } else if (testStatus.getTestStatus().equals(TestStatus.FAILED)) {
          result.setTotalFailed(result.getTotalFailed() + 1);
          failedResult = new FailedTestResult();
          failedResult.setTestName(testStatus.getTest().getTestName());
          failedResult.setFailMessage("hmm. this test failed");
          failedTests.add(failedResult);
        } else if (testStatus.getTestStatus().equals(TestStatus.PASSED)) {
          result.setTotalPassed(result.getTotalPassed() + 1);
        }
      }
      result.setFailedTests(failedTests);

      // generate a test report.
      String testReportURL = testMonitor.generateTestSuiteReport(result);

      // update the test with the URL link
      testSuiteStatus.setReportName(testReportURL);

      // update the status
      logger.debug("ABOUT TO SAVE THE STATUS");
      testSuiteStatus.setTestSuiteStatus(TestSuiteStatus.COMPLETED);
      testSuiteStatusDao.save(testSuiteStatus);
      logger.debug("UPDATED AND SAVED THE STATUS");
    } catch (Exception exp) {
      final Writer result = new StringWriter();
      final PrintWriter printWriter = new PrintWriter(result);
      exp.printStackTrace(printWriter);
      logger.debug(result.toString());
    }
  }
Exemplo n.º 25
0
 /**
  * Get the number of whole seconds between two millisecond timestamps
  *
  * @param latestTimeMillis the more recent time, in epoch milliseconds
  * @param oldestTimeMillis the earlier time, in epoch milliseconds
  * @return the elapsed time in seconds, truncated toward zero
  */
 static int getSecondsBetweenTimes(long latestTimeMillis, long oldestTimeMillis) {
   return (int)
       TimeUnit.SECONDS.convert((latestTimeMillis - oldestTimeMillis), TimeUnit.MILLISECONDS);
 }
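For instance, assuming both arguments are epoch milliseconds (illustrative values):

 long oldest = System.currentTimeMillis();
 long latest = oldest + 1500; // pretend 1.5 s elapsed
 int elapsed = getSecondsBetweenTimes(latest, oldest); // 1 -- truncated, not rounded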
 public Builder withLockTtl(Long ttl, TimeUnit units) {
   this.lockTtl = (int) TimeUnit.SECONDS.convert(ttl, units);
   return this;
 }
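A usage sketch for the builder method above ('builder' is a hypothetical Builder instance); note that the conversion truncates, so sub-second TTLs silently become zero:

 builder.withLockTtl(2L, TimeUnit.MINUTES); // stored internally as 120 seconds
 builder.withLockTtl(500L, TimeUnit.MILLISECONDS); // truncates to 0 -- pass whole seconds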
Exemplo n.º 27
0
 public Message setTimeout(long timeout, TimeUnit units) {
   this.timeout = (int) TimeUnit.SECONDS.convert(timeout, units);
   return this;
 }
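The same truncation applies here; a quick sketch, assuming Message has a no-arg constructor:

 Message message = new Message().setTimeout(90L, TimeUnit.SECONDS); // stored as 90
 message.setTimeout(1500L, TimeUnit.MILLISECONDS); // truncates to 1 second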
Exemplo n.º 28
0
 /**
  * Creates a new instance.
  *
  * @param limit the time limit
  * @param timeUnit the unit of {@code limit}
  */
 public SimpleTimeLimitStrategy(int limit, TimeUnit timeUnit) {
   this.limit = (int) TimeUnit.SECONDS.convert(limit, timeUnit);
 }
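For example (illustrative):

 // Two minutes are stored internally as 120 seconds
 SimpleTimeLimitStrategy strategy = new SimpleTimeLimitStrategy(2, TimeUnit.MINUTES);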
Exemplo n.º 29
0
 /**
  * Returns the start time in seconds.
  *
  * @return the start time in seconds, converted from nanoseconds
  */
 public long startTimeS() {
   return TimeUnit.SECONDS.convert(startNs, TimeUnit.NANOSECONDS);
 }
Exemplo n.º 30
-1
  private void serialize(Query query, OutputStream outputStream) throws IOException {
    JsonGenerator g = jsonFactory.createJsonGenerator(outputStream, JsonEncoding.UTF8);
    g.useDefaultPrettyPrinter();
    g.writeStartObject();
    g.writeStringField("name", "jmxtrans");
    g.writeStringField("type", "metric");
    g.writeStringField("handler", sensuhandler);

    StringBuilder jsonoutput = new StringBuilder();
    List<String> typeNames = getTypeNames();
    for (Result result : query.getResults()) {
      Map<String, Object> resultValues = result.getValues();
      if (resultValues != null) {
        for (Map.Entry<String, Object> values : resultValues.entrySet()) {
          if (NumberUtils.isNumeric(values.getValue())) {
            Object value = values.getValue();
            jsonoutput
                .append(JmxUtils.getKeyString(query, result, values, typeNames, null))
                .append(" ")
                .append(value)
                .append(" ")
                .append(TimeUnit.SECONDS.convert(result.getEpoch(), TimeUnit.MILLISECONDS))
                .append(System.getProperty("line.separator"));
          }
        }
      }
    }
    g.writeStringField("output", jsonoutput.toString());
    g.writeEndObject();
    g.flush();
    g.close();
  }
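For reference, the event written by this method has roughly the following shape (illustrative handler and metric values); the output field carries one "key value epoch-seconds" line per numeric result:

 {
   "name": "jmxtrans",
   "type": "metric",
   "handler": "default",
   "output": "server.jvm.HeapMemoryUsage.used 1048576 1393892400\n"
 }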