/**
   * Return the queue depth of this Usergrid instance in JSON format.
   */
  @GET
  @RequireSystemAccess
  @Path("size")
  public ApiResponse getQueueDepth(
      @QueryParam("callback") @DefaultValue("callback") String callback) {

    ApiResponse response = createApiResponse();
    response.setAction("get queue depth");

    AsyncEventService eventService = injector.getInstance(AsyncEventService.class);
    ObjectNode node = JsonNodeFactory.instance.objectNode();

    node.put("queueDepth", eventService.getQueueDepth());

    response.setProperty("data", node);

    return response;
  }
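
  // Example (illustrative): a call against this endpoint and the JSON shape it
  // returns. The host, port, superuser credentials, and "/system/queue" prefix are
  // assumptions; only the relative @Path("size") is defined in this class.
  //
  //   curl -u superuser:pass "http://localhost:8080/system/queue/size"
  //
  //   { "action": "get queue depth", "data": { "queueDepth": 42 } }
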
  @Override
  public ReIndexStatus rebuildIndex(final ReIndexRequestBuilder reIndexRequestBuilder) {

    // load the last emitted edge scope if a cursor is present, so the job can resume

    final Optional<EdgeScope> cursor = parseCursor(reIndexRequestBuilder.getCursor());

    final CursorSeek<Edge> cursorSeek = getResumeEdge(cursor);

    final Optional<ApplicationScope> appId = reIndexRequestBuilder.getApplicationScope();

    Preconditions.checkArgument(
        !(cursor.isPresent() && appId.isPresent()),
        "You cannot specify both an application id and a cursor. When resuming with a cursor, omit the application id.");

    final Observable<ApplicationScope> applicationScopes = getApplications(cursor, appId);

    final String jobId = StringUtils.sanitizeUUID(UUIDGenerator.newTimeUUID());

    final long modifiedSince = reIndexRequestBuilder.getUpdateTimestamp().or(Long.MIN_VALUE);

    // create an observable that loads a batch to be indexed

    final Observable<List<EdgeScope>> runningReIndex =
        allEntityIdsObservable
            .getEdgesToEntities(
                applicationScopes,
                reIndexRequestBuilder.getCollectionName(),
                cursorSeek.getSeekValue())
            .buffer(indexProcessorFig.getReindexBufferSize())
            .doOnNext(
                edges -> {
                  logger.info("Sending batch of {} to be indexed.", edges.size());
                  indexService.indexBatch(edges, modifiedSince);
                });

    // persist progress as we go: collect each buffered batch into a flushing
    // collector, so a partially complete run can resume from its cursor with
    // minimal loss
    runningReIndex
        .collect(
            () -> new FlushingCollector(jobId),
            (flushingCollector, edgeScopes) -> flushingCollector.flushBuffer(edgeScopes))
        .doOnNext(FlushingCollector::complete)
        // subscribe on the I/O scheduler and run the task
        .subscribeOn(Schedulers.io())
        // fire and forget: the reindex keeps running after this method returns
        .subscribe();

    return new ReIndexStatus(jobId, Status.STARTED, 0, 0);
  }
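
  // Usage sketch (hypothetical caller). getBuilder(), withApplicationId(), and
  // withCollection() are assumed builder methods inferred from the accessors used
  // above; they are not verified against the ReIndexRequestBuilder interface.
  //
  //   ReIndexRequestBuilder builder = reIndexService.getBuilder()
  //       .withApplicationId(appId)   // omit this when resuming with a cursor
  //       .withCollection("things");
  //   ReIndexStatus status = reIndexService.rebuildIndex(builder);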

  /**
   * 4. Delete all entity documents out of Elasticsearch.
   * 5. Compact the graph so that it deletes the marked values.
   * 6. Delete the entity from Cassandra using the map manager.
   */
  private Id deleteAsync(MapManager mapManager, ApplicationScope applicationScope, Id entityId) {
    try {
      // Steps 4 and 5: queue the async delete, which removes the entity's documents
      // from the index and compacts the graph

      if (!skipIndexingForType(entityId.getType(), applicationScope)) {

        asyncEventService.queueEntityDelete(applicationScope, entityId);
      }
      // Step 6: delete the entity's entry from the UUID map in Cassandra
      mapManager.delete(entityId.getUuid().toString());
      return entityId;
    } catch (Exception e) {
      throw new RuntimeException("Unable to delete entity " + entityId, e);
    }
  }
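
  // Usage sketch (hypothetical): resolving the per-application map manager before
  // delegating to deleteAsync. mapManagerFactory and getMapScope() are assumptions;
  // the real wiring lives in the calling entity manager.
  //
  //   MapManager mm = mapManagerFactory.createMapManager(getMapScope(applicationScope));
  //   Id deleted = deleteAsync(mm, applicationScope, entityId);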