@Override
 public void process() throws IOException {
   long startTime = System.currentTimeMillis();
   try {
     Status status = this.splitTaskExecutor.exec(wal, mode, reporter);
     switch (status) {
       case DONE:
         endTask(
             zkw,
             new SplitLogTask.Done(this.serverName, this.mode),
             SplitLogCounters.tot_wkr_task_done,
             curTask,
             curTaskZKVersion.intValue());
         break;
       case PREEMPTED:
         SplitLogCounters.tot_wkr_preempt_task.incrementAndGet();
         LOG.warn("task execution prempted " + wal);
         break;
       case ERR:
         if (server != null && !server.isStopped()) {
           endTask(
               zkw,
               new SplitLogTask.Err(this.serverName, this.mode),
               SplitLogCounters.tot_wkr_task_err,
               curTask,
               curTaskZKVersion.intValue());
           break;
         }
          // if the RS is exiting then there are probably tons of things
          // that can go wrong. Resign instead of signaling error.
          // $FALL-THROUGH$
       case RESIGNED:
         if (server != null && server.isStopped()) {
           LOG.info("task execution interrupted because worker is exiting " + curTask);
         }
         endTask(
             zkw,
             new SplitLogTask.Resigned(this.serverName, this.mode),
             SplitLogCounters.tot_wkr_task_resigned,
             curTask,
             curTaskZKVersion.intValue());
         break;
     }
   } finally {
     LOG.info(
         "worker "
             + serverName
             + " done with task "
             + curTask
             + " in "
             + (System.currentTimeMillis() - startTime)
             + "ms");
     this.inProgressTasks.decrementAndGet();
   }
 }
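
A note on the MutableInt seen here: curTaskZKVersion is passed into endTask so helpers can share one mutable slot for the latest ZooKeeper node version instead of threading return values around. Below is a minimal standalone sketch of that out-parameter idiom; the class and method names are hypothetical (this is not HBase code), and the import assumes the Commons Lang 2.x mutable package.

import org.apache.commons.lang.mutable.MutableInt;

public class VersionedTaskSketch {
  // Hypothetical stand-in for a helper that bumps a node version on success.
  static boolean attemptUpdate(String task, MutableInt zkVersion) {
    zkVersion.increment(); // the helper mutates the caller's counter in place
    return true;
  }

  public static void main(String[] args) {
    MutableInt curTaskZKVersion = new MutableInt(-1);
    attemptUpdate("task-0001", curTaskZKVersion);
    // The caller observes the updated version without any return-value plumbing.
    System.out.println("latest zk version: " + curTaskZKVersion.intValue());
  }
}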
 @Test
 public void testHugeFile() throws Exception {
   setupPartition(PARTITION_ID_2, 2);
   int numMsgs = 1000;
   int msgsPerLine = 10;
   RecoveryManager rm = (RecoveryManager) ManagementContainer.getInstance().getRecoveryManager();
   rm.setMsgIdPerLine(msgsPerLine);
   String archiveName = "activerecovery.multilinetest.archive";
   createActiveRecoveryTestFile(archiveName, msgsPerLine, numMsgs, PARTITION_ID_2);
   MutableInt chunkPartition = new MutableInt(-1);
   MutableLong chunkSeekChar = new MutableLong(-1);
   TimeQueryArchiveBuilder builder;
   do {
     resetPipeline();
     builder =
         new TimeQueryArchiveBuilder(
             1,
             1,
             m_pipeline,
             m_quota,
             archiveName,
             chunkPartition.intValue(),
             chunkSeekChar.longValue());
     builder.processTimeQuery(1L, 2L);
     parseChunkHint(builder.getChunkHint(), chunkPartition, chunkSeekChar);
     builder.finish();
   } while (builder.hasMoreChunks());
   assertEquals(numMsgs, m_processedIds.size());
 }
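
parseChunkHint is a test helper not shown in this snippet; it evidently writes the resume position back through the two mutable out-parameters so each loop iteration picks up where the previous chunk ended. A hypothetical sketch of that handoff, assuming (purely for illustration) a "partition:offset" hint format:

import org.apache.commons.lang.mutable.MutableInt;
import org.apache.commons.lang.mutable.MutableLong;

public class ChunkHintSketch {
  static void parseChunkHint(String hint, MutableInt partition, MutableLong seekChar) {
    String[] parts = hint.split(":");
    partition.setValue(Integer.parseInt(parts[0])); // out-parameters updated in place
    seekChar.setValue(Long.parseLong(parts[1]));
  }

  public static void main(String[] args) {
    MutableInt chunkPartition = new MutableInt(-1);
    MutableLong chunkSeekChar = new MutableLong(-1);
    parseChunkHint("2:4096", chunkPartition, chunkSeekChar);
    // The next iteration would resume from partition 2, offset 4096.
    System.out.println(chunkPartition + ":" + chunkSeekChar);
  }
}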
   /**
    * Get the per-region map of agent counts available to the user, including free (shared)
    * agents and agents owned by the user.
    *
    * @param user current user
    * @return map from region name to available agent count
    */
  @Override
  @Transactional
  public Map<String, MutableInt> getUserAvailableAgentCountMap(User user) {
    Set<String> regions = getRegions();
    Map<String, MutableInt> availShareAgents = newHashMap(regions);
    Map<String, MutableInt> availUserOwnAgent = newHashMap(regions);
    for (String region : regions) {
      availShareAgents.put(region, new MutableInt(0));
      availUserOwnAgent.put(region, new MutableInt(0));
    }
    String myAgentSuffix = "_owned_" + user.getUserId();

    for (AgentInfo agentInfo : getAllActiveAgentInfoFromDB()) {
       // Skip any agent that is not approved, is inactive, or lacks a region prefix.
      if (!agentInfo.isApproved()) {
        continue;
      }

      String fullRegion = agentInfo.getRegion();
      String region = extractRegionFromAgentRegion(fullRegion);
      if (StringUtils.isBlank(region) || !regions.contains(region)) {
        continue;
      }
      // It's my own agent
      if (fullRegion.endsWith(myAgentSuffix)) {
        incrementAgentCount(availUserOwnAgent, region, user.getUserId());
      } else if (fullRegion.contains("_owned_")) {
         // It's another user's agent; skip it.
        continue;
      } else {
        incrementAgentCount(availShareAgents, region, user.getUserId());
      }
    }

    int maxAgentSizePerConsole = getMaxAgentSizePerConsole();

    for (String region : regions) {
      MutableInt mutableInt = availShareAgents.get(region);
      int shareAgentCount = mutableInt.intValue();
      mutableInt.setValue(Math.min(shareAgentCount, maxAgentSizePerConsole));
      mutableInt.add(availUserOwnAgent.get(region));
    }
    return availShareAgents;
  }
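
incrementAgentCount is not shown here, but the surrounding code relies on the classic count-per-key idiom: each map value is a MutableInt mutated in place, which avoids boxed-Integer churn and a second put per hit. A minimal standalone sketch of that idiom:

import java.util.HashMap;
import java.util.Map;
import org.apache.commons.lang.mutable.MutableInt;

public class RegionCountSketch {
  public static void main(String[] args) {
    Map<String, MutableInt> counts = new HashMap<String, MutableInt>();
    for (String region : new String[] {"us-east", "us-east", "eu-west"}) {
      MutableInt count = counts.get(region);
      if (count == null) {
        counts.put(region, new MutableInt(1)); // first sighting of this key
      } else {
        count.increment(); // in-place update, no re-insertion needed
      }
    }
    System.out.println(counts); // e.g. {us-east=2, eu-west=1} (iteration order varies)
  }
}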
  @Test
  public void testLineLengthChangeSmallerFile() throws Exception {
    setupPartition(PARTITION_ID_2, 2);
    int numMsgs = 1000;
    int msgsPerLineStart = 100;
    int msgsPerLineEnd = 10;
    RecoveryManager rm = (RecoveryManager) ManagementContainer.getInstance().getRecoveryManager();
    rm.setMsgIdPerLine(msgsPerLineStart);
    String archiveName = "activerecovery.multilinetest.archive";
    createActiveRecoveryTestFile(archiveName, msgsPerLineStart, numMsgs, PARTITION_ID_2);

    // set to smaller line length after file was created
    rm.setMsgIdPerLine(msgsPerLineEnd);
    MutableInt chunkPartition = new MutableInt(-1);
    MutableLong chunkSeekChar = new MutableLong(-1);
    TimeQueryArchiveBuilder builder;
    int iterations = 0;
    do {
      resetPipeline();
      builder =
          new TimeQueryArchiveBuilder(
              1,
              1,
              m_pipeline,
              m_quota,
              archiveName,
              chunkPartition.intValue(),
              chunkSeekChar.longValue());
      builder.processTimeQuery(1L, 2L);
      parseChunkHint(builder.getChunkHint(), chunkPartition, chunkSeekChar);
      builder.finish();
      iterations++;
    } while (builder.hasMoreChunks());
    assertEquals(
        numMsgs / msgsPerLineStart,
        iterations); // we'll ask for 10, but we'll get 100 in each batch
    assertEquals(numMsgs, m_processedIds.size());
  }
  @Test
  public void testWriteAheadLog() throws Exception {
    final MutableInt flushCount = new MutableInt();
    final MutableBoolean isClosed = new MutableBoolean(false);
    LogicalPlan dag = new LogicalPlan();
    dag.setAttribute(LogicalPlan.APPLICATION_PATH, testMeta.dir);
    dag.setAttribute(OperatorContext.STORAGE_AGENT, new FSStorageAgent(testMeta.dir, null));

    TestGeneratorInputOperator o1 = dag.addOperator("o1", TestGeneratorInputOperator.class);
    StreamingContainerManager scm = new StreamingContainerManager(dag);
    PhysicalPlan plan = scm.getPhysicalPlan();
    Journal j = scm.getJournal();
    ByteArrayOutputStream bos =
        new ByteArrayOutputStream() {
          @Override
          public void flush() throws IOException {
            super.flush();
            flushCount.increment();
          }

          @Override
          public void close() throws IOException {
            super.close();
            isClosed.setValue(true);
          }
        };
    j.setOutputStream(new DataOutputStream(bos));

    PTOperator o1p1 = plan.getOperators(dag.getMeta(o1)).get(0);
    assertEquals(PTOperator.State.PENDING_DEPLOY, o1p1.getState());
    String externalId = new MockContainer(scm, o1p1.getContainer()).container.getExternalId();
    assertEquals("flush count", 1, flushCount.intValue());

    o1p1.setState(PTOperator.State.ACTIVE);
    assertEquals(PTOperator.State.ACTIVE, o1p1.getState());
    assertEquals("flush count", 2, flushCount.intValue());
    assertEquals("is closed", false, isClosed.booleanValue());

    // this will close the stream. There are 2 calls to flush() during the close():
    // one in Kryo Output and one in FilterOutputStream
    j.setOutputStream(null);
    assertEquals("flush count", 4, flushCount.intValue());
    assertEquals("is closed", true, isClosed.booleanValue());

    // output stream is closed, so state will be changed without recording it in the journal
    o1p1.setState(PTOperator.State.INACTIVE);
    assertEquals(PTOperator.State.INACTIVE, o1p1.getState());
    assertEquals("flush count", 4, flushCount.intValue());

    ByteArrayInputStream bis = new ByteArrayInputStream(bos.toByteArray());
    j.replay(new DataInputStream(bis));
    assertEquals(PTOperator.State.ACTIVE, o1p1.getState());

    InetSocketAddress addr1 = InetSocketAddress.createUnresolved("host1", 1);
    PTContainer c1 = plan.getContainers().get(0);
    c1.setState(PTContainer.State.ALLOCATED);
    c1.host = "host1";
    c1.bufferServerAddress = addr1;
    c1.setAllocatedMemoryMB(2);
    c1.setRequiredMemoryMB(1);
    c1.setAllocatedVCores(3);
    c1.setRequiredVCores(4);

    j.setOutputStream(new DataOutputStream(bos));
    j.write(c1.getSetContainerState());

    c1.setExternalId(null);
    c1.setState(PTContainer.State.NEW);
    c1.host = null;
    c1.bufferServerAddress = null;

    bis = new ByteArrayInputStream(bos.toByteArray());
    j.replay(new DataInputStream(bis));

    assertEquals(externalId, c1.getExternalId());
    assertEquals(PTContainer.State.ALLOCATED, c1.getState());
    assertEquals("host1", c1.host);
    assertEquals(addr1, c1.bufferServerAddress);
    assertEquals(1, c1.getRequiredMemoryMB());
    assertEquals(2, c1.getAllocatedMemoryMB());
    assertEquals(3, c1.getAllocatedVCores());
    assertEquals(4, c1.getRequiredVCores());

    j.write(scm.getSetOperatorProperty("o1", "maxTuples", "100"));
    o1.setMaxTuples(10);
    j.setOutputStream(null);
    bis = new ByteArrayInputStream(bos.toByteArray());
    j.replay(new DataInputStream(bis));
    assertEquals(100, o1.getMaxTuples());

    j.setOutputStream(new DataOutputStream(bos));
    scm.setOperatorProperty("o1", "maxTuples", "10");
    assertEquals(10, o1.getMaxTuples());
    o1.setMaxTuples(100);
    assertEquals(100, o1.getMaxTuples());
    j.setOutputStream(null);

    bis = new ByteArrayInputStream(bos.toByteArray());
    j.replay(new DataInputStream(bis));
    assertEquals(10, o1.getMaxTuples());

    j.setOutputStream(new DataOutputStream(bos));
    scm.setPhysicalOperatorProperty(o1p1.getId(), "maxTuples", "50");
  }
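
The flushCount/isClosed trick at the top of this test deserves a note: anonymous inner classes may only capture final locals, so final MutableInt and MutableBoolean holders give the overridden stream methods a mutable channel back to the test. A minimal sketch of just that mechanism:

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.commons.lang.mutable.MutableBoolean;
import org.apache.commons.lang.mutable.MutableInt;

public class StreamSpySketch {
  public static void main(String[] args) throws IOException {
    final MutableInt flushCount = new MutableInt();
    final MutableBoolean isClosed = new MutableBoolean(false);
    ByteArrayOutputStream bos = new ByteArrayOutputStream() {
      @Override
      public void flush() throws IOException {
        super.flush();
        flushCount.increment(); // mutable state escapes the anonymous class
      }

      @Override
      public void close() throws IOException {
        super.close();
        isClosed.setValue(true);
      }
    };
    bos.flush();
    bos.close();
    System.out.println(flushCount + " flush(es), closed=" + isClosed); // 1 flush(es), closed=true
  }
}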
  /**
   * Test.
   *
   * @throws IOException If an error occurs
   */
  @Test
  public void test() throws IOException {
    final List<String> realList;
    final List<String> simulatedList;
    final DefaultCategoryDataset dataset;
    final StringBuilder html;
    final MutableInt actualPropertyCount;
    final MutableInt remainingPropertyCount;
    if (browserVersion_ == BrowserVersion.INTERNET_EXPLORER_6) {
      realList = IE6_;
      simulatedList = IE6_SIMULATED_;
      dataset = CATEGORY_DATASET_IE6_;
      html = IE6_HTML_;
      actualPropertyCount = IE6_ACTUAL_PROPERTY_COUNT_;
      remainingPropertyCount = IE6_REMAINING_PROPERTY_COUNT_;
    } else if (browserVersion_ == BrowserVersion.INTERNET_EXPLORER_7) {
      realList = IE7_;
      simulatedList = IE7_SIMULATED_;
      dataset = CATEGORY_DATASET_IE7_;
      html = IE7_HTML_;
      actualPropertyCount = IE7_ACTUAL_PROPERTY_COUNT_;
      remainingPropertyCount = IE7_REMAINING_PROPERTY_COUNT_;
    } else if (browserVersion_ == BrowserVersion.INTERNET_EXPLORER_8) {
      realList = IE8_;
      simulatedList = IE8_SIMULATED_;
      dataset = CATEGORY_DATASET_IE8_;
      html = IE8_HTML_;
      actualPropertyCount = IE8_ACTUAL_PROPERTY_COUNT_;
      remainingPropertyCount = IE8_REMAINING_PROPERTY_COUNT_;
    } else if (browserVersion_ == BrowserVersion.FIREFOX_2) {
      realList = FF2_;
      simulatedList = FF2_SIMULATED_;
      dataset = CATEGORY_DATASET_FF2_;
      html = FF2_HTML_;
      actualPropertyCount = FF2_ACTUAL_PROPERTY_COUNT_;
      remainingPropertyCount = FF2_REMAINING_PROPERTY_COUNT_;
    } else if (browserVersion_ == BrowserVersion.FIREFOX_3) {
      realList = FF3_;
      simulatedList = FF3_SIMULATED_;
      dataset = CATEGORY_DATASET_FF3_;
      html = FF3_HTML_;
      actualPropertyCount = FF3_ACTUAL_PROPERTY_COUNT_;
      remainingPropertyCount = FF3_REMAINING_PROPERTY_COUNT_;
    } else if (browserVersion_ == BrowserVersion.FIREFOX_3_6) {
      realList = FF3_6_;
      simulatedList = FF3_6_SIMULATED_;
      dataset = CATEGORY_DATASET_FF3_6_;
      html = FF3_6_HTML_;
      actualPropertyCount = FF3_6_ACTUAL_PROPERTY_COUNT_;
      remainingPropertyCount = FF3_6_REMAINING_PROPERTY_COUNT_;
    } else {
      fail("Unknown BrowserVersion " + browserVersion_);
      return;
    }

    List<String> realProperties = Arrays.asList(getValueOf(realList, name_).split(","));
    List<String> simulatedProperties = Arrays.asList(getValueOf(simulatedList, name_).split(","));
    if (realProperties.size() == 1 && realProperties.get(0).length() == 0) {
      realProperties = new ArrayList<String>();
    }
    if (simulatedProperties.size() == 1 && simulatedProperties.get(0).length() == 0) {
      simulatedProperties = new ArrayList<String>();
    }
    final List<String> originalRealProperties = new ArrayList<String>(realProperties);
    removeParentheses(realProperties);
    removeParentheses(simulatedProperties);

    final List<String> erroredProperties = new ArrayList<String>(simulatedProperties);
    erroredProperties.removeAll(realProperties);

    final List<String> implementedProperties = new ArrayList<String>(simulatedProperties);
    implementedProperties.retainAll(realProperties);

    dataset.addValue(implementedProperties.size(), "Implemented", name_);
    dataset.addValue(
        realProperties.size(),
        browserVersion_.getNickname().replace("FF", "Firefox ").replace("IE", "Internet Explorer "),
        name_);
    dataset.addValue(erroredProperties.size(), "Should not be implemented", name_);

    final List<String> remainingProperties = new ArrayList<String>(realProperties);
    remainingProperties.removeAll(implementedProperties);

    actualPropertyCount.add(realProperties.size());
    remainingPropertyCount.add(remainingProperties.size());

    if (LOG.isDebugEnabled()) {
      LOG.debug(name_ + ':' + browserVersion_.getNickname() + ':' + realProperties);
      LOG.debug("Remaining" + ':' + remainingProperties);
      LOG.debug("Error" + ':' + erroredProperties);
    }

    appendHtml(html, originalRealProperties, simulatedProperties, erroredProperties);
    if (dataset.getColumnCount() == IE7_.size()) {
      saveChart(dataset);
      html.append("<tr><td colspan='3' align='right'><b>Total Implemented: ")
          .append(actualPropertyCount.intValue() - remainingPropertyCount.intValue())
          .append(" / ")
          .append(actualPropertyCount.intValue())
          .append("</b></td></tr>");
      html.append("</table>")
          .append('\n')
          .append("<br>")
          .append("Legend:")
          .append("<br>")
          .append("<span style='color: blue'>")
          .append("To be implemented")
          .append("</span>")
          .append("<br>")
          .append("<span style='color: green'>")
          .append("Implemented")
          .append("</span>")
          .append("<br>")
          .append("<span style='color: red'>")
          .append("Should not be implemented")
          .append("</span>")
          .append("</html>");

      FileUtils.writeStringToFile(
          new File(
              getArtifactsDirectory() + "/properties-" + browserVersion_.getNickname() + ".html"),
          html.toString());
    }
  }
  @Test
  public void testRecommender() throws Exception {
    MutableInt recommendCount = new MutableInt();
    Recommender mockRecommender = new MockRecommender(recommendCount);

    Recommender cachingRecommender = new CachingRecommender(mockRecommender);
    cachingRecommender.recommend(1, 1);
    assertEquals(1, recommendCount.intValue());
    cachingRecommender.recommend(2, 1);
    assertEquals(2, recommendCount.intValue());
    cachingRecommender.recommend(1, 1);
    assertEquals(2, recommendCount.intValue());
    cachingRecommender.recommend(2, 1);
    assertEquals(2, recommendCount.intValue());
    cachingRecommender.refresh(null);
    cachingRecommender.recommend(1, 1);
    assertEquals(3, recommendCount.intValue());
    cachingRecommender.recommend(2, 1);
    assertEquals(4, recommendCount.intValue());
    cachingRecommender.recommend(3, 1);
    assertEquals(5, recommendCount.intValue());

    // Results from this recommend() method can be cached...
    IDRescorer rescorer = NullRescorer.getItemInstance();
    cachingRecommender.refresh(null);
    cachingRecommender.recommend(1, 1, rescorer);
    assertEquals(6, recommendCount.intValue());
    cachingRecommender.recommend(2, 1, rescorer);
    assertEquals(7, recommendCount.intValue());
    cachingRecommender.recommend(1, 1, rescorer);
    assertEquals(7, recommendCount.intValue());
    cachingRecommender.recommend(2, 1, rescorer);
    assertEquals(7, recommendCount.intValue());

    // until you switch Rescorers
    cachingRecommender.recommend(1, 1, null);
    assertEquals(8, recommendCount.intValue());
    cachingRecommender.recommend(2, 1, null);
    assertEquals(9, recommendCount.intValue());

    cachingRecommender.refresh(null);
    cachingRecommender.estimatePreference(1, 1);
    assertEquals(10, recommendCount.intValue());
    cachingRecommender.estimatePreference(1, 2);
    assertEquals(11, recommendCount.intValue());
    cachingRecommender.estimatePreference(1, 2);
    assertEquals(11, recommendCount.intValue());
  }
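
Every assertion in this test reads the same way: the MutableInt handed to the mock counts delegate calls, so a cache hit shows up as an unchanged count. A stripped-down sketch of that verification style, with a plain map standing in for the cache (the names here are illustrative, not Mahout API):

import java.util.HashMap;
import java.util.Map;
import org.apache.commons.lang.mutable.MutableInt;

public class CountingCacheSketch {
  public static void main(String[] args) {
    final MutableInt computeCount = new MutableInt();
    Map<Long, String> cache = new HashMap<Long, String>();
    for (long userId : new long[] {1, 2, 1, 2}) {
      if (!cache.containsKey(userId)) {
        computeCount.increment(); // stands in for a real delegate call
        cache.put(userId, "recs-for-" + userId);
      }
    }
    System.out.println(computeCount); // 2: the repeated lookups were cache hits
  }
}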
  /**
   * Tests the use of a custom job manager.
   *
   * @throws Exception if an error occurs
   */
  @Test
  public void useCustomJobManager() throws Exception {
    final MutableInt jobCount = new MutableInt(0);
    final JavaScriptJobManager mgr =
        new JavaScriptJobManager() {
          /** Serial version UID. */
          private static final long serialVersionUID = 4189494067589390155L;
          /** {@inheritDoc} */
          public int waitForJobsStartingBefore(final long delayMillis) {
            return jobCount.intValue();
          }
          /** {@inheritDoc} */
          public int waitForJobs(final long timeoutMillis) {
            return jobCount.intValue();
          }
          /** {@inheritDoc} */
          public void stopJob(final int id) {
            // Empty.
          }
          /** {@inheritDoc} */
          public void shutdown() {
            // Empty.
          }
          /** {@inheritDoc} */
          public void removeJob(final int id) {
            // Empty.
          }
          /** {@inheritDoc} */
          public void removeAllJobs() {
            // Empty.
          }
          /** {@inheritDoc} */
          public int getJobCount() {
            return jobCount.intValue();
          }
          /** {@inheritDoc} */
          public int addJob(final JavaScriptJob job, final Page page) {
            jobCount.increment();
            return jobCount.intValue();
          }
        };

    final WebWindowListener listener =
        new WebWindowListener() {
          /** {@inheritDoc} */
          public void webWindowOpened(final WebWindowEvent event) {
            ((WebWindowImpl) event.getWebWindow()).setJobManager(mgr);
          }
          /** {@inheritDoc} */
          public void webWindowContentChanged(final WebWindowEvent event) {
            // Empty.
          }
          /** {@inheritDoc} */
          public void webWindowClosed(final WebWindowEvent event) {
            // Empty.
          }
        };

    final WebClient client = new WebClient();
    client.addWebWindowListener(listener);

    final TopLevelWindow window = (TopLevelWindow) client.getCurrentWindow();
    window.setJobManager(mgr);

    final MockWebConnection conn = new MockWebConnection();
    conn.setDefaultResponse(
        "<html><body><script>window.setTimeout('', 500);</script></body></html>");
    client.setWebConnection(conn);

    client.getPage(URL_FIRST);
    assertEquals(1, jobCount.intValue());

    client.getPage(URL_FIRST);
    assertEquals(2, jobCount.intValue());
  }
  private EventProcessingResult doAggregateRawEventsInternal() {
    if (!this.clusterLockService.isLockOwner(AGGREGATION_LOCK_NAME)) {
      throw new IllegalStateException(
          "The cluster lock "
              + AGGREGATION_LOCK_NAME
              + " must be owned by the current thread and server");
    }

    if (!this.portalEventDimensionPopulator.isCheckedDimensions()) {
      // The first time aggregation happens, run populateDimensions to ensure enough
      // dimension data exists
      final boolean populatedDimensions = this.portalEventAggregationManager.populateDimensions();
      if (!populatedDimensions) {
        this.logger.warn(
            "Aborting raw event aggregation, populateDimensions returned false so the state of date/time dimensions is unknown");
        return null;
      }
    }

    // Flush any dimension creation before aggregation
    final EntityManager entityManager = this.getEntityManager();
    entityManager.flush();
    entityManager.setFlushMode(FlushModeType.COMMIT);

    final IEventAggregatorStatus eventAggregatorStatus =
        eventAggregationManagementDao.getEventAggregatorStatus(ProcessingType.AGGREGATION, true);

    // Update status with current server name
    final String serverName = this.portalInfoProvider.getUniqueServerName();
    final String previousServerName = eventAggregatorStatus.getServerName();
    if (previousServerName != null && !serverName.equals(previousServerName)) {
      this.logger.debug(
          "Last aggregation run on {} clearing all aggregation caches", previousServerName);
      final Session session = getEntityManager().unwrap(Session.class);
      final Cache cache = session.getSessionFactory().getCache();
      cache.evictEntityRegions();
    }

    eventAggregatorStatus.setServerName(serverName);

    // Calculate date range for aggregation
    DateTime lastAggregated = eventAggregatorStatus.getLastEventDate();
    if (lastAggregated == null) {
      lastAggregated = portalEventDao.getOldestPortalEventTimestamp();

      // No portal events to aggregate, skip aggregation
      if (lastAggregated == null) {
        return new EventProcessingResult(0, null, null, true);
      }

      // First time aggregation has run, initialize the CLEAN_UNCLOSED status to save catch-up time
      final IEventAggregatorStatus cleanUnclosedStatus =
          eventAggregationManagementDao.getEventAggregatorStatus(
              ProcessingType.CLEAN_UNCLOSED, true);
      AggregationIntervalInfo oldestMinuteInterval =
          this.intervalHelper.getIntervalInfo(AggregationInterval.MINUTE, lastAggregated);
      cleanUnclosedStatus.setLastEventDate(oldestMinuteInterval.getStart().minusMinutes(1));
      eventAggregationManagementDao.updateEventAggregatorStatus(cleanUnclosedStatus);
    }

    final DateTime newestEventTime =
        DateTime.now().minus(this.aggregationDelay).secondOfMinute().roundFloorCopy();

    final Thread currentThread = Thread.currentThread();
    final String currentName = currentThread.getName();
    final MutableInt events = new MutableInt();
    final MutableObject lastEventDate = new MutableObject(newestEventTime);

    boolean complete;
    try {
      currentThread.setName(currentName + "-" + lastAggregated + "_" + newestEventTime);

      logger.debug(
          "Starting aggregation of events between {} (inc) and {} (exc)",
          lastAggregated,
          newestEventTime);

      // Do aggregation, capturing the start and end dates
      eventAggregatorStatus.setLastStart(DateTime.now());

      complete =
          portalEventDao.aggregatePortalEvents(
              lastAggregated,
              newestEventTime,
              this.eventAggregationBatchSize,
              new AggregateEventsHandler(events, lastEventDate, eventAggregatorStatus));

      eventAggregatorStatus.setLastEventDate((DateTime) lastEventDate.getValue());
      eventAggregatorStatus.setLastEnd(DateTime.now());
    } finally {
      currentThread.setName(currentName);
    }

    // Store the results of the aggregation
    eventAggregationManagementDao.updateEventAggregatorStatus(eventAggregatorStatus);

    complete =
        complete
            && (this.eventAggregationBatchSize <= 0
                || events.intValue() < this.eventAggregationBatchSize);
    return new EventProcessingResult(
        events.intValue(), lastAggregated, eventAggregatorStatus.getLastEventDate(), complete);
  }
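
The events counter and lastEventDate holder above exist so that AggregateEventsHandler, invoked deep inside aggregatePortalEvents, can hand its totals back to this method. A minimal sketch of that result-capture pattern (hypothetical names, not uPortal code):

import org.apache.commons.lang.mutable.MutableInt;
import org.apache.commons.lang.mutable.MutableObject;

public class CallbackCaptureSketch {
  interface EventHandler {
    void handle(String event);
  }

  static void processEvents(String[] batch, EventHandler handler) {
    for (String event : batch) {
      handler.handle(event);
    }
  }

  public static void main(String[] args) {
    final MutableInt events = new MutableInt();
    final MutableObject lastEvent = new MutableObject();
    processEvents(new String[] {"login", "search", "logout"}, new EventHandler() {
      public void handle(String event) {
        events.increment();        // running total escapes the callback
        lastEvent.setValue(event); // so does the most recent event
      }
    });
    System.out.println(events + " events, last=" + lastEvent.getValue()); // 3 events, last=logout
  }
}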
  @Override
  public Sequence<Result<SearchResultValue>> run(
      final Query<Result<SearchResultValue>> input, Map<String, Object> responseContext) {
    if (!(input instanceof SearchQuery)) {
      throw new ISE("Got a [%s] which isn't a %s", input.getClass(), SearchQuery.class);
    }

    final SearchQuery query = (SearchQuery) input;
    final Filter filter = Filters.convertDimensionFilters(query.getDimensionsFilter());
    final List<DimensionSpec> dimensions = query.getDimensions();
    final SearchQuerySpec searchQuerySpec = query.getQuery();
    final int limit = query.getLimit();
    final boolean descending = query.isDescending();

    // Closing this will cause segfaults in unit tests.
    final QueryableIndex index = segment.asQueryableIndex();

    if (index != null) {
      final TreeMap<SearchHit, MutableInt> retVal =
          Maps.newTreeMap(query.getSort().getComparator());

      Iterable<DimensionSpec> dimsToSearch;
      if (dimensions == null || dimensions.isEmpty()) {
        dimsToSearch =
            Iterables.transform(index.getAvailableDimensions(), Druids.DIMENSION_IDENTITY);
      } else {
        dimsToSearch = dimensions;
      }

      final BitmapFactory bitmapFactory = index.getBitmapFactoryForDimensions();

      final ImmutableBitmap baseFilter =
          filter == null
              ? null
              : filter.getBitmapIndex(new ColumnSelectorBitmapIndexSelector(bitmapFactory, index));

      for (DimensionSpec dimension : dimsToSearch) {
        final Column column = index.getColumn(dimension.getDimension());
        if (column == null) {
          continue;
        }

        final BitmapIndex bitmapIndex = column.getBitmapIndex();
        ExtractionFn extractionFn = dimension.getExtractionFn();
        if (extractionFn == null) {
          extractionFn = IdentityExtractionFn.getInstance();
        }
        if (bitmapIndex != null) {
          for (int i = 0; i < bitmapIndex.getCardinality(); ++i) {
            String dimVal = Strings.nullToEmpty(extractionFn.apply(bitmapIndex.getValue(i)));
            if (!searchQuerySpec.accept(dimVal)) {
              continue;
            }
            ImmutableBitmap bitmap = bitmapIndex.getBitmap(i);
            if (baseFilter != null) {
              bitmap = bitmapFactory.intersection(Arrays.asList(baseFilter, bitmap));
            }
            if (bitmap.size() > 0) {
              MutableInt counter = new MutableInt(bitmap.size());
              MutableInt prev =
                  retVal.put(new SearchHit(dimension.getOutputName(), dimVal), counter);
              if (prev != null) {
                counter.add(prev.intValue());
              }
              if (retVal.size() >= limit) {
                return makeReturnResult(limit, retVal);
              }
            }
          }
        }
      }

      return makeReturnResult(limit, retVal);
    }

    final StorageAdapter adapter = segment.asStorageAdapter();

    if (adapter == null) {
      log.makeAlert("WTF!? Unable to process search query on segment.")
          .addData("segment", segment.getIdentifier())
          .addData("query", query)
          .emit();
      throw new ISE(
          "Null storage adapter found. Probably trying to issue a query against a segment being memory unmapped.");
    }

    final Iterable<DimensionSpec> dimsToSearch;
    if (dimensions == null || dimensions.isEmpty()) {
      dimsToSearch =
          Iterables.transform(adapter.getAvailableDimensions(), Druids.DIMENSION_IDENTITY);
    } else {
      dimsToSearch = dimensions;
    }

    final Sequence<Cursor> cursors =
        adapter.makeCursors(filter, segment.getDataInterval(), QueryGranularity.ALL, descending);

    final TreeMap<SearchHit, MutableInt> retVal =
        cursors.accumulate(
            Maps.<SearchHit, SearchHit, MutableInt>newTreeMap(query.getSort().getComparator()),
            new Accumulator<TreeMap<SearchHit, MutableInt>, Cursor>() {
              @Override
              public TreeMap<SearchHit, MutableInt> accumulate(
                  TreeMap<SearchHit, MutableInt> set, Cursor cursor) {
                if (set.size() >= limit) {
                  return set;
                }

                Map<String, DimensionSelector> dimSelectors = Maps.newHashMap();
                for (DimensionSpec dim : dimsToSearch) {
                  dimSelectors.put(dim.getOutputName(), cursor.makeDimensionSelector(dim));
                }

                while (!cursor.isDone()) {
                  for (Map.Entry<String, DimensionSelector> entry : dimSelectors.entrySet()) {
                    final DimensionSelector selector = entry.getValue();

                    if (selector != null) {
                      final IndexedInts vals = selector.getRow();
                      for (int i = 0; i < vals.size(); ++i) {
                        final String dimVal = selector.lookupName(vals.get(i));
                        if (searchQuerySpec.accept(dimVal)) {
                          MutableInt counter = new MutableInt(1);
                          MutableInt prev = set.put(new SearchHit(entry.getKey(), dimVal), counter);
                          if (prev != null) {
                            counter.add(prev.intValue());
                          }
                          if (set.size() >= limit) {
                            return set;
                          }
                        }
                      }
                    }
                  }

                  cursor.advance();
                }

                return set;
              }
            });

    return makeReturnResult(limit, retVal);
  }
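
Both the indexed and the cursor-based branches merge counts with the same idiom: Map.put returns the value it displaced, so a fresh MutableInt is inserted first and any previous count is folded into it afterwards. A minimal standalone sketch of that merge:

import java.util.TreeMap;
import org.apache.commons.lang.mutable.MutableInt;

public class PutMergeSketch {
  public static void main(String[] args) {
    TreeMap<String, MutableInt> hits = new TreeMap<String, MutableInt>();
    for (String dimVal : new String[] {"a", "b", "a"}) {
      MutableInt counter = new MutableInt(1);
      MutableInt prev = hits.put(dimVal, counter);
      if (prev != null) {
        counter.add(prev.intValue()); // fold the displaced count into the new entry
      }
    }
    System.out.println(hits); // {a=2, b=1}
  }
}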