  /**
   * Initializes the test fixture: points the analytics configuration at
   * src/test/resources/conf1, forces indexing, and starts an embedded
   * {@link SparkAnalyticsExecutor} bound to localhost.
   */
  @BeforeClass
  public void setup() throws NamingException, AnalyticsException, IOException {
   GenericUtils.clearGlobalCustomDataSourceRepo();
   System.setProperty(
       GenericUtils.WSO2_ANALYTICS_CONF_DIRECTORY_SYS_PROP, "src/test/resources/conf1");
   AnalyticsServiceHolder.setHazelcastInstance(null);
   AnalyticsServiceHolder.setAnalyticsClusterManager(new AnalyticsClusterManagerImpl());
   System.setProperty(AnalyticsServiceHolder.FORCE_INDEXING_ENV_PROP, Boolean.TRUE.toString());
   this.service = ServiceHolder.getAnalyticsDataService();
   ServiceHolder.setAnalyticskExecutor(
       new SparkAnalyticsExecutor("localhost", 0, "src/test/resources/conf1"));
   ServiceHolder.getAnalyticskExecutor().startSparkServer("src/test/resources/conf1");
 }
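
  /**
   * Moves the last-processed timestamp for this incremental processing ID
   * forward to {@code incMaxTS}; the stored value is never moved backwards.
   */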
  private void updateIncProcessingTS() {
   try {
     long existingIncTS =
         ServiceHolder.getIncrementalMetaStore()
             .getLastProcessedTimestamp(this.tenantId, this.incID, false);
     if (existingIncTS < this.incMaxTS) {
       ServiceHolder.getIncrementalMetaStore()
           .setLastProcessedTimestamp(this.tenantId, this.incID, this.incMaxTS, false);
     }
   } catch (AnalyticsException e) {
     throw new RuntimeException(e.getMessage(), e);
   }
 }
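
  /**
   * Looks up the record groups for the target table via the Analytics Data
   * Service and wraps each one in an {@link AnalyticsPartition}, one Spark
   * partition per record group.
   */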
  @Override
  public Partition[] getPartitions() {
   AnalyticsDataResponse resp;
   try {
     resp =
         ServiceHolder.getAnalyticsDataService()
             .get(
                 this.tenantId,
                 this.tableName,
                 computePartitions(),
                 this.allColumns,
                 timeFrom,
                 timeTo,
                  0, // recordsFrom: start of the record range
                  -1); // recordsCount: -1 reads all matching records
   } catch (AnalyticsException e) {
     throw new RuntimeException(e.getMessage(), e);
   }
   RecordGroup[] rgs = resp.getRecordGroups();
   Partition[] result = new Partition[rgs.length];
   for (int i = 0; i < result.length; i++) {
     result[i] = new AnalyticsPartition(resp.getRecordStoreName(), rgs[i], i);
   }
   return result;
 }
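
  /**
   * Reads the records behind the given partition and adapts them into an
   * interruptible Scala iterator of {@link Row}s, so Spark can cancel the
   * task mid-read. The suppression covers the raw
   * {@code InterruptibleIterator} construction below.
   */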
  @SuppressWarnings({"rawtypes", "unchecked"})
  @Override
  public scala.collection.Iterator<Row> compute(Partition split, TaskContext context) {
   AnalyticsPartition partition = (AnalyticsPartition) split;
   try {
     Iterator<Record> recordsItr =
         ServiceHolder.getAnalyticsDataService()
             .readRecords(partition.getRecordStoreName(), partition.getRecordGroup());
     return new InterruptibleIterator(
         context,
         asScalaIterator(
             new RowRecordIteratorAdaptor(recordsItr, this.tenantId, this.incEnable, this.incID)));
   } catch (AnalyticsException e) {
     throw new RuntimeException(e.getMessage(), e);
   }
 }
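
  /**
   * Uses the executor's partition count hint when one is registered, otherwise
   * falls back to {@code AnalyticsConstants.SPARK_DEFAULT_PARTITION_COUNT}.
   */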
  private int computePartitions() throws AnalyticsException {
   if (ServiceHolder.getAnalyticskExecutor() != null) {
     return ServiceHolder.getAnalyticskExecutor().getNumPartitionsHint();
   }
   return AnalyticsConstants.SPARK_DEFAULT_PARTITION_COUNT;
 }
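
  /**
   * Tears down the fixture: stops the Spark executor, destroys the data
   * service, and clears the forced-indexing property set in {@link #setup()}.
   */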
  @AfterClass
  public void done() throws NamingException, AnalyticsException, IOException {
   ServiceHolder.getAnalyticskExecutor().stop();
   this.service.destroy();
   System.clearProperty(AnalyticsServiceHolder.FORCE_INDEXING_ENV_PROP);
 }

  /** @return the initialized {@link JavaSparkContext} */
  public JavaSparkContext getJavaSparkContext() {
    return ServiceHolder.getJavaSparkContext();
  }
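
  /*
   * Usage sketch: with setup() having started the embedded Spark server, a test
   * can run Spark SQL against an analytics table through the executor. This is
   * a minimal illustration only; the executeQuery(tenantId, query) call, the
   * AnalyticsQueryResult#getRows() accessor, the "Log" table and its schema,
   * and TestNG's @Test/Assert are assumptions, not verified API of this class.
   */
  @Test
  public void queryViaSparkExecutor() throws AnalyticsException {
    SparkAnalyticsExecutor ex = ServiceHolder.getAnalyticskExecutor();
    // Register an analytics table with Spark SQL (assumed CarbonAnalytics relation syntax).
    ex.executeQuery(1, "CREATE TEMPORARY TABLE Log USING CarbonAnalytics "
        + "OPTIONS (tableName \"Log\", schema \"server_name STRING, ip STRING\")");
    // Query it back; tenant id 1 is an arbitrary test value.
    AnalyticsQueryResult result = ex.executeQuery(1, "SELECT * FROM Log");
    Assert.assertNotNull(result.getRows());
  }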