  @Override
  public void endWindow() {
    // Process any remaining tuples.
    if (tuples.size() - batchStartIdx > 0) {
      processBatch();
    }
    super.endWindow();
    tuples.clear();
    batchStartIdx = 0;
  }

  @Override
  public void setup(Context.OperatorContext context) {
    super.setup(context);

    LOG.info("Done setting up super");
    aggregatorRegistry.setup();

    // Build the dimensional schema, then prepare one INSERT statement per
    // (dimensions descriptor, aggregator) combination.
    schema = new DimensionalConfigurationSchema(eventSchema, aggregatorRegistry);

    // One key FieldsDescriptor per dimensions descriptor ID (ddID).
    List<FieldsDescriptor> keyFDs = schema.getDimensionsDescriptorIDToKeyDescriptor();

    for (int ddID = 0; ddID < keyFDs.size(); ddID++) {

      LOG.info("ddID {}", ddID);
      FieldsDescriptor keyFD = keyFDs.get(ddID);
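      // Maps each aggregator ID to the FieldsDescriptor of that aggregator's output fields for this ddID.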
      Int2ObjectMap<FieldsDescriptor> aggIDToAggFD =
          schema.getDimensionsDescriptorIDToAggregatorIDToOutputAggregatorDescriptor().get(ddID);

      Map<Integer, PreparedStatement> aggIDToStatement = ddIDToAggIDToStatement.get(ddID);

      if (aggIDToStatement == null) {
        aggIDToStatement = Maps.newHashMap();
        ddIDToAggIDToStatement.put(ddID, aggIDToStatement);
      }

      // tableNames maps each aggregator name to its output table for this ddID;
      // prepare one INSERT statement per configured aggregator.
      for (Map.Entry<String, String> aggTable : tableNames.get(ddID).entrySet()) {
        int aggID = aggregatorRegistry.getIncrementalAggregatorNameToID().get(aggTable.getKey());

        LOG.info("aggID {}", aggID);
        FieldsDescriptor aggFD = aggIDToAggFD.get(aggID);

        // Copy the key fields (Guava Lists, consistent with Maps above) so that removing the
        // time bucket does not mutate the descriptor's own field list.
        List<String> keyNames = Lists.newArrayList(keyFD.getFieldList());
        keyNames.remove(DimensionsDescriptor.DIMENSION_TIME_BUCKET);
        List<String> aggregateNames = aggFD.getFieldList();
        String tableName = aggTable.getValue();

        String statementString = buildStatement(tableName, keyNames, aggregateNames);

        try {
          aggIDToStatement.put(aggID, store.getConnection().prepareStatement(statementString));
        } catch (SQLException ex) {
          throw new RuntimeException(ex);
        }
      }
    }
  }
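
  // buildStatement is referenced above but not shown in this section. The sketch below is only an
  // assumed shape, not the original implementation: it emits a parameterized INSERT with one
  // placeholder per key column and per aggregate column, in the order the names were passed in.
  private String buildStatement(String tableName, List<String> keyNames, List<String> aggregateNames) {
    List<String> columnNames = Lists.newArrayList(keyNames);
    columnNames.addAll(aggregateNames);

    StringBuilder columns = new StringBuilder();
    StringBuilder placeholders = new StringBuilder();

    for (String columnName : columnNames) {
      if (columns.length() > 0) {
        columns.append(',');
        placeholders.append(',');
      }
      columns.append(columnName);
      placeholders.append('?');
    }

    return "INSERT INTO " + tableName + " (" + columns + ") VALUES (" + placeholders + ")";
  }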