  /**
   * MR Cumsum is currently based on a multipass algorithm of (1) preaggregation and (2) subsequent
   * offsetting. Note that we currently support one robust physical operator but many alternative
   * realizations are possible for specific scenarios (e.g., when the preaggregated intermediates fit
   * into the map task memory budget) or by creating custom job types.
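   *
   * <p>Illustrative sketch (not the exact runtime code path): for cumsum over the column vector
   * [1,2,3,4] with block size 2, preaggregation computes per-block sums [3,7]; an in-memory cumsum
   * over these partial aggregates yields [3,10], from which per-block offsets [0,3] are derived and
   * added to the block-local cumsums [1,3] and [3,7], giving the final result [1,3,6,10].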
   *
   * @return root lop of the constructed MR cumulative aggregate plan
   * @throws HopsException if the hop-level construction fails
   * @throws LopsException if the lop-level construction fails
   */
  private Lop constructLopsMRCumulativeUnary() throws HopsException, LopsException {
    Hop input = getInput().get(0);
    long rlen = input.getDim1();
    long clen = input.getDim2();
    long brlen = input.getRowsInBlock();
    long bclen = input.getColsInBlock();
    boolean force = !dimsKnown() || _etypeForced == ExecType.MR;
    OperationTypes aggtype = getCumulativeAggType();

    Lop X = input.constructLops();
    Lop TEMP = X;
    ArrayList<Lop> DATA = new ArrayList<Lop>();
    int level = 0;

    // recursive preaggregation until aggregates fit into CP memory budget
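    // (the estimate charges twice the size of the current intermediate plus one additional
    // 1 x clen row against the local CP memory budget)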
    while (((2 * OptimizerUtils.estimateSize(TEMP.getOutputParameters().getNumRows(), clen)
                    + OptimizerUtils.estimateSize(1, clen))
                > OptimizerUtils.getLocalMemBudget()
            && TEMP.getOutputParameters().getNumRows() > 1)
        || force) {
      DATA.add(TEMP);

      // preaggregation per block
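      // (each row block of the current intermediate contributes one aggregate row,
      // hence ceil(numRows / brlen) output rows)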
      long rlenAgg = (long) Math.ceil((double) TEMP.getOutputParameters().getNumRows() / brlen);
      Lop preagg =
          new CumulativePartialAggregate(
              TEMP, DataType.MATRIX, ValueType.DOUBLE, aggtype, ExecType.MR);
      preagg.getOutputParameters().setDimensions(rlenAgg, clen, brlen, bclen, -1);
      setLineNumbers(preagg);

      Group group = new Group(preagg, Group.OperationTypes.Sort, DataType.MATRIX, ValueType.DOUBLE);
      group.getOutputParameters().setDimensions(rlenAgg, clen, brlen, bclen, -1);
      setLineNumbers(group);

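      // grouping (shuffle) and SUM aggregation assemble the partial aggregate rows, which are
      // emitted by different map tasks, into complete result blocks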
      Aggregate agg =
          new Aggregate(
              group, HopsAgg2Lops.get(AggOp.SUM), getDataType(), getValueType(), ExecType.MR);
      agg.getOutputParameters().setDimensions(rlenAgg, clen, brlen, bclen, -1);
      agg.setupCorrectionLocation(
          CorrectionLocationType
              .NONE); // aggregation uses kahanSum but the inputs do not have correction values
      setLineNumbers(agg);
      TEMP = agg;
      level++;
      force = false; // in case of unknowns, generate one level
    }

    // in-memory cum sum (of partial aggregates)
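    // (the remaining aggregate is expected to fit into the CP memory budget; if it consists of a
    // single row, its cumulative aggregate is trivial and the unary op is skipped)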
    if (TEMP.getOutputParameters().getNumRows() != 1) {
      int k = OptimizerUtils.getConstrainedNumThreads(_maxNumThreads);
      Unary unary1 =
          new Unary(
              TEMP, HopsOpOp1LopsU.get(_op), DataType.MATRIX, ValueType.DOUBLE, ExecType.CP, k);
      unary1
          .getOutputParameters()
          .setDimensions(TEMP.getOutputParameters().getNumRows(), clen, brlen, bclen, -1);
      setLineNumbers(unary1);
      TEMP = unary1;
    }

    // split, group and mr cumsum
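    // each level splits the cumulative aggregate TEMP into per-block offset rows, groups both these
    // offsets and the preserved input of that level (DATA.get(level)) by block index, and applies
    // the offsets to the block-local cumulative aggregates via CumulativeOffsetBinary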
    while (level-- > 0) {
      double init = getCumulativeInitValue();
      CumulativeSplitAggregate split =
          new CumulativeSplitAggregate(TEMP, DataType.MATRIX, ValueType.DOUBLE, init);
      split.getOutputParameters().setDimensions(rlen, clen, brlen, bclen, -1);
      setLineNumbers(split);

      Group group1 =
          new Group(DATA.get(level), Group.OperationTypes.Sort, DataType.MATRIX, ValueType.DOUBLE);
      group1.getOutputParameters().setDimensions(rlen, clen, brlen, bclen, -1);
      setLineNumbers(group1);

      Group group2 = new Group(split, Group.OperationTypes.Sort, DataType.MATRIX, ValueType.DOUBLE);
      group2.getOutputParameters().setDimensions(rlen, clen, brlen, bclen, -1);
      setLineNumbers(group2);

      CumulativeOffsetBinary binary =
          new CumulativeOffsetBinary(
              group1, group2, DataType.MATRIX, ValueType.DOUBLE, aggtype, ExecType.MR);
      binary.getOutputParameters().setDimensions(rlen, clen, brlen, bclen, -1);
      setLineNumbers(binary);
      TEMP = binary;
    }

    return TEMP;
  }

  /**
   * Spark counterpart of the multipass cumulative aggregate construction: the same scheme of (1)
   * preaggregation and (2) subsequent offsetting, but with fused physical operators (the partial
   * aggregate subsumes the block aggregation, and the offset binary subsumes the split aggregate).
   *
   * @return root lop of the constructed Spark cumulative aggregate plan
   * @throws HopsException if the hop-level construction fails
   * @throws LopsException if the lop-level construction fails
   */
  private Lop constructLopsSparkCumulativeUnary() throws HopsException, LopsException {
    Hop input = getInput().get(0);
    long rlen = input.getDim1();
    long clen = input.getDim2();
    long brlen = input.getRowsInBlock();
    long bclen = input.getColsInBlock();
    boolean force = !dimsKnown() || _etypeForced == ExecType.SPARK;
    OperationTypes aggtype = getCumulativeAggType();

    Lop X = input.constructLops();
    Lop TEMP = X;
    ArrayList<Lop> DATA = new ArrayList<Lop>();
    int level = 0;

    // recursive preaggregation until aggregates fit into CP memory budget
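    // (same memory-estimate rationale as in constructLopsMRCumulativeUnary)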
    while (((2 * OptimizerUtils.estimateSize(TEMP.getOutputParameters().getNumRows(), clen)
                    + OptimizerUtils.estimateSize(1, clen))
                > OptimizerUtils.getLocalMemBudget()
            && TEMP.getOutputParameters().getNumRows() > 1)
        || force) {
      DATA.add(TEMP);

      // preaggregation per block (for spark, the CumulativePartialAggregate subsumes both
      // the preaggregation and subsequent block aggregation)
      long rlenAgg = (long) Math.ceil((double) TEMP.getOutputParameters().getNumRows() / brlen);
      Lop preagg =
          new CumulativePartialAggregate(
              TEMP, DataType.MATRIX, ValueType.DOUBLE, aggtype, ExecType.SPARK);
      preagg.getOutputParameters().setDimensions(rlenAgg, clen, brlen, bclen, -1);
      setLineNumbers(preagg);

      TEMP = preagg;
      level++;
      force = false; // in case of unknowns, generate one level
    }

    // in-memory cum sum (of partial aggregates)
    if (TEMP.getOutputParameters().getNumRows() != 1) {
      int k = OptimizerUtils.getConstrainedNumThreads(_maxNumThreads);
      Unary unary1 =
          new Unary(
              TEMP, HopsOpOp1LopsU.get(_op), DataType.MATRIX, ValueType.DOUBLE, ExecType.CP, k);
      unary1
          .getOutputParameters()
          .setDimensions(TEMP.getOutputParameters().getNumRows(), clen, brlen, bclen, -1);
      setLineNumbers(unary1);
      TEMP = unary1;
    }

    // split and offsetting (spark cumsum)
    while (level-- > 0) {
      // (for spark, the CumulativeOffsetBinary subsumes both the split aggregate and
      // the subsequent offset binary apply of split aggregates against the original data)
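      // the init value (e.g., 0 for cumsum, 1 for cumprod) serves as the offset of the first block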
      double initValue = getCumulativeInitValue();
      CumulativeOffsetBinary binary =
          new CumulativeOffsetBinary(
              DATA.get(level),
              TEMP,
              DataType.MATRIX,
              ValueType.DOUBLE,
              initValue,
              aggtype,
              ExecType.SPARK);
      binary.getOutputParameters().setDimensions(rlen, clen, brlen, bclen, -1);
      setLineNumbers(binary);
      TEMP = binary;
    }

    return TEMP;
  }

  @Override
  public Lop constructLops() throws HopsException, LopsException {
    // reuse existing lop
    if (getLops() != null) return getLops();

    try {
      Hop input = getInput().get(0);

      if (getDataType() == DataType.SCALAR // value type casts or matrix to scalar
          || (_op == OpOp1.CAST_AS_MATRIX && getInput().get(0).getDataType() == DataType.SCALAR)
          || (_op == OpOp1.CAST_AS_FRAME && getInput().get(0).getDataType() == DataType.SCALAR)) {
        if (_op == Hop.OpOp1.IQM) // special handling IQM
        {
          Lop iqmLop = constructLopsIQM();
          setLops(iqmLop);
        } else if (_op == Hop.OpOp1.MEDIAN) {
          Lop medianLop = constructLopsMedian();
          setLops(medianLop);
        } else // general case SCALAR/CAST (always in CP)
        {
          UnaryCP.OperationTypes optype = HopsOpOp1LopsUS.get(_op);
          if (optype == null)
            throw new HopsException(
                "Unknown UnaryCP lop type for UnaryOp operation type '" + _op + "'");

          UnaryCP unary1 =
              new UnaryCP(input.constructLops(), optype, getDataType(), getValueType());
          setOutputDimensions(unary1);
          setLineNumbers(unary1);

          setLops(unary1);
        }
      } else // general case MATRIX
      {
        ExecType et = optFindExecType();

        // special handling cumsum/cumprod/cummin/cummax
        if (isCumulativeUnaryOperation() && et != ExecType.CP) {
          // TODO additional physical operator if offsets fit in memory
          Lop cumsumLop = null;
          if (et == ExecType.MR) cumsumLop = constructLopsMRCumulativeUnary();
          else cumsumLop = constructLopsSparkCumulativeUnary();
          setLops(cumsumLop);
        } else // default unary
        {
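          // multi-threaded CP execution is currently generated only for cumulative unary
          // operations; all other unary operations use a single thread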
          int k =
              isCumulativeUnaryOperation()
                  ? OptimizerUtils.getConstrainedNumThreads(_maxNumThreads)
                  : 1;
          Unary unary1 =
              new Unary(
                  input.constructLops(),
                  HopsOpOp1LopsU.get(_op),
                  getDataType(),
                  getValueType(),
                  et,
                  k);
          setOutputDimensions(unary1);
          setLineNumbers(unary1);
          setLops(unary1);
        }
      }
    } catch (Exception e) {
      throw new HopsException(
          this.printErrorLocation() + "error constructing Lops for UnaryOp Hop -- \n ", e);
    }

    // add reblock/checkpoint lops if necessary
    constructAndSetLopsDataFlowProperties();

    return getLops();
  }