Example #1
 /**
  * Do the full tangent normalization process given proportional coverage data.
  *
  * <p>This includes:
  *
  * <ul>
  *   <li>normalization by target factors
  *   <li>projection of the normalized coverage profile into the hyperplane from the PoN
  * </ul>
  *
  * @param pon the panel of normals; never {@code null}
  * @param pcov proportional coverage read counts; never {@code null}, and must contain data for
  *     at least one sample
  * @param ctx the Spark context; use {@code null} if no context is available
  * @return never {@code null}
  */
 public static TangentNormalizationResult tangentNormalizePcov(
     final PoN pon, final ReadCountCollection pcov, final JavaSparkContext ctx) {
   Utils.nonNull(pon, "PoN cannot be null.");
   Utils.nonNull(pcov, "input pcov read counts cannot be null when creating a coverage profile.");
   ParamUtils.isPositive(
       pcov.columnNames().size(), "input cov profile column names cannot be an empty list.");
   final ReadCountCollection coverageProfile = createCoverageProfile(pon, pcov);
   return TangentNormalizer.tangentNormalize(pon, coverageProfile, ctx);
 }
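A minimal usage sketch follows; the loadPoN and loadReadCounts helpers and the file names are hypothetical placeholders for whatever mechanism produces the PoN and the proportional-coverage read counts, and passing null for the Spark context selects the local code path, as the Javadoc above notes.

 // Sketch only: loadPoN and loadReadCounts are hypothetical placeholders.
 final PoN pon = loadPoN(new File("panel-of-normals.pon"));
 final ReadCountCollection pcov = loadReadCounts(new File("sample.pcov.tsv"));
 // A null JavaSparkContext makes tangentNormalizePcov run locally.
 final TangentNormalizationResult result =
     TangentNormalizer.tangentNormalizePcov(pon, pcov, null);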
Example #2
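  /**
   * Creates a coverage profile from the input read counts: maps the case targets onto the PoN's
   * targets and applies the PoN's target-factor normalization.
   *
   * @param pon never {@code null}
   * @param inputReadCounts never {@code null}; must contain at least one sample column
   * @return never {@code null}
   */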
  private static ReadCountCollection createCoverageProfile(
      final PoN pon, final ReadCountCollection inputReadCounts) {
    Utils.nonNull(pon, "PoN cannot be null.");
    Utils.nonNull(
        inputReadCounts, "input read counts cannot be null when creating a coverage profile.");
    ParamUtils.isPositive(
        inputReadCounts.columnNames().size(),
        "inputReadCounts column names cannot be an empty list.");
    final Case2PoNTargetMapper targetMapper =
        new Case2PoNTargetMapper(inputReadCounts.targets(), pon.getTargetNames());
    final RealMatrix inputCounts = targetMapper.fromCaseToPoNCounts(inputReadCounts.counts());
    final RealMatrix targetNormalizedCounts = pon.factorNormalization(inputCounts);

    return targetMapper.fromPoNtoCaseCountCollection(
        targetNormalizedCounts, inputReadCounts.columnNames());
  }
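For intuition, the target-factor normalization step scales each target's counts by that target's factor. Below is a minimal standalone sketch of that idea in Apache Commons Math; it assumes pon.factorNormalization divides each target row by a per-target factor, so treat the PoN implementation as authoritative.

 import org.apache.commons.math3.linear.RealMatrix;

 // Sketch only: assumes factor normalization divides each target row (counts
 // for one target across all samples) by that target's factor.
 static RealMatrix divideRowsByTargetFactors(final RealMatrix counts, final double[] factors) {
   final RealMatrix normalized = counts.copy();
   for (int target = 0; target < normalized.getRowDimension(); target++) {
     for (int sample = 0; sample < normalized.getColumnDimension(); sample++) {
       normalized.setEntry(target, sample, normalized.getEntry(target, sample) / factors[target]);
     }
   }
   return normalized;
 }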
Example #3
  /**
   * Tangent normalize a coverage profile.
   *
   * <p>Notes about the Spark tangent normalization can be found in docs/PoN/
   *
   * @param pon the panel of normals; not {@code null}
   * @param targetFactorNormalizedCounts a {@link ReadCountCollection} of counts that have already
   *     been fully normalized (typically including the target-factor normalization), i.e. a
   *     coverage profile. The column names should be intact. Not {@code null}. See {@link
   *     TangentNormalizer#createCoverageProfile}.
   * @param ctx the Spark context; use {@code null} to run the local (non-Spark) implementation
   * @return never {@code null}
   */
  private static TangentNormalizationResult tangentNormalize(
      final PoN pon,
      final ReadCountCollection targetFactorNormalizedCounts,
      final JavaSparkContext ctx) {

    Utils.nonNull(pon, "PoN cannot be null.");
    Utils.nonNull(targetFactorNormalizedCounts, "targetFactorNormalizedCounts cannot be null.");
    Utils.nonNull(
        targetFactorNormalizedCounts.columnNames(),
        "targetFactorNormalizedCounts column names cannot be null.");
    ParamUtils.isPositive(
        targetFactorNormalizedCounts.columnNames().size(),
        "targetFactorNormalizedCounts column names cannot be an empty list.");

    final Case2PoNTargetMapper targetMapper =
        new Case2PoNTargetMapper(targetFactorNormalizedCounts.targets(), pon.getPanelTargetNames());

    // The input counts with rows (targets) sorted so that they match the PoN's order.
    final RealMatrix tangentNormalizationRawInputCounts =
        targetMapper.fromCaseToPoNCounts(targetFactorNormalizedCounts.counts());

    // We prepare the counts for tangent normalization.
    final RealMatrix tangentNormalizationInputCounts =
        composeTangentNormalizationInputMatrix(tangentNormalizationRawInputCounts);

    if (ctx == null) {

      // Calculate the beta-hats for the input read count columns (samples).
      logger.info("Calculating beta hats...");
      final RealMatrix tangentBetaHats =
          pon.betaHats(tangentNormalizationInputCounts, true, EPSILON);

      // Actual tangent normalization step.
      logger.info(
          "Performing actual tangent normalization ("
              + tangentNormalizationInputCounts.getColumnDimension()
              + " columns)...");
      final RealMatrix tangentNormalizedCounts =
          pon.tangentNormalization(tangentNormalizationInputCounts, tangentBetaHats, true);

      // Output the tangent normalized counts.
      logger.info("Post-processing tangent normalization results...");
      final ReadCountCollection tangentNormalized =
          targetMapper.fromPoNtoCaseCountCollection(
              tangentNormalizedCounts, targetFactorNormalizedCounts.columnNames());
      final ReadCountCollection preTangentNormalized =
          targetMapper.fromPoNtoCaseCountCollection(
              tangentNormalizationInputCounts, targetFactorNormalizedCounts.columnNames());

      return new TangentNormalizationResult(
          tangentNormalized, preTangentNormalized, tangentBetaHats, targetFactorNormalizedCounts);

    } else {

      /*
      Using Spark: the code here is a little more complex for optimization purposes.

      Please see notes in docs/PoN ...

      Ahat^T = (C^T P^T) A^T
      Therefore, C^T is the RowMatrix

      pinv: P
      panel: A
      projection: Ahat
      cases: C
      betahat: C^T P^T
      tangentNormalizedCounts: C - Ahat
       */
      final RealMatrix pinv = pon.getReducedPanelPInverseCounts();
      final RealMatrix panel = pon.getReducedPanelCounts();

      // Make the C^T a distributed matrix (RowMatrix)
      final RowMatrix caseTDistMat =
          SparkConverter.convertRealMatrixToSparkRowMatrix(
              ctx, tangentNormalizationInputCounts.transpose(), TN_NUM_SLICES_SPARK);

      // Spark local matrices (transposed)
      final Matrix pinvTLocalMat =
          new DenseMatrix(
                  pinv.getRowDimension(),
                  pinv.getColumnDimension(),
                  Doubles.concat(pinv.getData()),
                  true)
              .transpose();
      final Matrix panelTLocalMat =
          new DenseMatrix(
                  panel.getRowDimension(),
                  panel.getColumnDimension(),
                  Doubles.concat(panel.getData()),
                  true)
              .transpose();

      // Calculate the projection transpose in a distributed matrix, then convert to Apache Commons
      // matrix (not transposed)
      final RowMatrix betahatDistMat = caseTDistMat.multiply(pinvTLocalMat);
      final RowMatrix projectionTDistMat = betahatDistMat.multiply(panelTLocalMat);
      final RealMatrix projection =
          SparkConverter.convertSparkRowMatrixToRealMatrix(
                  projectionTDistMat, tangentNormalizationInputCounts.transpose().getRowDimension())
              .transpose();

      // Subtract the cases from the projection
      final RealMatrix tangentNormalizedCounts =
          tangentNormalizationInputCounts.subtract(projection);

      // Construct the result object and return it with the correct targets.
      final ReadCountCollection tangentNormalized =
          targetMapper.fromPoNtoCaseCountCollection(
              tangentNormalizedCounts, targetFactorNormalizedCounts.columnNames());
      final ReadCountCollection preTangentNormalized =
          targetMapper.fromPoNtoCaseCountCollection(
              tangentNormalizationInputCounts, targetFactorNormalizedCounts.columnNames());
      final RealMatrix tangentBetaHats =
          SparkConverter.convertSparkRowMatrixToRealMatrix(
              betahatDistMat, tangentNormalizedCounts.getColumnDimension());
      return new TangentNormalizationResult(
          tangentNormalized,
          preTangentNormalized,
          tangentBetaHats.transpose(),
          targetFactorNormalizedCounts);
    }
  }
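The Spark branch relies on the transpose identity from the comment above: the projection Ahat = A P C is computed as Ahat^T = (C^T P^T) A^T, so the case matrix can stay distributed as a RowMatrix of rows C^T while P^T and A^T remain local matrices. A toy Apache Commons Math check of the identity (small arbitrary matrices, not real coverage data):

 import org.apache.commons.math3.linear.MatrixUtils;
 import org.apache.commons.math3.linear.RealMatrix;

 // Toy dimensions: 3 targets, 2 panel components, 1 case sample.
 final RealMatrix A = MatrixUtils.createRealMatrix(new double[][] {{1, 2}, {3, 4}, {5, 6}});
 final RealMatrix P = MatrixUtils.createRealMatrix(new double[][] {{1, 0, 1}, {0, 1, 0}});
 final RealMatrix C = MatrixUtils.createRealMatrix(new double[][] {{2}, {4}, {6}});

 final RealMatrix direct = A.multiply(P).multiply(C); // Ahat = A P C
 final RealMatrix viaTransposes =
     C.transpose().multiply(P.transpose()).multiply(A.transpose()).transpose();
 // direct and viaTransposes agree entry by entry (up to floating-point noise).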
 /**
  * Transforms and composes the string representation of an individual count.
  *
  * <p>The output string must be a fully formatted, human-friendly representation of the
  * transformed value.
  *
  * @param count the individual count value.
  * @param columnTotal the corresponding column total sum.
  * @return never {@code null}.
  * @throws IllegalArgumentException if {@code count} is less than 0 or greater than {@code
  *     columnTotal}.
  */
 protected String apply(final int count, final long columnTotal) {
    ParamUtils.isPositiveOrZero(count, "the count cannot be less than 0");
   Utils.validateArg(count <= columnTotal, "the count cannot be larger than the column total");
   return operator.apply(count, columnTotal);
 }
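For illustration, a hypothetical operator compatible with this contract could render the count as a fraction of its column total; the actual operator field's type and formatting are defined by the enclosing class, so this is only a sketch.

 import java.util.function.BiFunction;

 // Hypothetical example operator: format the count as a proportion of the
 // column total, with six decimal places and a guard for an empty column.
 final BiFunction<Integer, Long, String> proportionFormatter =
     (count, columnTotal) ->
         columnTotal == 0L ? "0" : String.format("%.6f", count / (double) columnTotal);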