Code example #1
  @Override
  protected double getValue(final int x, final int y, final Coordinate crd)
      throws GeoGridException {
    final double wspHeight = crd.z;
    if (Double.isNaN(wspHeight)) return Double.NaN;

    // possible that waterdepth input grid contains water depth less than zero!
    if (wspHeight <= 0.0) return Double.NaN;

    if (m_polygonCollection.size() == 0) return Double.NaN;

    final double cx = crd.x;
    final double cy = crd.y;

    try {
      final GM_Position position =
          m_geoTransformer.transform(GeometryFactory.createGM_Position(cx, cy), getSourceCRS());

      final double damageValue = calculateDamageValue(position, wspHeight);

      final Coordinate resultCrd = new Coordinate(position.getX(), position.getY(), damageValue);

      if (Doubles.isFinite(damageValue))
        m_statistics.addSpecificDamage(m_returnPeriod, resultCrd, m_cellSize);

      return damageValue;
    } catch (final Exception e) {
      e.printStackTrace();
      throw new GeoGridException(e.getLocalizedMessage(), e);
    }
  }
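The Doubles.isFinite guard above keeps NaN and infinite damage values out of the statistics. A minimal, self-contained sketch of just that check (the candidate values below are made up for illustration):

import com.google.common.primitives.Doubles;

public class FiniteCheckSketch {
  public static void main(String[] args) {
    double[] candidates = {1.5, Double.NaN, Double.POSITIVE_INFINITY, -0.0};
    for (double d : candidates) {
      // Doubles.isFinite(d) is equivalent to !Double.isNaN(d) && !Double.isInfinite(d)
      System.out.println(d + " finite? " + Doubles.isFinite(d));
    }
  }
}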
Code example #2
  /**
   * Calculates the price of the bond future product with z-spread.
   *
   * <p>The price of the product is the price on the valuation date.
   *
   * <p>The z-spread is a parallel shift applied to continuously compounded rates or periodic
   * compounded rates of the issuer discounting curve.
   *
   * @param future the future to price
   * @param provider the rates provider
   * @param zSpread the z-spread
   * @param compoundedRateType the compounded rate type
   * @param periodPerYear the number of periods per year
   * @return the price of the product, in decimal form
   */
  public double priceWithZSpread(
      BondFuture future,
      LegalEntityDiscountingProvider provider,
      double zSpread,
      CompoundedRateType compoundedRateType,
      int periodPerYear) {

    ImmutableList<Security<FixedCouponBond>> bondSecurity = future.getBondSecurityBasket();
    int size = bondSecurity.size();
    double[] priceBonds = new double[size];
    for (int i = 0; i < size; ++i) {
      Security<FixedCouponBond> bond = bondSecurity.get(i);
      double dirtyPrice =
          bondPricer.dirtyPriceFromCurvesWithZSpread(
              bond,
              provider,
              zSpread,
              compoundedRateType,
              periodPerYear,
              future.getLastDeliveryDate());
      priceBonds[i] =
          bondPricer.cleanPriceFromDirtyPrice(
                  bond.getProduct(), future.getLastDeliveryDate(), dirtyPrice)
              / future.getConversionFactor().get(i);
    }
    return Doubles.min(priceBonds);
  }
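The futures price is then the smallest of the converted bond prices, i.e. the cheapest-to-deliver bond. Doubles.min works directly on the primitive array and throws IllegalArgumentException when the array is empty; a tiny sketch with arbitrary numbers:

import com.google.common.primitives.Doubles;

public class CheapestToDeliverSketch {
  public static void main(String[] args) {
    // Hypothetical clean-price / conversion-factor ratios for three deliverable bonds.
    double[] convertedPrices = {1.0231, 1.0178, 1.0204};
    System.out.println(Doubles.min(convertedPrices)); // 1.0178
    // Doubles.min(new double[0]) would throw IllegalArgumentException.
  }
}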
Code example #3
  /**
   * Attempts to convert the provided string value to a numeric type, trying Integer, Long and
   * Double in order until successful.
   */
  @Override
  public Object convert(String value) {
    if (value == null || value.isEmpty()) {
      return value;
    }

    Object result = Ints.tryParse(value);

    if (result != null) {
      return result;
    }

    result = Longs.tryParse(value);

    if (result != null) {
      return result;
    }

    result = Doubles.tryParse(value);

    if (result != null) {
      return result;
    }

    return value;
  }
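Ints.tryParse, Longs.tryParse and Doubles.tryParse all return null instead of throwing on unparsable input, which is what makes the fall-through above work. A minimal, standalone sketch of the same narrowing strategy (class name and sample inputs are hypothetical):

import com.google.common.primitives.Doubles;
import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;

public class NarrowestNumberSketch {
  // Returns the value as an Integer, Long or Double, or the original String if nothing parses.
  static Object narrowestNumber(String value) {
    Object result = Ints.tryParse(value);
    if (result == null) {
      result = Longs.tryParse(value);
    }
    if (result == null) {
      result = Doubles.tryParse(value);
    }
    return result != null ? result : value;
  }

  public static void main(String[] args) {
    System.out.println(narrowestNumber("42").getClass().getSimpleName());         // Integer
    System.out.println(narrowestNumber("9999999999").getClass().getSimpleName()); // Long
    System.out.println(narrowestNumber("3.14").getClass().getSimpleName());       // Double
    System.out.println(narrowestNumber("abc"));                                   // abc (unchanged String)
  }
}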
Code example #4
  private double[] getSparkModelInfoFromHDFS(Path location, Configuration conf) throws Exception {

    FileSystem fileSystem = FileSystem.get(location.toUri(), conf);
    FileStatus[] files = fileSystem.listStatus(location);

    if (files == null) throw new Exception("Couldn't find Spark Truck ML weights at: " + location);

    ArrayList<Double> modelInfo = new ArrayList<Double>();
    for (FileStatus file : files) {

      if (file.getPath().getName().startsWith("_")) {
        continue;
      }

      InputStream stream = fileSystem.open(file.getPath());

      StringWriter writer = new StringWriter();
      IOUtils.copy(stream, writer, "UTF-8");
      String raw = writer.toString();
      for (String str : raw.split("\n")) {
        modelInfo.add(Double.valueOf(str));
      }
    }

    return Doubles.toArray(modelInfo);
  }
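Several of these examples accumulate values in a List<Double> and only at the end call Doubles.toArray, which copies and unboxes the collection into a fresh primitive double[]. A minimal sketch (the values are arbitrary):

import com.google.common.primitives.Doubles;
import java.util.ArrayList;
import java.util.List;

public class ToArraySketch {
  public static void main(String[] args) {
    List<Double> weights = new ArrayList<>();
    weights.add(0.1);
    weights.add(2.5);
    weights.add(-3.0);
    double[] primitive = Doubles.toArray(weights); // new double[] {0.1, 2.5, -3.0}
    System.out.println(primitive.length);          // 3
  }
}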
Code example #5
File: LoadRequirement.java Project: valamburi/RinSim
  @Override
  public boolean apply(@Nullable Scenario scenario) {
    final List<Double> loads =
        newArrayList(
            relative ? Metrics.measureRelativeLoad(scenario) : Metrics.measureLoad(scenario));
    final int toAdd = desiredLoadList.size() - loads.size();
    for (int j = 0; j < toAdd; j++) {
      loads.add(0d);
    }

    final double[] deviations =
        abs(subtract(Doubles.toArray(desiredLoadList), Doubles.toArray(loads)));
    final double mean = StatUtils.mean(deviations);
    final double max = Doubles.max(deviations);
    return max <= maxMax && mean <= maxMean;
  }
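The check above pads the measured loads with zeros, takes element-wise absolute deviations from the desired loads, and requires both the maximum deviation (via Doubles.max) and the mean deviation to stay under the configured bounds. A minimal sketch of that logic with plain arrays and made-up thresholds (the abs/subtract helpers are replaced by an explicit loop):

import com.google.common.primitives.Doubles;

public class LoadDeviationSketch {
  static boolean withinBounds(double[] desired, double[] measured, double maxMax, double maxMean) {
    double[] deviations = new double[desired.length];
    double sum = 0d;
    for (int i = 0; i < desired.length; i++) {
      double m = i < measured.length ? measured[i] : 0d; // pad missing measurements with zero
      deviations[i] = Math.abs(desired[i] - m);
      sum += deviations[i];
    }
    return Doubles.max(deviations) <= maxMax && sum / deviations.length <= maxMean;
  }

  public static void main(String[] args) {
    double[] desired = {1.0, 0.8, 0.5};
    double[] measured = {0.9, 0.85};
    System.out.println(withinBounds(desired, measured, 0.6, 0.3)); // true
  }
}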
Code example #6
  private void extremaOp(TiffMeta surface, GridCoverage2D gridCoverage2D) {
    double min = Double.MAX_VALUE;
    double max = Double.MIN_VALUE;

    RenderedImage img = gridCoverage2D.getRenderedImage();

    RenderedOp extremaOp = ExtremaDescriptor.create(img, null, 10, 10, false, 1, null);
    double[] allMins = (double[]) extremaOp.getProperty("minimum");
    min = Doubles.min(allMins);

    double[] allMaxs = (double[]) extremaOp.getProperty("maximum");
    max = Doubles.max(allMaxs);

    surface.setMaxVal(max);
    surface.setMinVal(min);
  }
Code example #7
File: DoubleMathTest.java Project: hinike/opera
 public void testFuzzyEqualsZeroTolerance() {
   // make sure we test -0 tolerance
   for (double zero : Doubles.asList(0.0, -0.0)) {
     for (double a : ALL_DOUBLE_CANDIDATES) {
       for (double b : ALL_DOUBLE_CANDIDATES) {
         assertEquals(
             a == b || (Double.isNaN(a) && Double.isNaN(b)), DoubleMath.fuzzyEquals(a, b, zero));
       }
     }
   }
 }
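In other words, with a zero tolerance DoubleMath.fuzzyEquals degenerates to exact equality, except that NaN is considered fuzzily equal to NaN. A small illustration (values chosen arbitrarily):

import com.google.common.math.DoubleMath;

public class FuzzyEqualsSketch {
  public static void main(String[] args) {
    System.out.println(DoubleMath.fuzzyEquals(1.0, 1.0, 0.0));               // true
    System.out.println(DoubleMath.fuzzyEquals(1.0, Math.nextUp(1.0), 0.0));  // false, differs by one ulp
    System.out.println(DoubleMath.fuzzyEquals(Double.NaN, Double.NaN, 0.0)); // true, NaNs are fuzzily equal
    System.out.println(DoubleMath.fuzzyEquals(1.0, Math.nextUp(1.0), 1e-9)); // true, within tolerance
  }
}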
Code example #8
    @SuppressWarnings("synthetic-access")
    @Override
    public CurrencyLabelledMatrix1D buildObject(
        final FudgeDeserializer deserializer, final FudgeMsg message) {
      final FudgeMsg msg = message.getMessage(MATRIX_FIELD_NAME);

      final Queue<String> labelTypes = new LinkedList<String>();
      final Queue<FudgeField> labelValues = new LinkedList<FudgeField>();

      final List<Currency> keys = new LinkedList<Currency>();
      final List<Object> labels = new LinkedList<Object>();
      final List<Double> values = new LinkedList<Double>();

      for (final FudgeField field : msg) {
        switch (field.getOrdinal()) {
          case LABEL_TYPE_ORDINAL:
            labelTypes.add((String) field.getValue());
            break;
          case KEY_ORDINAL:
            keys.add(Currency.of((String) field.getValue()));
            break;
          case LABEL_ORDINAL:
            labelValues.add(field);
            break;
          case VALUE_ORDINAL:
            values.add((Double) field.getValue());
            break;
        }

        if (!labelTypes.isEmpty() && !labelValues.isEmpty()) {
          // Have a type and a value, which can be consumed
          final String labelType = labelTypes.remove();
          Class<?> labelClass;
          try {
            labelClass = LabelledMatrix1DBuilder.getLabelClass(labelType, _loadedClasses);
          } catch (final ClassNotFoundException ex) {
            throw new OpenGammaRuntimeException(
                "Could not deserialize label of type " + labelType, ex);
          }
          final FudgeField labelValue = labelValues.remove();
          final Object label = deserializer.fieldValueToObject(labelClass, labelValue);
          //          labels.add(Currency.of((String) label));
          labels.add(label);
        }
      }

      final int matrixSize = keys.size();
      final Currency[] keysArray = new Currency[matrixSize];
      keys.toArray(keysArray);
      final Object[] labelsArray = new Object[matrixSize];
      labels.toArray(labelsArray);
      final double[] valuesArray = Doubles.toArray(values);
      return new CurrencyLabelledMatrix1D(keysArray, labelsArray, valuesArray);
    }
Code example #9
    static Map<Imt, Map<Gmm, List<Double>>> initValueMaps(Set<Imt> imts, Set<Gmm> gmms, int size) {

      Map<Imt, Map<Gmm, List<Double>>> imtMap = Maps.newEnumMap(Imt.class);
      for (Imt imt : imts) {
        Map<Gmm, List<Double>> gmmMap = Maps.newEnumMap(Gmm.class);
        for (Gmm gmm : gmms) {
          gmmMap.put(gmm, Doubles.asList(new double[size]));
        }
        imtMap.put(imt, gmmMap);
      }
      return imtMap;
    }
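Doubles.asList(new double[size]) produces a fixed-size List<Double> view backed by the array, so each per-Gmm list starts out as zeros, supports set, but cannot be resized. A minimal sketch of that view behaviour (values are illustrative):

import com.google.common.primitives.Doubles;
import java.util.List;

public class AsListViewSketch {
  public static void main(String[] args) {
    double[] backing = new double[3];           // {0.0, 0.0, 0.0}
    List<Double> view = Doubles.asList(backing);
    view.set(1, 7.5);                           // writes through to the backing array
    System.out.println(backing[1]);             // 7.5
    // view.add(1.0) would throw UnsupportedOperationException: the view is fixed-size.
  }
}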
Code example #10
 private double addScores(final IDType target, final Iterable<Integer> ids) {
   Collection<Double> scores = new ArrayList<>();
   for (Integer id : ids) {
     Optional<Integer> my = mapping.getUnchecked(Pair.make(target, id));
     if (!my.isPresent()) continue;
     Double s = this.scores.get(my.get());
     if (s == null) continue;
     scores.add(s);
   }
   if (scores.isEmpty()) return Double.NaN;
   if (scores.size() == 1) return scores.iterator().next().doubleValue();
   return operator.combine(Doubles.toArray(scores));
 }
Code example #11
 static {
   ImmutableSet.Builder<Double> integralBuilder = ImmutableSet.builder();
   ImmutableSet.Builder<Double> fractionalBuilder = ImmutableSet.builder();
   integralBuilder.addAll(Doubles.asList(0.0, -0.0, Double.MAX_VALUE, -Double.MAX_VALUE));
   // Add small multiples of MIN_VALUE and MIN_NORMAL
   for (int scale = 1; scale <= 4; scale++) {
     for (double d : Doubles.asList(Double.MIN_VALUE, Double.MIN_NORMAL)) {
       fractionalBuilder.add(d * scale).add(-d * scale);
     }
   }
   for (double d :
       Doubles.asList(
           0,
           1,
           2,
           7,
           51,
           102,
           Math.scalb(1.0, 53),
           Integer.MIN_VALUE,
           Integer.MAX_VALUE,
           Long.MIN_VALUE,
           Long.MAX_VALUE)) {
     for (double delta : Doubles.asList(0.0, 1.0, 2.0)) {
       integralBuilder.addAll(Doubles.asList(d + delta, d - delta, -d - delta, -d + delta));
     }
     for (double delta : Doubles.asList(0.01, 0.1, 0.25, 0.499, 0.5, 0.501, 0.7, 0.8)) {
       double x = d + delta;
       if (x != Math.round(x)) {
         fractionalBuilder.add(x);
       }
     }
   }
   INTEGRAL_DOUBLE_CANDIDATES = integralBuilder.build();
   fractionalBuilder.add(1.414).add(1.415).add(Math.sqrt(2));
   fractionalBuilder.add(5.656).add(5.657).add(4 * Math.sqrt(2));
   for (double d : INTEGRAL_DOUBLE_CANDIDATES) {
     double x = 1 / d;
     if (x != Math.rint(x)) {
       fractionalBuilder.add(x);
     }
   }
   FRACTIONAL_DOUBLE_CANDIDATES = fractionalBuilder.build();
   FINITE_DOUBLE_CANDIDATES =
       Iterables.concat(FRACTIONAL_DOUBLE_CANDIDATES, INTEGRAL_DOUBLE_CANDIDATES);
   POSITIVE_FINITE_DOUBLE_CANDIDATES =
       Iterables.filter(
           FINITE_DOUBLE_CANDIDATES,
           new Predicate<Double>() {
             @Override
             public boolean apply(Double input) {
               return input.doubleValue() > 0.0;
             }
           });
   DOUBLE_CANDIDATES_EXCEPT_NAN = Iterables.concat(FINITE_DOUBLE_CANDIDATES, INFINITIES);
   ALL_DOUBLE_CANDIDATES = Iterables.concat(DOUBLE_CANDIDATES_EXCEPT_NAN, asList(Double.NaN));
 }
Code example #12
  @Override
  public int compare(TraitProxy o1, TraitProxy o2) {
    Double value1 = Double.MIN_VALUE;
    Double value2 = Double.MAX_VALUE;
    try {
      if (o1.getValue() != null) value1 = Double.parseDouble(o1.getValue());
    } catch (NumberFormatException e) {
      // ignore: fall back to the default sentinel value
    }
    try {
      if (o2.getValue() != null) value2 = Double.parseDouble(o2.getValue());
    } catch (NumberFormatException e) {
      // ignore: fall back to the default sentinel value
    }
    return Doubles.compare(value1, value2);
  }
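Doubles.compare imposes the same total ordering as Double.compare, which is what makes it safe inside a Comparator even when parsing fails and the sentinel defaults are used, as above. A minimal sketch (the values are made up):

import com.google.common.primitives.Doubles;
import java.util.Arrays;
import java.util.List;

public class DoublesCompareSketch {
  public static void main(String[] args) {
    List<Double> values = Arrays.asList(3.0, Double.NaN, -1.5, 0.0);
    // Behaves like Double.compare: a consistent total order in which NaN sorts last.
    values.sort(Doubles::compare);
    System.out.println(values); // [-1.5, 0.0, 3.0, NaN]
  }
}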
Code example #13
    @Override
    public LocalDateLabelledMatrix1D buildObject(
        final FudgeDeserializer deserializer, final FudgeMsg message) {
      final FudgeMsg msg = message.getMessage(MATRIX_FIELD_NAME);

      final Queue<String> labelTypes = new LinkedList<String>();
      final Queue<FudgeField> labelValues = new LinkedList<FudgeField>();

      final List<LocalDate> keys = new LinkedList<LocalDate>();
      final List<Object> labels = new LinkedList<Object>();
      final List<Double> values = new LinkedList<Double>();

      for (final FudgeField field : msg) {
        switch (field.getOrdinal()) {
          case LABEL_TYPE_ORDINAL:
            labelTypes.add((String) field.getValue());
            break;
          case KEY_ORDINAL:
            keys.add(((FudgeDate) field.getValue()).toLocalDate());
            break;
          case LABEL_ORDINAL:
            labelValues.add(field);
            break;
          case VALUE_ORDINAL:
            values.add((Double) field.getValue());
            break;
        }

        if (!labelTypes.isEmpty() && !labelValues.isEmpty()) {
          // Have a type and a value, which can be consumed
          final String labelType = labelTypes.remove();
          Class<?> labelClass = getClass(labelType);
          final FudgeField labelValue = labelValues.remove();
          final Object label = deserializer.fieldValueToObject(labelClass, labelValue);
          labels.add(label);
        }
      }

      final int matrixSize = keys.size();
      final LocalDate[] keysArray = new LocalDate[matrixSize];
      keys.toArray(keysArray);
      final Object[] labelsArray = new Object[matrixSize];
      labels.toArray(labelsArray);
      final double[] valuesArray = Doubles.toArray(values);
      return new LocalDateLabelledMatrix1D(keysArray, labelsArray, valuesArray);
    }
Code example #14
 @Override
 public SurfaceCurrencyParameterSensitivity surfaceCurrencyParameterSensitivity(
     IborCapletFloorletSensitivity point) {
   ArgChecker.isTrue(
       point.getIndex().equals(index),
       "Ibor index of provider must be the same as Ibor index of point sensitivity");
   double expiry = relativeTime(point.getExpiry());
   double strike = point.getStrike();
   // copy to ImmutableMap to lock order (keySet and values used separately but must match)
   Map<DoublesPair, Double> result =
       ImmutableMap.copyOf(surface.zValueParameterSensitivity(expiry, strike));
   SurfaceCurrencyParameterSensitivity parameterSensi =
       SurfaceCurrencyParameterSensitivity.of(
           updateSurfaceMetadata(result.keySet()),
           point.getCurrency(),
           DoubleArray.copyOf(Doubles.toArray(result.values())));
   return parameterSensi.multipliedBy(point.getSensitivity());
 }
Code example #15
File: ComparableVector.java Project: combineads/tajo
 protected final int compare(int index1, int index2) {
   final boolean n1 = nulls.get(index1);
   final boolean n2 = nulls.get(index2);
   if (n1 && n2) {
     return 0;
   }
   if (n1 ^ n2) {
     return nullsFirst ? (n1 ? -1 : 1) : (n1 ? 1 : -1);
   }
   int compare;
   switch (type) {
     case 0:
       compare = Booleans.compare(booleans[index1], booleans[index2]);
       break;
     case 1:
       compare = bits[index1] - bits[index2];
       break;
     case 2:
       compare = Shorts.compare(shorts[index1], shorts[index2]);
       break;
     case 3:
       compare = Ints.compare(ints[index1], ints[index2]);
       break;
     case 4:
       compare = Longs.compare(longs[index1], longs[index2]);
       break;
     case 5:
       compare = Floats.compare(floats[index1], floats[index2]);
       break;
     case 6:
       compare = Doubles.compare(doubles[index1], doubles[index2]);
       break;
     case 7:
       compare = TextDatum.COMPARATOR.compare(bytes[index1], bytes[index2]);
       break;
     case 8:
       compare = UnsignedInts.compare(ints[index1], ints[index2]);
       break;
     default:
       throw new IllegalArgumentException();
   }
   return ascending ? compare : -compare;
 }
Code example #16
  private static Double evaluate(Collection<?> values, int quantile) {
    List<Double> doubleValues = new ArrayList<>();

    for (Object value : values) {
      Double doubleValue = (Double) TypeUtil.parseOrCast(DataType.DOUBLE, value);

      doubleValues.add(doubleValue);
    }

    double[] data = Doubles.toArray(doubleValues);

    // The data must be (at least partially) ordered
    Arrays.sort(data);

    Percentile percentile = new Percentile();
    percentile.setData(data);

    return percentile.evaluate(quantile);
  }
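Here Doubles.toArray only bridges from the boxed list to the primitive double[] that Commons Math expects; the quantile itself comes from Percentile. An end-to-end sketch with made-up values:

import com.google.common.primitives.Doubles;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.math3.stat.descriptive.rank.Percentile;

public class PercentileSketch {
  public static void main(String[] args) {
    List<Double> boxed = Arrays.asList(10.0, 20.0, 30.0, 40.0, 50.0);
    double[] data = Doubles.toArray(boxed);
    Arrays.sort(data); // Percentile copes with unsorted input, but this mirrors the example above

    Percentile percentile = new Percentile();
    percentile.setData(data);
    System.out.println(percentile.evaluate(50)); // 30.0, the median
  }
}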
Code example #17
 @Override
 public StringLabelledMatrix1D buildObject(
     final FudgeDeserializer deserializer, final FudgeMsg message) {
   final FudgeMsg msg = message.getMessage(MATRIX_FIELD_NAME);
   final List<String> keys = new LinkedList<String>();
   final List<Double> values = new LinkedList<Double>();
   for (final FudgeField field : msg) {
     switch (field.getOrdinal()) {
       case KEY_ORDINAL:
         keys.add((String) field.getValue());
         break;
       case VALUE_ORDINAL:
         values.add((Double) field.getValue());
         break;
     }
   }
   String[] keysArray = keys.toArray(ArrayUtils.EMPTY_STRING_ARRAY);
   final double[] valuesArray = Doubles.toArray(values);
   return new StringLabelledMatrix1D(keysArray, valuesArray);
 }
Code example #18
  private static Map<String, double[]> parseVectorDictionary(
      SupportVectorMachineModel supportVectorMachineModel) {
    VectorDictionary vectorDictionary = supportVectorMachineModel.getVectorDictionary();

    VectorFields vectorFields = vectorDictionary.getVectorFields();

    List<FieldRef> fieldRefs = vectorFields.getFieldRefs();

    Map<String, double[]> result = new LinkedHashMap<>();

    List<VectorInstance> vectorInstances = vectorDictionary.getVectorInstances();
    for (VectorInstance vectorInstance : vectorInstances) {
      String id = vectorInstance.getId();
      if (id == null) {
        throw new InvalidFeatureException(vectorInstance);
      }

      Array array = vectorInstance.getArray();
      RealSparseArray sparseArray = vectorInstance.getREALSparseArray();

      List<? extends Number> values;

      if (array != null && sparseArray == null) {
        values = ArrayUtil.asNumberList(array);
      } else if (array == null && sparseArray != null) {
        values = SparseArrayUtil.asNumberList(sparseArray);
      } else {
        throw new InvalidFeatureException(vectorInstance);
      } // End if

      if (fieldRefs.size() != values.size()) {
        throw new InvalidFeatureException(vectorInstance);
      }

      double[] vector = Doubles.toArray(values);

      result.put(id, vector);
    }

    return result;
  }
Code example #19
  @Override
  public void execute(Tuple input) {
    String line = input.getString(0);

    List<Double> list = new ArrayList<Double>();

    int firstIndex = line.indexOf("[");
    int lastIndex = line.indexOf("]");

    String subLine = line.substring(firstIndex + 1, lastIndex);
    //  System.out.println(subLine);
    String[] subStrings = subLine.split(",");

    String author = subStrings[0];
    for (int i = 1; i < subStrings.length; i++) {
      list.add(Double.valueOf(subStrings[i]));
    }

    vector = Doubles.toArray(list);
    //    System.out.println(author+" "+list+" "+vector);
    collector.emit(input, new Values(author, list, vector));
    collector.ack(input);
  }
Code example #20
 /**
  * Computes the first and second order derivatives of the Black implied volatility in the SABR
  * model.
  *
  * <p>The first derivative values will be stored in the input array {@code volatilityD}. The array
  * contains: [0] the derivative w.r.t. the forward, [1] the derivative w.r.t. the strike, [2] the
  * derivative w.r.t. alpha, [3] the derivative w.r.t. beta, [4] the derivative w.r.t. rho, and
  * [5] the derivative w.r.t. nu. Thus the length of the array should be 6.
  *
  * <p>The second derivative values will be stored in the input array {@code volatilityD2}. Only
  * the second order derivatives with respect to the forward and strike are implemented. The array
  * contains [0][0] forward-forward, [0][1] forward-strike and [1][1] strike-strike. Thus the size
  * should be 2 x 2.
  *
  * <p>Around ATM, a first order expansion is used due to a 0/0-type indetermination. The
  * second order derivative produced is poor around ATM.
  *
  * @param forward the forward value of the underlying
  * @param strike the strike value of the option
  * @param timeToExpiry the time to expiry of the option
  * @param data the SABR data.
  * @param volatilityD the array used to return the first order derivative
  * @param volatilityD2 the array of array used to return the second order derivative
  * @return the Black implied volatility
  */
 @Override
 public double volatilityAdjoint2(
     double forward,
     double strike,
     double timeToExpiry,
     SabrFormulaData data,
     double[] volatilityD,
     double[][] volatilityD2) {
   double k = Math.max(strike, 0.000001);
   double alpha = data.getAlpha();
   double beta = data.getBeta();
   double rho = data.getRho();
   double nu = data.getNu();
   // Forward
   double h0 = (1 - beta) / 2;
   double h1 = forward * k;
   double h1h0 = Math.pow(h1, h0);
   double h12 = h1h0 * h1h0;
   double h2 = Math.log(forward / k);
   double h22 = h2 * h2;
   double h23 = h22 * h2;
   double h24 = h23 * h2;
   double f1 = h1h0 * (1 + h0 * h0 / 6.0 * (h22 + h0 * h0 / 20.0 * h24));
   double f2 = nu / alpha * h1h0 * h2;
   double f3 =
       h0 * h0 / 6.0 * alpha * alpha / h12
           + rho * beta * nu * alpha / 4.0 / h1h0
           + (2 - 3 * rho * rho) / 24.0 * nu * nu;
   double sqrtf2 = Math.sqrt(1 - 2 * rho * f2 + f2 * f2);
   double f2x = 0.0;
   double x = 0.0, xp = 0, xpp = 0;
   if (DoubleMath.fuzzyEquals(f2, 0.0, SMALL_Z)) {
     f2x = 1.0 - 0.5 * f2 * rho; // small f2 expansion to f2^2 terms
   } else {
     if (DoubleMath.fuzzyEquals(rho, 1.0, RHO_EPS)) {
       x =
           f2 < 1.0
               ? -Math.log(1.0 - f2) - 0.5 * Math.pow(f2 / (f2 - 1.0), 2) * (1.0 - rho)
               : Math.log(2.0 * f2 - 2.0) - Math.log(1.0 - rho);
     } else {
       x = Math.log((sqrtf2 + f2 - rho) / (1 - rho));
     }
     xp = 1. / sqrtf2;
     xpp = (rho - f2) / Math.pow(sqrtf2, 3.0);
     f2x = f2 / x;
   }
   double sigma = Math.max(MIN_VOL, alpha / f1 * f2x * (1 + f3 * timeToExpiry));
   // First level
   double h0Dbeta = -0.5;
   double sigmaDf1 = -sigma / f1;
   double sigmaDf2 = 0;
   if (DoubleMath.fuzzyEquals(f2, 0.0, SMALL_Z)) {
     sigmaDf2 = alpha / f1 * (1 + f3 * timeToExpiry) * -0.5 * rho;
   } else {
     sigmaDf2 = alpha / f1 * (1 + f3 * timeToExpiry) * (1.0 / x - f2 * xp / (x * x));
   }
   double sigmaDf3 = alpha / f1 * f2x * timeToExpiry;
   double sigmaDf4 = f2x / f1 * (1 + f3 * timeToExpiry);
   double sigmaDx = -alpha / f1 * f2 / (x * x) * (1 + f3 * timeToExpiry);
   double[][] sigmaD2ff = new double[3][3];
   sigmaD2ff[0][0] = -sigmaDf1 / f1 + sigma / (f1 * f1); // OK
   sigmaD2ff[0][1] = -sigmaDf2 / f1;
   sigmaD2ff[0][2] = -sigmaDf3 / f1;
   if (DoubleMath.fuzzyEquals(f2, 0.0, SMALL_Z)) {
     sigmaD2ff[1][2] = alpha / f1 * -0.5 * rho * timeToExpiry;
   } else {
     sigmaD2ff[1][1] =
         alpha
             / f1
             * (1 + f3 * timeToExpiry)
             * (-2 * xp / (x * x) - f2 * xpp / (x * x) + 2 * f2 * xp * xp / (x * x * x));
     sigmaD2ff[1][2] = alpha / f1 * timeToExpiry * (1.0 / x - f2 * xp / (x * x));
   }
   sigmaD2ff[2][2] = 0.0;
   //      double sigma = alpha / f1 * f2x * (1 + f3 * theta);
   // Second level
   double[] f1Dh = new double[3];
   double[] f2Dh = new double[3];
   double[] f3Dh = new double[3];
   f1Dh[0] = h1h0 * (h0 * (h22 / 3.0 + h0 * h0 / 40.0 * h24)) + Math.log(h1) * f1;
   f1Dh[1] = h0 * f1 / h1;
   f1Dh[2] = h1h0 * (h0 * h0 / 6.0 * (2.0 * h2 + h0 * h0 / 5.0 * h23));
   f2Dh[0] = Math.log(h1) * f2;
   f2Dh[1] = h0 * f2 / h1;
   f2Dh[2] = nu / alpha * h1h0;
   f3Dh[0] =
       h0 / 3.0 * alpha * alpha / h12
           - 2 * h0 * h0 / 6.0 * alpha * alpha / h12 * Math.log(h1)
           - rho * beta * nu * alpha / 4.0 / h1h0 * Math.log(h1);
   f3Dh[1] =
       -2 * h0 * h0 / 6.0 * alpha * alpha / h12 * h0 / h1
           - rho * beta * nu * alpha / 4.0 / h1h0 * h0 / h1;
   f3Dh[2] = 0.0;
   double[] f1Dp = new double[4]; // Derivative to sabr parameters
   double[] f2Dp = new double[4];
   double[] f3Dp = new double[4];
   double[] f4Dp = new double[4];
   f1Dp[0] = 0.0;
   f1Dp[1] = f1Dh[0] * h0Dbeta;
   f1Dp[2] = 0.0;
   f1Dp[3] = 0.0;
   f2Dp[0] = -f2 / alpha;
   f2Dp[1] = f2Dh[0] * h0Dbeta;
   f2Dp[2] = 0.0;
   f2Dp[3] = h1h0 * h2 / alpha;
   f3Dp[0] = h0 * h0 / 3.0 * alpha / h12 + rho * beta * nu / 4.0 / h1h0;
   f3Dp[1] = rho * nu * alpha / 4.0 / h1h0 + f3Dh[0] * h0Dbeta;
   f3Dp[2] = beta * nu * alpha / 4.0 / h1h0 - rho / 4.0 * nu * nu;
   f3Dp[3] = rho * beta * alpha / 4.0 / h1h0 + (2 - 3 * rho * rho) / 12.0 * nu;
   f4Dp[0] = 1.0;
   f4Dp[1] = 0.0;
   f4Dp[2] = 0.0;
   f4Dp[3] = 0.0;
   double sigmaDh1 = sigmaDf1 * f1Dh[1] + sigmaDf2 * f2Dh[1] + sigmaDf3 * f3Dh[1];
   double sigmaDh2 = sigmaDf1 * f1Dh[2] + sigmaDf2 * f2Dh[2] + sigmaDf3 * f3Dh[2];
   double[][] f1D2hh = new double[2][2]; // No h0
   double[][] f2D2hh = new double[2][2];
   double[][] f3D2hh = new double[2][2];
   f1D2hh[0][0] = h0 * (h0 - 1) * f1 / (h1 * h1);
   f1D2hh[0][1] = h0 * h1h0 / h1 * h0 * h0 / 6.0 * (2.0 * h2 + 4.0 * h0 * h0 / 20.0 * h23);
   f1D2hh[1][1] = h1h0 * (h0 * h0 / 6.0 * (2.0 + 12.0 * h0 * h0 / 20.0 * h2));
   f2D2hh[0][0] = h0 * (h0 - 1) * f2 / (h1 * h1);
   f2D2hh[0][1] = nu / alpha * h0 * h1h0 / h1;
   f2D2hh[1][1] = 0.0;
   f3D2hh[0][0] =
       2 * h0 * (2 * h0 + 1) * h0 * h0 / 6.0 * alpha * alpha / (h12 * h1 * h1)
           + h0 * (h0 + 1) * rho * beta * nu * alpha / 4.0 / (h1h0 * h1 * h1);
   f3D2hh[0][1] = 0.0;
   f3D2hh[1][1] = 0.0;
   double[][] sigmaD2hh = new double[2][2]; // No h0
   for (int loopx = 0; loopx < 2; loopx++) {
     for (int loopy = loopx; loopy < 2; loopy++) {
       sigmaD2hh[loopx][loopy] =
           (sigmaD2ff[0][0] * f1Dh[loopy + 1]
                       + sigmaD2ff[0][1] * f2Dh[loopy + 1]
                       + sigmaD2ff[0][2] * f3Dh[loopy + 1])
                   * f1Dh[loopx + 1]
               + sigmaDf1 * f1D2hh[loopx][loopy]
               + (sigmaD2ff[0][1] * f1Dh[loopy + 1]
                       + sigmaD2ff[1][1] * f2Dh[loopy + 1]
                       + sigmaD2ff[1][2] * f3Dh[loopy + 1])
                   * f2Dh[loopx + 1]
               + sigmaDf2 * f2D2hh[loopx][loopy]
               + (sigmaD2ff[0][2] * f1Dh[loopy + 1]
                       + sigmaD2ff[1][2] * f2Dh[loopy + 1]
                       + sigmaD2ff[2][2] * f3Dh[loopy + 1])
                   * f3Dh[loopx + 1]
               + sigmaDf3 * f3D2hh[loopx][loopy];
     }
   }
   // Third level
   double h1Df = k;
   double h1Dk = forward;
   double h1D2ff = 0.0;
   double h1D2kf = 1.0;
   double h1D2kk = 0.0;
   double h2Df = 1.0 / forward;
   double h2Dk = -1.0 / k;
   double h2D2ff = -1 / (forward * forward);
   double h2D2fk = 0.0;
   double h2D2kk = 1.0 / (k * k);
   volatilityD[0] = sigmaDh1 * h1Df + sigmaDh2 * h2Df;
   volatilityD[1] = sigmaDh1 * h1Dk + sigmaDh2 * h2Dk;
   volatilityD[2] =
       sigmaDf1 * f1Dp[0] + sigmaDf2 * f2Dp[0] + sigmaDf3 * f3Dp[0] + sigmaDf4 * f4Dp[0];
   volatilityD[3] =
       sigmaDf1 * f1Dp[1] + sigmaDf2 * f2Dp[1] + sigmaDf3 * f3Dp[1] + sigmaDf4 * f4Dp[1];
   if (DoubleMath.fuzzyEquals(f2, 0.0, SMALL_Z)) {
     volatilityD[4] = -0.5 * f2 + sigmaDf3 * f3Dp[2];
   } else {
     double xDr;
     if (DoubleMath.fuzzyEquals(rho, 1.0, RHO_EPS)) {
       xDr =
           f2 > 1.0
               ? 1.0 / (1.0 - rho) + (0.5 - f2) / (f2 - 1.0) / (f2 - 1.0)
               : 0.5 * Math.pow(f2 / (1.0 - f2), 2.0)
                   + 0.25 * (f2 - 4.0) * Math.pow(f2 / (f2 - 1.0), 3) / (f2 - 1.0) * (1.0 - rho);
       if (Doubles.isFinite(xDr)) {
         volatilityD[4] =
             sigmaDf1 * f1Dp[2] + sigmaDx * xDr + sigmaDf3 * f3Dp[2] + sigmaDf4 * f4Dp[2];
       } else {
         volatilityD[4] = Double.NEGATIVE_INFINITY;
       }
     } else {
       xDr = (-f2 / sqrtf2 - 1 + (sqrtf2 + f2 - rho) / (1 - rho)) / (sqrtf2 + f2 - rho);
       volatilityD[4] =
           sigmaDf1 * f1Dp[2] + sigmaDx * xDr + sigmaDf3 * f3Dp[2] + sigmaDf4 * f4Dp[2];
     }
   }
   volatilityD[5] =
       sigmaDf1 * f1Dp[3] + sigmaDf2 * f2Dp[3] + sigmaDf3 * f3Dp[3] + sigmaDf4 * f4Dp[3];
   volatilityD2[0][0] =
       (sigmaD2hh[0][0] * h1Df + sigmaD2hh[0][1] * h2Df) * h1Df
           + sigmaDh1 * h1D2ff
           + (sigmaD2hh[0][1] * h1Df + sigmaD2hh[1][1] * h2Df) * h2Df
           + sigmaDh2 * h2D2ff;
   volatilityD2[0][1] =
       (sigmaD2hh[0][0] * h1Dk + sigmaD2hh[0][1] * h2Dk) * h1Df
           + sigmaDh1 * h1D2kf
           + (sigmaD2hh[0][1] * h1Dk + sigmaD2hh[1][1] * h2Dk) * h2Df
           + sigmaDh2 * h2D2fk;
   volatilityD2[1][0] = volatilityD2[0][1];
   volatilityD2[1][1] =
       (sigmaD2hh[0][0] * h1Dk + sigmaD2hh[0][1] * h2Dk) * h1Dk
           + sigmaDh1 * h1D2kk
           + (sigmaD2hh[0][1] * h1Dk + sigmaD2hh[1][1] * h2Dk) * h2Dk
           + sigmaDh2 * h2D2kk;
   return sigma;
 }
Code example #21
/**
 * Exhaustive input sets for every integral type.
 *
 * @author Louis Wasserman
 */
@GwtCompatible
public class MathTesting {
  static final ImmutableSet<RoundingMode> ALL_ROUNDING_MODES =
      ImmutableSet.copyOf(RoundingMode.values());

  static final ImmutableList<RoundingMode> ALL_SAFE_ROUNDING_MODES =
      ImmutableList.of(DOWN, UP, FLOOR, CEILING, HALF_EVEN, HALF_UP, HALF_DOWN);

  // Exponents to test for the pow() function.
  static final ImmutableList<Integer> EXPONENTS =
      ImmutableList.of(0, 1, 2, 3, 4, 7, 10, 15, 20, 25, 40, 70);

  /* Helper function to make a Long value from an Integer. */
  private static final Function<Integer, Long> TO_LONG =
      new Function<Integer, Long>() {
        @Override
        public Long apply(Integer n) {
          return Long.valueOf(n);
        }
      };

  /* Helper function to make a BigInteger value from a Long. */
  private static final Function<Long, BigInteger> TO_BIGINTEGER =
      new Function<Long, BigInteger>() {
        @Override
        public BigInteger apply(Long n) {
          return BigInteger.valueOf(n);
        }
      };

  private static final Function<Integer, Integer> NEGATE_INT =
      new Function<Integer, Integer>() {
        @Override
        public Integer apply(Integer x) {
          return -x;
        }
      };

  private static final Function<Long, Long> NEGATE_LONG =
      new Function<Long, Long>() {
        @Override
        public Long apply(Long x) {
          return -x;
        }
      };

  private static final Function<BigInteger, BigInteger> NEGATE_BIGINT =
      new Function<BigInteger, BigInteger>() {
        @Override
        public BigInteger apply(BigInteger x) {
          return x.negate();
        }
      };

  /*
   * This list contains values that attempt to provoke overflow in integer operations. It contains
   * positive values on or near 2^N for N near multiples of 8 (near byte boundaries).
   */
  static final ImmutableSet<Integer> POSITIVE_INTEGER_CANDIDATES;

  static final Iterable<Integer> NEGATIVE_INTEGER_CANDIDATES;

  static final Iterable<Integer> NONZERO_INTEGER_CANDIDATES;

  static final Iterable<Integer> ALL_INTEGER_CANDIDATES;

  static {
    ImmutableSet.Builder<Integer> intValues = ImmutableSet.builder();
    // Add boundary values manually to avoid over/under flow (this covers 2^N for 0 and 31).
    intValues.add(Integer.MAX_VALUE - 1, Integer.MAX_VALUE);
    // Add values up to 40. This covers cases like "square of a prime" and such.
    for (int i = 1; i <= 40; i++) {
      intValues.add(i);
    }
    // Now add values near 2^N for lots of values of N.
    for (int exponent : asList(2, 3, 4, 9, 15, 16, 17, 24, 25, 30)) {
      int x = 1 << exponent;
      intValues.add(x, x + 1, x - 1);
    }
    intValues.add(9999).add(10000).add(10001).add(1000000); // near powers of 10
    intValues.add(5792).add(5793); // sqrt(2^25) rounded up and down
    POSITIVE_INTEGER_CANDIDATES = intValues.build();
    NEGATIVE_INTEGER_CANDIDATES =
        ImmutableList.copyOf(
            Iterables.concat(
                Iterables.transform(POSITIVE_INTEGER_CANDIDATES, NEGATE_INT),
                ImmutableList.of(Integer.MIN_VALUE)));
    NONZERO_INTEGER_CANDIDATES =
        ImmutableList.copyOf(
            Iterables.concat(POSITIVE_INTEGER_CANDIDATES, NEGATIVE_INTEGER_CANDIDATES));
    ALL_INTEGER_CANDIDATES = Iterables.concat(NONZERO_INTEGER_CANDIDATES, ImmutableList.of(0));
  }

  /*
   * This list contains values that attempt to provoke overflow in long operations. It contains
   * positive values on or near 2^N for N near multiples of 8 (near byte boundaries). This list is
   * a superset of POSITIVE_INTEGER_CANDIDATES.
   */
  static final ImmutableSet<Long> POSITIVE_LONG_CANDIDATES;

  static final Iterable<Long> NEGATIVE_LONG_CANDIDATES;

  static final Iterable<Long> NONZERO_LONG_CANDIDATES;

  static final Iterable<Long> ALL_LONG_CANDIDATES;

  static {
    ImmutableSet.Builder<Long> longValues = ImmutableSet.builder();
    // First of all add all the integer candidate values.
    longValues.addAll(Iterables.transform(POSITIVE_INTEGER_CANDIDATES, TO_LONG));
    // Add boundary values manually to avoid over/under flow (this covers 2^N for 31 and 63).
    longValues.add(Integer.MAX_VALUE + 1L, Long.MAX_VALUE - 1L, Long.MAX_VALUE);
    // Now add values near 2^N for lots of values of N.
    for (int exponent : asList(32, 33, 39, 40, 41, 47, 48, 49, 55, 56, 57)) {
      long x = 1L << exponent;
      longValues.add(x, x + 1, x - 1);
    }
    longValues.add(194368031998L).add(194368031999L); // sqrt(2^75) rounded up and down
    POSITIVE_LONG_CANDIDATES = longValues.build();
    NEGATIVE_LONG_CANDIDATES =
        Iterables.concat(
            Iterables.transform(POSITIVE_LONG_CANDIDATES, NEGATE_LONG),
            ImmutableList.of(Long.MIN_VALUE));
    NONZERO_LONG_CANDIDATES = Iterables.concat(POSITIVE_LONG_CANDIDATES, NEGATIVE_LONG_CANDIDATES);
    ALL_LONG_CANDIDATES = Iterables.concat(NONZERO_LONG_CANDIDATES, ImmutableList.of(0L));
  }

  /*
   * This list contains values that attempt to provoke overflow in big integer operations. It
   * contains positive values on or near 2^N for N near multiples of 8 (near byte boundaries). This
   * list is a superset of POSITIVE_LONG_CANDIDATES.
   */
  static final ImmutableSet<BigInteger> POSITIVE_BIGINTEGER_CANDIDATES;

  static final Iterable<BigInteger> NEGATIVE_BIGINTEGER_CANDIDATES;

  static final Iterable<BigInteger> NONZERO_BIGINTEGER_CANDIDATES;

  static final Iterable<BigInteger> ALL_BIGINTEGER_CANDIDATES;

  static {
    ImmutableSet.Builder<BigInteger> bigValues = ImmutableSet.builder();
    // First of all add all the long candidate values.
    bigValues.addAll(Iterables.transform(POSITIVE_LONG_CANDIDATES, TO_BIGINTEGER));
    // Add boundary values manually to avoid over/under flow.
    bigValues.add(BigInteger.valueOf(Long.MAX_VALUE).add(ONE));
    // Now add values near 2^N for lots of values of N.
    for (int exponent :
        asList(
            64,
            65,
            71,
            72,
            73,
            79,
            80,
            81,
            255,
            256,
            257,
            511,
            512,
            513,
            Double.MAX_EXPONENT - 1,
            Double.MAX_EXPONENT,
            Double.MAX_EXPONENT + 1)) {
      BigInteger x = ONE.shiftLeft(exponent);
      bigValues.add(x, x.add(ONE), x.subtract(ONE));
    }
    bigValues.add(new BigInteger("218838949120258359057546633")); // sqrt(2^175) rounded up and
    // down
    bigValues.add(new BigInteger("218838949120258359057546634"));
    POSITIVE_BIGINTEGER_CANDIDATES = bigValues.build();
    NEGATIVE_BIGINTEGER_CANDIDATES =
        Iterables.transform(POSITIVE_BIGINTEGER_CANDIDATES, NEGATE_BIGINT);
    NONZERO_BIGINTEGER_CANDIDATES =
        Iterables.concat(POSITIVE_BIGINTEGER_CANDIDATES, NEGATIVE_BIGINTEGER_CANDIDATES);
    ALL_BIGINTEGER_CANDIDATES =
        Iterables.concat(NONZERO_BIGINTEGER_CANDIDATES, ImmutableList.of(ZERO));
  }

  static final ImmutableSet<Double> INTEGRAL_DOUBLE_CANDIDATES;
  static final ImmutableSet<Double> FRACTIONAL_DOUBLE_CANDIDATES;
  static final Iterable<Double> INFINITIES =
      Doubles.asList(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY);
  static final Iterable<Double> FINITE_DOUBLE_CANDIDATES;
  static final Iterable<Double> POSITIVE_FINITE_DOUBLE_CANDIDATES;
  static final Iterable<Double> ALL_DOUBLE_CANDIDATES;
  static final Iterable<Double> DOUBLE_CANDIDATES_EXCEPT_NAN;

  static {
    ImmutableSet.Builder<Double> integralBuilder = ImmutableSet.builder();
    ImmutableSet.Builder<Double> fractionalBuilder = ImmutableSet.builder();
    integralBuilder.addAll(Doubles.asList(0.0, -0.0, Double.MAX_VALUE, -Double.MAX_VALUE));
    // Add small multiples of MIN_VALUE and MIN_NORMAL
    for (int scale = 1; scale <= 4; scale++) {
      for (double d : Doubles.asList(Double.MIN_VALUE, Double.MIN_NORMAL)) {
        fractionalBuilder.add(d * scale).add(-d * scale);
      }
    }
    for (double d :
        Doubles.asList(
            0,
            1,
            2,
            7,
            51,
            102,
            Math.scalb(1.0, 53),
            Integer.MIN_VALUE,
            Integer.MAX_VALUE,
            Long.MIN_VALUE,
            Long.MAX_VALUE)) {
      for (double delta : Doubles.asList(0.0, 1.0, 2.0)) {
        integralBuilder.addAll(Doubles.asList(d + delta, d - delta, -d - delta, -d + delta));
      }
      for (double delta : Doubles.asList(0.01, 0.1, 0.25, 0.499, 0.5, 0.501, 0.7, 0.8)) {
        double x = d + delta;
        if (x != Math.round(x)) {
          fractionalBuilder.add(x);
        }
      }
    }
    INTEGRAL_DOUBLE_CANDIDATES = integralBuilder.build();
    fractionalBuilder.add(1.414).add(1.415).add(Math.sqrt(2));
    fractionalBuilder.add(5.656).add(5.657).add(4 * Math.sqrt(2));
    for (double d : INTEGRAL_DOUBLE_CANDIDATES) {
      double x = 1 / d;
      if (x != Math.rint(x)) {
        fractionalBuilder.add(x);
      }
    }
    FRACTIONAL_DOUBLE_CANDIDATES = fractionalBuilder.build();
    FINITE_DOUBLE_CANDIDATES =
        Iterables.concat(FRACTIONAL_DOUBLE_CANDIDATES, INTEGRAL_DOUBLE_CANDIDATES);
    POSITIVE_FINITE_DOUBLE_CANDIDATES =
        Iterables.filter(
            FINITE_DOUBLE_CANDIDATES,
            new Predicate<Double>() {
              @Override
              public boolean apply(Double input) {
                return input.doubleValue() > 0.0;
              }
            });
    DOUBLE_CANDIDATES_EXCEPT_NAN = Iterables.concat(FINITE_DOUBLE_CANDIDATES, INFINITIES);
    ALL_DOUBLE_CANDIDATES = Iterables.concat(DOUBLE_CANDIDATES_EXCEPT_NAN, asList(Double.NaN));
  }
}
Code example #22
File: DoubleMathTest.java Project: hinike/opera
/**
 * Tests for {@code DoubleMath}.
 *
 * @author Louis Wasserman
 */
public class DoubleMathTest extends TestCase {

  private static final BigDecimal MAX_INT_AS_BIG_DECIMAL = BigDecimal.valueOf(Integer.MAX_VALUE);
  private static final BigDecimal MIN_INT_AS_BIG_DECIMAL = BigDecimal.valueOf(Integer.MIN_VALUE);

  private static final BigDecimal MAX_LONG_AS_BIG_DECIMAL = BigDecimal.valueOf(Long.MAX_VALUE);
  private static final BigDecimal MIN_LONG_AS_BIG_DECIMAL = BigDecimal.valueOf(Long.MIN_VALUE);

  public void testConstantsMaxFactorial() {
    BigInteger MAX_DOUBLE_VALUE = BigDecimal.valueOf(Double.MAX_VALUE).toBigInteger();
    assertTrue(BigIntegerMath.factorial(DoubleMath.MAX_FACTORIAL).compareTo(MAX_DOUBLE_VALUE) <= 0);
    assertTrue(
        BigIntegerMath.factorial(DoubleMath.MAX_FACTORIAL + 1).compareTo(MAX_DOUBLE_VALUE) > 0);
  }

  public void testConstantsEverySixteenthFactorial() {
    for (int i = 0, n = 0; n <= DoubleMath.MAX_FACTORIAL; i++, n += 16) {
      assertEquals(
          BigIntegerMath.factorial(n).doubleValue(), DoubleMath.EVERY_SIXTEENTH_FACTORIAL[i]);
    }
  }

  public void testRoundIntegralDoubleToInt() {
    for (double d : INTEGRAL_DOUBLE_CANDIDATES) {
      for (RoundingMode mode : ALL_SAFE_ROUNDING_MODES) {
        BigDecimal expected = new BigDecimal(d).setScale(0, mode);
        boolean isInBounds =
            expected.compareTo(MAX_INT_AS_BIG_DECIMAL) <= 0
                & expected.compareTo(MIN_INT_AS_BIG_DECIMAL) >= 0;

        try {
          assertEquals(expected.intValue(), DoubleMath.roundToInt(d, mode));
          assertTrue(isInBounds);
        } catch (ArithmeticException e) {
          assertFalse(isInBounds);
        }
      }
    }
  }

  public void testRoundFractionalDoubleToInt() {
    for (double d : FRACTIONAL_DOUBLE_CANDIDATES) {
      for (RoundingMode mode : ALL_SAFE_ROUNDING_MODES) {
        BigDecimal expected = new BigDecimal(d).setScale(0, mode);
        boolean isInBounds =
            expected.compareTo(MAX_INT_AS_BIG_DECIMAL) <= 0
                & expected.compareTo(MIN_INT_AS_BIG_DECIMAL) >= 0;

        try {
          assertEquals(expected.intValue(), DoubleMath.roundToInt(d, mode));
          assertTrue(isInBounds);
        } catch (ArithmeticException e) {
          assertFalse(isInBounds);
        }
      }
    }
  }

  public void testRoundExactIntegralDoubleToInt() {
    for (double d : INTEGRAL_DOUBLE_CANDIDATES) {
      BigDecimal expected = new BigDecimal(d).setScale(0, UNNECESSARY);
      boolean isInBounds =
          expected.compareTo(MAX_INT_AS_BIG_DECIMAL) <= 0
              & expected.compareTo(MIN_INT_AS_BIG_DECIMAL) >= 0;

      try {
        assertEquals(expected.intValue(), DoubleMath.roundToInt(d, UNNECESSARY));
        assertTrue(isInBounds);
      } catch (ArithmeticException e) {
        assertFalse(isInBounds);
      }
    }
  }

  public void testRoundExactFractionalDoubleToIntFails() {
    for (double d : FRACTIONAL_DOUBLE_CANDIDATES) {
      try {
        DoubleMath.roundToInt(d, UNNECESSARY);
        fail("Expected ArithmeticException");
      } catch (ArithmeticException expected) {
      }
    }
  }

  public void testRoundNaNToIntAlwaysFails() {
    for (RoundingMode mode : ALL_ROUNDING_MODES) {
      try {
        DoubleMath.roundToInt(Double.NaN, mode);
        fail("Expected ArithmeticException");
      } catch (ArithmeticException expected) {
      }
    }
  }

  public void testRoundInfiniteToIntAlwaysFails() {
    for (RoundingMode mode : ALL_ROUNDING_MODES) {
      try {
        DoubleMath.roundToInt(Double.POSITIVE_INFINITY, mode);
        fail("Expected ArithmeticException");
      } catch (ArithmeticException expected) {
      }
      try {
        DoubleMath.roundToInt(Double.NEGATIVE_INFINITY, mode);
        fail("Expected ArithmeticException");
      } catch (ArithmeticException expected) {
      }
    }
  }

  public void testRoundIntegralDoubleToLong() {
    for (double d : INTEGRAL_DOUBLE_CANDIDATES) {
      for (RoundingMode mode : ALL_SAFE_ROUNDING_MODES) {
        BigDecimal expected = new BigDecimal(d).setScale(0, mode);
        boolean isInBounds =
            expected.compareTo(MAX_LONG_AS_BIG_DECIMAL) <= 0
                & expected.compareTo(MIN_LONG_AS_BIG_DECIMAL) >= 0;

        try {
          assertEquals(expected.longValue(), DoubleMath.roundToLong(d, mode));
          assertTrue(isInBounds);
        } catch (ArithmeticException e) {
          assertFalse(isInBounds);
        }
      }
    }
  }

  public void testRoundFractionalDoubleToLong() {
    for (double d : FRACTIONAL_DOUBLE_CANDIDATES) {
      for (RoundingMode mode : ALL_SAFE_ROUNDING_MODES) {
        BigDecimal expected = new BigDecimal(d).setScale(0, mode);
        boolean isInBounds =
            expected.compareTo(MAX_LONG_AS_BIG_DECIMAL) <= 0
                & expected.compareTo(MIN_LONG_AS_BIG_DECIMAL) >= 0;

        try {
          assertEquals(expected.longValue(), DoubleMath.roundToLong(d, mode));
          assertTrue(isInBounds);
        } catch (ArithmeticException e) {
          assertFalse(isInBounds);
        }
      }
    }
  }

  public void testRoundExactIntegralDoubleToLong() {
    for (double d : INTEGRAL_DOUBLE_CANDIDATES) {
      // every mode except UNNECESSARY
      BigDecimal expected = new BigDecimal(d).setScale(0, UNNECESSARY);
      boolean isInBounds =
          expected.compareTo(MAX_LONG_AS_BIG_DECIMAL) <= 0
              & expected.compareTo(MIN_LONG_AS_BIG_DECIMAL) >= 0;

      try {
        assertEquals(expected.longValue(), DoubleMath.roundToLong(d, UNNECESSARY));
        assertTrue(isInBounds);
      } catch (ArithmeticException e) {
        assertFalse(isInBounds);
      }
    }
  }

  public void testRoundExactFractionalDoubleToLongFails() {
    for (double d : FRACTIONAL_DOUBLE_CANDIDATES) {
      try {
        DoubleMath.roundToLong(d, UNNECESSARY);
        fail("Expected ArithmeticException");
      } catch (ArithmeticException expected) {
      }
    }
  }

  public void testRoundNaNToLongAlwaysFails() {
    for (RoundingMode mode : ALL_ROUNDING_MODES) {
      try {
        DoubleMath.roundToLong(Double.NaN, mode);
        fail("Expected ArithmeticException");
      } catch (ArithmeticException expected) {
      }
    }
  }

  public void testRoundInfiniteToLongAlwaysFails() {
    for (RoundingMode mode : ALL_ROUNDING_MODES) {
      try {
        DoubleMath.roundToLong(Double.POSITIVE_INFINITY, mode);
        fail("Expected ArithmeticException");
      } catch (ArithmeticException expected) {
      }
      try {
        DoubleMath.roundToLong(Double.NEGATIVE_INFINITY, mode);
        fail("Expected ArithmeticException");
      } catch (ArithmeticException expected) {
      }
    }
  }

  public void testRoundIntegralDoubleToBigInteger() {
    for (double d : INTEGRAL_DOUBLE_CANDIDATES) {
      for (RoundingMode mode : ALL_SAFE_ROUNDING_MODES) {
        BigDecimal expected = new BigDecimal(d).setScale(0, mode);
        assertEquals(expected.toBigInteger(), DoubleMath.roundToBigInteger(d, mode));
      }
    }
  }

  public void testRoundFractionalDoubleToBigInteger() {
    for (double d : FRACTIONAL_DOUBLE_CANDIDATES) {
      for (RoundingMode mode : ALL_SAFE_ROUNDING_MODES) {
        BigDecimal expected = new BigDecimal(d).setScale(0, mode);
        assertEquals(expected.toBigInteger(), DoubleMath.roundToBigInteger(d, mode));
      }
    }
  }

  public void testRoundExactIntegralDoubleToBigInteger() {
    for (double d : INTEGRAL_DOUBLE_CANDIDATES) {
      BigDecimal expected = new BigDecimal(d).setScale(0, UNNECESSARY);
      assertEquals(expected.toBigInteger(), DoubleMath.roundToBigInteger(d, UNNECESSARY));
    }
  }

  public void testRoundExactFractionalDoubleToBigIntegerFails() {
    for (double d : FRACTIONAL_DOUBLE_CANDIDATES) {
      try {
        DoubleMath.roundToBigInteger(d, UNNECESSARY);
        fail("Expected ArithmeticException");
      } catch (ArithmeticException expected) {
      }
    }
  }

  public void testRoundNaNToBigIntegerAlwaysFails() {
    for (RoundingMode mode : ALL_ROUNDING_MODES) {
      try {
        DoubleMath.roundToBigInteger(Double.NaN, mode);
        fail("Expected ArithmeticException");
      } catch (ArithmeticException expected) {
      }
    }
  }

  public void testRoundInfiniteToBigIntegerAlwaysFails() {
    for (RoundingMode mode : ALL_ROUNDING_MODES) {
      try {
        DoubleMath.roundToBigInteger(Double.POSITIVE_INFINITY, mode);
        fail("Expected ArithmeticException");
      } catch (ArithmeticException expected) {
      }
      try {
        DoubleMath.roundToBigInteger(Double.NEGATIVE_INFINITY, mode);
        fail("Expected ArithmeticException");
      } catch (ArithmeticException expected) {
      }
    }
  }

  public void testRoundLog2Floor() {
    for (double d : POSITIVE_FINITE_DOUBLE_CANDIDATES) {
      int log2 = DoubleMath.log2(d, FLOOR);
      assertTrue(StrictMath.pow(2.0, log2) <= d);
      assertTrue(StrictMath.pow(2.0, log2 + 1) > d);
    }
  }

  public void testRoundLog2Ceiling() {
    for (double d : POSITIVE_FINITE_DOUBLE_CANDIDATES) {
      int log2 = DoubleMath.log2(d, CEILING);
      assertTrue(StrictMath.pow(2.0, log2) >= d);
      double z = StrictMath.pow(2.0, log2 - 1);
      assertTrue(z < d);
    }
  }

  public void testRoundLog2Down() {
    for (double d : POSITIVE_FINITE_DOUBLE_CANDIDATES) {
      int log2 = DoubleMath.log2(d, DOWN);
      if (d >= 1.0) {
        assertTrue(log2 >= 0);
        assertTrue(StrictMath.pow(2.0, log2) <= d);
        assertTrue(StrictMath.pow(2.0, log2 + 1) > d);
      } else {
        assertTrue(log2 <= 0);
        assertTrue(StrictMath.pow(2.0, log2) >= d);
        assertTrue(StrictMath.pow(2.0, log2 - 1) < d);
      }
    }
  }

  public void testRoundLog2Up() {
    for (double d : POSITIVE_FINITE_DOUBLE_CANDIDATES) {
      int log2 = DoubleMath.log2(d, UP);
      if (d >= 1.0) {
        assertTrue(log2 >= 0);
        assertTrue(StrictMath.pow(2.0, log2) >= d);
        assertTrue(StrictMath.pow(2.0, log2 - 1) < d);
      } else {
        assertTrue(log2 <= 0);
        assertTrue(StrictMath.pow(2.0, log2) <= d);
        assertTrue(StrictMath.pow(2.0, log2 + 1) > d);
      }
    }
  }

  public void testRoundLog2Half() {
    // We don't expect perfect rounding accuracy.
    for (int exp : asList(-1022, -50, -1, 0, 1, 2, 3, 4, 100, 1022, 1023)) {
      for (RoundingMode mode : asList(HALF_EVEN, HALF_UP, HALF_DOWN)) {
        double x = Math.scalb(Math.sqrt(2) + 0.001, exp);
        double y = Math.scalb(Math.sqrt(2) - 0.001, exp);
        if (exp < 0) {
          assertEquals(exp + 1, DoubleMath.log2(x, mode));
          assertEquals(exp, DoubleMath.log2(y, mode));
        } else {
          assertEquals(exp + 1, DoubleMath.log2(x, mode));
          assertEquals(exp, DoubleMath.log2(y, mode));
        }
      }
    }
  }

  public void testRoundLog2Exact() {
    for (double x : POSITIVE_FINITE_DOUBLE_CANDIDATES) {
      boolean isPowerOfTwo = StrictMath.pow(2.0, DoubleMath.log2(x, FLOOR)) == x;
      try {
        int log2 = DoubleMath.log2(x, UNNECESSARY);
        assertEquals(x, Math.scalb(1.0, log2));
        assertTrue(isPowerOfTwo);
      } catch (ArithmeticException e) {
        assertFalse(isPowerOfTwo);
      }
    }
  }

  public void testRoundLog2ThrowsOnZerosInfinitiesAndNaN() {
    for (RoundingMode mode : ALL_ROUNDING_MODES) {
      for (double d :
          asList(0.0, -0.0, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, Double.NaN)) {
        try {
          DoubleMath.log2(d, mode);
          fail("Expected IllegalArgumentException");
        } catch (IllegalArgumentException e) {
        }
      }
    }
  }

  public void testRoundLog2ThrowsOnNegative() {
    for (RoundingMode mode : ALL_ROUNDING_MODES) {
      for (double d : POSITIVE_FINITE_DOUBLE_CANDIDATES) {
        try {
          DoubleMath.log2(-d, mode);
          fail("Expected IllegalArgumentException");
        } catch (IllegalArgumentException e) {
        }
      }
    }
  }

  public void testIsPowerOfTwoYes() {
    for (int i = -1074; i <= 1023; i++) {
      assertTrue(DoubleMath.isPowerOfTwo(StrictMath.pow(2.0, i)));
    }
  }

  public void testIsPowerOfTwo() {
    for (double x : ALL_DOUBLE_CANDIDATES) {
      boolean expected =
          x > 0
              && !Double.isInfinite(x)
              && !Double.isNaN(x)
              && StrictMath.pow(2.0, DoubleMath.log2(x, FLOOR)) == x;
      assertEquals(expected, DoubleMath.isPowerOfTwo(x));
    }
  }

  public void testLog2Accuracy() {
    for (double d : POSITIVE_FINITE_DOUBLE_CANDIDATES) {
      double dmLog2 = DoubleMath.log2(d);
      double trueLog2 = trueLog2(d);
      assertTrue(Math.abs(dmLog2 - trueLog2) <= Math.ulp(trueLog2));
    }
  }

  public void testLog2SemiMonotonic() {
    for (double d : POSITIVE_FINITE_DOUBLE_CANDIDATES) {
      assertTrue(DoubleMath.log2(d + 0.01) >= DoubleMath.log2(d));
    }
  }

  public void testLog2Negative() {
    for (double d : POSITIVE_FINITE_DOUBLE_CANDIDATES) {
      assertTrue(Double.isNaN(DoubleMath.log2(-d)));
    }
  }

  public void testLog2Zero() {
    assertEquals(Double.NEGATIVE_INFINITY, DoubleMath.log2(0.0));
    assertEquals(Double.NEGATIVE_INFINITY, DoubleMath.log2(-0.0));
  }

  public void testLog2NaNInfinity() {
    assertEquals(Double.POSITIVE_INFINITY, DoubleMath.log2(Double.POSITIVE_INFINITY));
    assertTrue(Double.isNaN(DoubleMath.log2(Double.NEGATIVE_INFINITY)));
    assertTrue(Double.isNaN(DoubleMath.log2(Double.NaN)));
  }

  private strictfp double trueLog2(double d) {
    double trueLog2 = StrictMath.log(d) / StrictMath.log(2);
    // increment until it's >= the true value
    while (StrictMath.pow(2.0, trueLog2) < d) {
      trueLog2 = StrictMath.nextUp(trueLog2);
    }
    // decrement until it's <= the true value
    while (StrictMath.pow(2.0, trueLog2) > d) {
      trueLog2 = StrictMath.nextAfter(trueLog2, Double.NEGATIVE_INFINITY);
    }
    if (StrictMath.abs(StrictMath.pow(2.0, trueLog2) - d)
        > StrictMath.abs(StrictMath.pow(2.0, StrictMath.nextUp(trueLog2)) - d)) {
      trueLog2 = StrictMath.nextUp(trueLog2);
    }
    return trueLog2;
  }

  public void testIsMathematicalIntegerIntegral() {
    for (double d : INTEGRAL_DOUBLE_CANDIDATES) {
      assertTrue(DoubleMath.isMathematicalInteger(d));
    }
  }

  public void testIsMathematicalIntegerFractional() {
    for (double d : FRACTIONAL_DOUBLE_CANDIDATES) {
      assertFalse(DoubleMath.isMathematicalInteger(d));
    }
  }

  public void testIsMathematicalIntegerNotFinite() {
    for (double d : Arrays.asList(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, Double.NaN)) {
      assertFalse(DoubleMath.isMathematicalInteger(d));
    }
  }

  public void testFactorial() {
    for (int i = 0; i <= DoubleMath.MAX_FACTORIAL; i++) {
      double actual = BigIntegerMath.factorial(i).doubleValue();
      double result = DoubleMath.factorial(i);
      assertEquals(actual, result, Math.ulp(actual));
    }
  }

  public void testFactorialTooHigh() {
    assertEquals(Double.POSITIVE_INFINITY, DoubleMath.factorial(DoubleMath.MAX_FACTORIAL + 1));
    assertEquals(Double.POSITIVE_INFINITY, DoubleMath.factorial(DoubleMath.MAX_FACTORIAL + 20));
  }

  public void testFactorialNegative() {
    for (int n : NEGATIVE_INTEGER_CANDIDATES) {
      try {
        DoubleMath.factorial(n);
        fail("Expected IllegalArgumentException");
      } catch (IllegalArgumentException expected) {
      }
    }
  }

  private static final ImmutableList<Double> FINITE_TOLERANCE_CANDIDATES =
      ImmutableList.of(-0.0, 0.0, 1.0, 100.0, 10000.0, Double.MAX_VALUE);

  private static final Iterable<Double> TOLERANCE_CANDIDATES =
      Iterables.concat(FINITE_TOLERANCE_CANDIDATES, ImmutableList.of(Double.POSITIVE_INFINITY));

  private static final List<Double> BAD_TOLERANCE_CANDIDATES =
      Doubles.asList(
          -Double.MIN_VALUE,
          -Double.MIN_NORMAL,
          -1,
          -20,
          Double.NaN,
          Double.NEGATIVE_INFINITY,
          -0.001);

  public void testFuzzyEqualsFinite() {
    for (double a : FINITE_DOUBLE_CANDIDATES) {
      for (double b : FINITE_DOUBLE_CANDIDATES) {
        for (double tolerance : FINITE_TOLERANCE_CANDIDATES) {
          assertEquals(Math.abs(a - b) <= tolerance, DoubleMath.fuzzyEquals(a, b, tolerance));
        }
      }
    }
  }

  public void testFuzzyInfiniteVersusFiniteWithFiniteTolerance() {
    for (double inf : INFINITIES) {
      for (double a : FINITE_DOUBLE_CANDIDATES) {
        for (double tolerance : FINITE_TOLERANCE_CANDIDATES) {
          assertFalse(DoubleMath.fuzzyEquals(a, inf, tolerance));
          assertFalse(DoubleMath.fuzzyEquals(inf, a, tolerance));
        }
      }
    }
  }

  public void testFuzzyInfiniteVersusInfiniteWithFiniteTolerance() {
    for (double inf : INFINITIES) {
      for (double tolerance : FINITE_TOLERANCE_CANDIDATES) {
        assertTrue(DoubleMath.fuzzyEquals(inf, inf, tolerance));
        assertFalse(DoubleMath.fuzzyEquals(inf, -inf, tolerance));
      }
    }
  }

  public void testFuzzyEqualsInfiniteTolerance() {
    for (double a : DOUBLE_CANDIDATES_EXCEPT_NAN) {
      for (double b : DOUBLE_CANDIDATES_EXCEPT_NAN) {
        assertTrue(DoubleMath.fuzzyEquals(a, b, Double.POSITIVE_INFINITY));
      }
    }
  }

  public void testFuzzyEqualsOneNaN() {
    for (double a : DOUBLE_CANDIDATES_EXCEPT_NAN) {
      for (double tolerance : TOLERANCE_CANDIDATES) {
        assertFalse(DoubleMath.fuzzyEquals(a, Double.NaN, tolerance));
        assertFalse(DoubleMath.fuzzyEquals(Double.NaN, a, tolerance));
      }
    }
  }

  public void testFuzzyEqualsTwoNaNs() {
    for (double tolerance : TOLERANCE_CANDIDATES) {
      assertTrue(DoubleMath.fuzzyEquals(Double.NaN, Double.NaN, tolerance));
    }
  }

  public void testFuzzyEqualsZeroTolerance() {
    // make sure we test -0 tolerance
    for (double zero : Doubles.asList(0.0, -0.0)) {
      for (double a : ALL_DOUBLE_CANDIDATES) {
        for (double b : ALL_DOUBLE_CANDIDATES) {
          assertEquals(
              a == b || (Double.isNaN(a) && Double.isNaN(b)), DoubleMath.fuzzyEquals(a, b, zero));
        }
      }
    }
  }

  public void testFuzzyEqualsBadTolerance() {
    for (double tolerance : BAD_TOLERANCE_CANDIDATES) {
      try {
        DoubleMath.fuzzyEquals(1, 2, tolerance);
        fail("Expected IllegalArgumentException");
      } catch (IllegalArgumentException expected) {
        // success
      }
    }
  }

  public void testFuzzyCompare() {
    for (double a : ALL_DOUBLE_CANDIDATES) {
      for (double b : ALL_DOUBLE_CANDIDATES) {
        for (double tolerance : TOLERANCE_CANDIDATES) {
          int expected = DoubleMath.fuzzyEquals(a, b, tolerance) ? 0 : Double.compare(a, b);
          int actual = DoubleMath.fuzzyCompare(a, b, tolerance);
          assertEquals(Integer.signum(expected), Integer.signum(actual));
        }
      }
    }
  }

  public void testFuzzyCompareBadTolerance() {
    for (double tolerance : BAD_TOLERANCE_CANDIDATES) {
      try {
        DoubleMath.fuzzyCompare(1, 2, tolerance);
        fail("Expected IllegalArgumentException");
      } catch (IllegalArgumentException expected) {
        // success
      }
    }
  }

  public void testNullPointers() {
    NullPointerTester tester = new NullPointerTester();
    tester.setDefault(RoundingMode.class, FLOOR);
    tester.setDefault(double.class, 3.0);
    tester.testAllPublicStaticMethods(DoubleMath.class);
  }
}
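The test class above pins down the fuzzyEquals/fuzzyCompare contract. As a quick reference, here is a minimal standalone sketch of the behaviour those tests assert; the class name and literals are illustrative only, not taken from the listing.

import com.google.common.math.DoubleMath;

public class FuzzyComparisonDemo {
  public static void main(String[] args) {
    // Equal within tolerance: |1.0 - 1.0000001| <= 1e-6
    System.out.println(DoubleMath.fuzzyEquals(1.0, 1.0000001, 1e-6));        // true
    // NaN is fuzzily equal only to NaN, for any valid tolerance
    System.out.println(DoubleMath.fuzzyEquals(Double.NaN, Double.NaN, 0.0)); // true
    System.out.println(DoubleMath.fuzzyEquals(Double.NaN, 1.0, 1e-6));       // false
    // fuzzyCompare returns 0 when fuzzyEquals holds, otherwise the sign of Double.compare
    System.out.println(DoubleMath.fuzzyCompare(1.0, 2.0, 1e-6));             // negative
    System.out.println(DoubleMath.fuzzyCompare(2.0, 1.0, 1e-6));             // positive
    // A negative or NaN tolerance is rejected
    try {
      DoubleMath.fuzzyEquals(1.0, 2.0, -0.001);
    } catch (IllegalArgumentException expected) {
      System.out.println("negative tolerance rejected");
    }
  }
}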
コード例 #23
0
 @Override
 public PersistedData serializeCollection(Collection<Double> value, SerializationContext context) {
   return context.create(Doubles.toArray(value));
 }
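For context, Doubles.toArray simply copies a Collection of boxed numbers into a primitive double[]; the surrounding serializer API is specific to the project above, so this hedged sketch demonstrates only the Guava call itself.

import com.google.common.primitives.Doubles;
import java.util.Arrays;
import java.util.List;

public class ToArrayDemo {
  public static void main(String[] args) {
    List<Double> boxed = Arrays.asList(1.5, 2.5, 3.5);
    // Copies the boxed values into a new primitive array;
    // throws NullPointerException if the collection contains null.
    double[] primitive = Doubles.toArray(boxed);
    System.out.println(Arrays.toString(primitive)); // [1.5, 2.5, 3.5]
  }
}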
コード例 #24
0
  /**
   * Tangent normalize a coverage profile.
   *
   * <p>Notes about the Spark tangent normalization can be found in docs/PoN/
   *
   * @param pon Not {@code null}
   * @param targetFactorNormalizedCounts ReadCountCollection of counts that have already been
   *     fully normalized (typically, including the target factor normalization), i.e. a coverage
   *     profile. The column names should be intact. Not {@code null}. See {@link
   *     TangentNormalizer#createCoverageProfile}.
   * @return never {@code null}
   */
  private static TangentNormalizationResult tangentNormalize(
      final PoN pon, final ReadCountCollection targetFactorNormalizedCounts, JavaSparkContext ctx) {

    Utils.nonNull(pon, "PoN cannot be null.");
    Utils.nonNull(targetFactorNormalizedCounts, "targetFactorNormalizedCounts cannot be null.");
    Utils.nonNull(
        targetFactorNormalizedCounts.columnNames(),
        "targetFactorNormalizedCounts column names cannot be null.");
    ParamUtils.isPositive(
        targetFactorNormalizedCounts.columnNames().size(),
        "targetFactorNormalizedCounts column names cannot be an empty list.");

    final Case2PoNTargetMapper targetMapper =
        new Case2PoNTargetMapper(targetFactorNormalizedCounts.targets(), pon.getPanelTargetNames());

    // The input counts with rows (targets) sorted so that they match the PoN's order.
    final RealMatrix tangentNormalizationRawInputCounts =
        targetMapper.fromCaseToPoNCounts(targetFactorNormalizedCounts.counts());

    // We prepare the counts for tangent normalization.
    final RealMatrix tangentNormalizationInputCounts =
        composeTangentNormalizationInputMatrix(tangentNormalizationRawInputCounts);

    if (ctx == null) {

      // Calculate the beta-hats for the input read count columns (samples).
      logger.info("Calculating beta hats...");
      final RealMatrix tangentBetaHats =
          pon.betaHats(tangentNormalizationInputCounts, true, EPSILON);

      // Actual tangent normalization step.
      logger.info(
          "Performing actual tangent normalization ("
              + tangentNormalizationInputCounts.getColumnDimension()
              + " columns)...");
      final RealMatrix tangentNormalizedCounts =
          pon.tangentNormalization(tangentNormalizationInputCounts, tangentBetaHats, true);

      // Output the tangent normalized counts.
      logger.info("Post-processing tangent normalization results...");
      final ReadCountCollection tangentNormalized =
          targetMapper.fromPoNtoCaseCountCollection(
              tangentNormalizedCounts, targetFactorNormalizedCounts.columnNames());
      final ReadCountCollection preTangentNormalized =
          targetMapper.fromPoNtoCaseCountCollection(
              tangentNormalizationInputCounts, targetFactorNormalizedCounts.columnNames());

      return new TangentNormalizationResult(
          tangentNormalized, preTangentNormalized, tangentBetaHats, targetFactorNormalizedCounts);

    } else {

      /*
      Using Spark:  the code here is a little more complex for optimization purposes.

      Please see notes in docs/PoN ...

      Ahat^T = (C^T P^T) A^T
      Therefore, C^T is the RowMatrix

      pinv: P
      panel: A
      projection: Ahat
      cases: C
      betahat: C^T P^T
      tangentNormalizedCounts: C - Ahat
       */
      final RealMatrix pinv = pon.getReducedPanelPInverseCounts();
      final RealMatrix panel = pon.getReducedPanelCounts();

      // Make the C^T a distributed matrix (RowMatrix)
      final RowMatrix caseTDistMat =
          SparkConverter.convertRealMatrixToSparkRowMatrix(
              ctx, tangentNormalizationInputCounts.transpose(), TN_NUM_SLICES_SPARK);

      // Spark local matrices (transposed)
      final Matrix pinvTLocalMat =
          new DenseMatrix(
                  pinv.getRowDimension(),
                  pinv.getColumnDimension(),
                  Doubles.concat(pinv.getData()),
                  true)
              .transpose();
      final Matrix panelTLocalMat =
          new DenseMatrix(
                  panel.getRowDimension(),
                  panel.getColumnDimension(),
                  Doubles.concat(panel.getData()),
                  true)
              .transpose();

      // Calculate the projection transpose in a distributed matrix, then convert to Apache Commons
      // matrix (not transposed)
      final RowMatrix betahatDistMat = caseTDistMat.multiply(pinvTLocalMat);
      final RowMatrix projectionTDistMat = betahatDistMat.multiply(panelTLocalMat);
      final RealMatrix projection =
          SparkConverter.convertSparkRowMatrixToRealMatrix(
                  projectionTDistMat, tangentNormalizationInputCounts.transpose().getRowDimension())
              .transpose();

      // Subtract the cases from the projection
      final RealMatrix tangentNormalizedCounts =
          tangentNormalizationInputCounts.subtract(projection);

      // Construct the result object and return it with the correct targets.
      final ReadCountCollection tangentNormalized =
          targetMapper.fromPoNtoCaseCountCollection(
              tangentNormalizedCounts, targetFactorNormalizedCounts.columnNames());
      final ReadCountCollection preTangentNormalized =
          targetMapper.fromPoNtoCaseCountCollection(
              tangentNormalizationInputCounts, targetFactorNormalizedCounts.columnNames());
      final RealMatrix tangentBetaHats =
          SparkConverter.convertSparkRowMatrixToRealMatrix(
              betahatDistMat, tangentNormalizedCounts.getColumnDimension());
      return new TangentNormalizationResult(
          tangentNormalized,
          preTangentNormalized,
          tangentBetaHats.transpose(),
          targetFactorNormalizedCounts);
    }
  }
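The comment block inside the Spark branch encodes the identity Ahat^T = (C^T P^T) A^T. Ignoring the distributed-matrix plumbing, the following is a minimal local sketch of that algebra using Apache Commons Math; the matrix names mirror the comment and the dimensions are purely illustrative, not the PoN's real shapes.

import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.RealMatrix;

public class TangentNormalizationSketch {
  public static void main(String[] args) {
    // cases C: targets x samples, panel A: targets x reducedPanel, pinv P: reducedPanel x targets
    RealMatrix cases = new Array2DRowRealMatrix(new double[][] {{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}});
    RealMatrix panel = new Array2DRowRealMatrix(new double[][] {{1.0}, {0.0}, {1.0}});
    RealMatrix pinv  = new Array2DRowRealMatrix(new double[][] {{0.5, 0.0, 0.5}});

    // betaHat here is P * C, i.e. the transpose of the comment's "betahat: C^T P^T".
    RealMatrix betaHat = pinv.multiply(cases);
    // projection Ahat = A * (P * C); tangent-normalized counts = C - Ahat
    RealMatrix projection = panel.multiply(betaHat);
    RealMatrix tangentNormalized = cases.subtract(projection);
    System.out.println(tangentNormalized);
  }
}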
コード例 #25
0
ファイル: MathFunctions.java プロジェクト: rayzhang123/presto
 @Description("test if value is finite")
 @ScalarFunction
 @SqlType(BooleanType.NAME)
 public static boolean isFinite(@SqlType(DoubleType.NAME) double num) {
   return Doubles.isFinite(num);
 }
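Doubles.isFinite(x) is true exactly when x is neither NaN nor infinite; a brief illustration, with Double.isFinite shown as the equivalent JDK call on Java 8+.

import com.google.common.primitives.Doubles;

public class IsFiniteDemo {
  public static void main(String[] args) {
    System.out.println(Doubles.isFinite(42.0));                      // true
    System.out.println(Doubles.isFinite(Double.NaN));                // false
    System.out.println(Doubles.isFinite(Double.POSITIVE_INFINITY));  // false
    // Equivalent check without Guava (Java 8+):
    System.out.println(Double.isFinite(42.0));                       // true
  }
}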
コード例 #26
0
ファイル: ExprEquivalence.java プロジェクト: chone/plovr
 @Override
 protected Integer visitFloatNode(FloatNode node) {
   return Doubles.hashCode(node.getValue());
 }
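Doubles.hashCode(value) produces the same hash as the boxed Double's hashCode, which is what makes it a drop-in choice for the equivalence visitor above; a quick check:

import com.google.common.primitives.Doubles;

public class DoubleHashDemo {
  public static void main(String[] args) {
    double value = 3.14159;
    // Both lines print the same hash, derived from Double.doubleToLongBits(value).
    System.out.println(Doubles.hashCode(value));
    System.out.println(Double.valueOf(value).hashCode());
    System.out.println(Doubles.hashCode(value) == Double.valueOf(value).hashCode()); // true
  }
}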
コード例 #27
0
 private static ImmutableList<Double> values(double... values) {
   return ImmutableList.copyOf(Doubles.asList(values));
 }
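Doubles.asList wraps the primitive array in a fixed-size List<Double> view, so the ImmutableList.copyOf above is what detaches the result from the varargs array; a small sketch of the difference:

import com.google.common.collect.ImmutableList;
import com.google.common.primitives.Doubles;
import java.util.List;

public class AsListVsCopyDemo {
  public static void main(String[] args) {
    double[] backing = {1.0, 2.0, 3.0};
    List<Double> view = Doubles.asList(backing);     // view backed by the array
    List<Double> copy = ImmutableList.copyOf(view);  // independent immutable copy

    backing[0] = 99.0;
    System.out.println(view.get(0)); // 99.0 - reflects the mutation
    System.out.println(copy.get(0)); // 1.0  - unaffected
  }
}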
コード例 #28
0
 @Override
 public int compareTo(Path that) {
   return Doubles.compare(this.score, that.score);
 }
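Doubles.compare is equivalent to Double.compare, giving a total ordering in which NaN sorts above every other value and -0.0 below 0.0; a short illustration of sorting with it:

import com.google.common.primitives.Doubles;
import java.util.Arrays;

public class DoublesCompareDemo {
  public static void main(String[] args) {
    Double[] scores = {0.0, Double.NaN, -0.0, 1.5, Double.NEGATIVE_INFINITY};
    // Sort using the same total order Doubles.compare provides.
    Arrays.sort(scores, (a, b) -> Doubles.compare(a, b));
    System.out.println(Arrays.toString(scores)); // [-Infinity, -0.0, 0.0, 1.5, NaN]
  }
}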
コード例 #29
0
  @Override
  public PiecewisePolynomialResultsWithSensitivity interpolateWithSensitivity(
      final double[] xValues, final double[] yValues) {

    ArgumentChecker.notNull(xValues, "xValues");
    ArgumentChecker.notNull(yValues, "yValues");

    ArgumentChecker.isTrue(
        xValues.length == yValues.length | xValues.length + 2 == yValues.length,
        "(xValues length = yValues length) or (xValues length + 2 = yValues length)");
    ArgumentChecker.isTrue(xValues.length > 2, "Data points should be more than 2");

    final int nDataPts = xValues.length;
    final int yValuesLen = yValues.length;

    for (int i = 0; i < nDataPts; ++i) {
      ArgumentChecker.isFalse(Double.isNaN(xValues[i]), "xValues containing NaN");
      ArgumentChecker.isFalse(Double.isInfinite(xValues[i]), "xValues containing Infinity");
    }
    for (int i = 0; i < yValuesLen; ++i) {
      ArgumentChecker.isFalse(Double.isNaN(yValues[i]), "yValues containing NaN");
      ArgumentChecker.isFalse(Double.isInfinite(yValues[i]), "yValues containing Infinity");
    }

    for (int i = 0; i < nDataPts - 1; ++i) {
      for (int j = i + 1; j < nDataPts; ++j) {
        ArgumentChecker.isFalse(xValues[i] == xValues[j], "xValues should be distinct");
      }
    }

    double[] yValuesSrt = new double[nDataPts];
    if (nDataPts == yValuesLen) {
      yValuesSrt = Arrays.copyOf(yValues, nDataPts);
    } else {
      yValuesSrt = Arrays.copyOfRange(yValues, 1, nDataPts + 1);
    }

    final double[] intervals = _solver.intervalsCalculator(xValues);
    final double[] slopes = _solver.slopesCalculator(yValuesSrt, intervals);
    double[][] slopesSensitivity = _solver.slopeSensitivityCalculator(intervals);
    final DoubleMatrix1D[] firstWithSensitivity = new DoubleMatrix1D[nDataPts + 1];
    final DoubleMatrix1D[] secondWithSensitivity = new DoubleMatrix1D[nDataPts + 1];
    final PiecewisePolynomialResult result = _method.interpolate(xValues, yValues);

    ArgumentChecker.isTrue(result.getOrder() >= 3, "Primary interpolant should be degree >= 2");

    final double[] initialFirst = _function.differentiate(result, xValues).getData()[0];
    final double[] initialSecond = _function.differentiateTwice(result, xValues).getData()[0];
    double[] first = firstDerivativeCalculator(yValuesSrt, intervals, slopes, initialFirst);
    boolean modFirst = false;
    int k;
    double[] aValues = aValuesCalculator(slopes, first);
    double[] bValues = bValuesCalculator(slopes, first);
    double[][] intervalsA = getIntervalsA(intervals, slopes, first, bValues);
    double[][] intervalsB = getIntervalsB(intervals, slopes, first, aValues);
    while (!modFirst) {
      k = 0;
      for (int i = 0; i < nDataPts - 2; ++i) {
        if (first[i + 1] > 0.) {
          if (intervalsA[i + 1][1] + Math.abs(intervalsA[i + 1][1]) * ERROR
                  < intervalsB[i][0] - Math.abs(intervalsB[i][0]) * ERROR
              | intervalsA[i + 1][0] - Math.abs(intervalsA[i + 1][0]) * ERROR
                  > intervalsB[i][1] + Math.abs(intervalsB[i][1]) * ERROR) {
            ++k;
            first[i + 1] = firstDerivativesRecalculator(intervals, slopes, aValues, bValues, i + 1);
          }
        }
      }
      if (k == 0) {
        modFirst = true;
      }
      aValues = aValuesCalculator(slopes, first);
      bValues = bValuesCalculator(slopes, first);
      intervalsA = getIntervalsA(intervals, slopes, first, bValues);
      intervalsB = getIntervalsB(intervals, slopes, first, aValues);
    }
    final double[] second = secondDerivativeCalculator(initialSecond, intervalsA, intervalsB);
    firstWithSensitivity[0] = new DoubleMatrix1D(first);
    secondWithSensitivity[0] = new DoubleMatrix1D(second);

    /*
     * Centered finite difference method is used for computing node sensitivity
     */
    int nExtra = (nDataPts == yValuesLen) ? 0 : 1;
    final double[] yValuesUp = Arrays.copyOf(yValues, nDataPts + 2 * nExtra);
    final double[] yValuesDw = Arrays.copyOf(yValues, nDataPts + 2 * nExtra);
    final double[][] tmpFirst = new double[nDataPts][nDataPts];
    final double[][] tmpSecond = new double[nDataPts][nDataPts];
    for (int l = nExtra; l < nDataPts + nExtra; ++l) {
      final double den = Math.abs(yValues[l]) < SMALL ? EPS : yValues[l] * EPS;
      yValuesUp[l] = Math.abs(yValues[l]) < SMALL ? EPS : yValues[l] * (1. + EPS);
      yValuesDw[l] = Math.abs(yValues[l]) < SMALL ? -EPS : yValues[l] * (1. - EPS);
      final double[] yValuesSrtUp = Arrays.copyOfRange(yValuesUp, nExtra, nDataPts + nExtra);
      final double[] yValuesSrtDw = Arrays.copyOfRange(yValuesDw, nExtra, nDataPts + nExtra);

      final DoubleMatrix1D[] yValuesUpDw =
          new DoubleMatrix1D[] {new DoubleMatrix1D(yValuesUp), new DoubleMatrix1D(yValuesDw)};
      final DoubleMatrix1D[] yValuesSrtUpDw =
          new DoubleMatrix1D[] {new DoubleMatrix1D(yValuesSrtUp), new DoubleMatrix1D(yValuesSrtDw)};
      final DoubleMatrix1D[] firstSecondUpDw = new DoubleMatrix1D[4];
      for (int ii = 0; ii < 2; ++ii) {
        final double[] slopesUpDw =
            _solver.slopesCalculator(yValuesSrtUpDw[ii].getData(), intervals);
        final PiecewisePolynomialResult resultUpDw =
            _method.interpolate(xValues, yValuesUpDw[ii].getData());
        final double[] initialFirstUpDw = _function.differentiate(resultUpDw, xValues).getData()[0];
        final double[] initialSecondUpDw =
            _function.differentiateTwice(resultUpDw, xValues).getData()[0];
        double[] firstUpDw =
            firstDerivativeCalculator(
                yValuesSrtUpDw[ii].getData(), intervals, slopesUpDw, initialFirstUpDw);
        boolean modFirstUpDw = false;
        double[] aValuesUpDw = aValuesCalculator(slopesUpDw, firstUpDw);
        double[] bValuesUpDw = bValuesCalculator(slopesUpDw, firstUpDw);
        double[][] intervalsAUpDw = getIntervalsA(intervals, slopesUpDw, firstUpDw, bValuesUpDw);
        double[][] intervalsBUpDw = getIntervalsB(intervals, slopesUpDw, firstUpDw, aValuesUpDw);
        while (!modFirstUpDw) {
          k = 0;
          for (int i = 0; i < nDataPts - 2; ++i) {
            if (firstUpDw[i + 1] > 0.) {
              if (intervalsAUpDw[i + 1][1] + Math.abs(intervalsAUpDw[i + 1][1]) * ERROR
                      < intervalsBUpDw[i][0] - Math.abs(intervalsBUpDw[i][0]) * ERROR
                  | intervalsAUpDw[i + 1][0] - Math.abs(intervalsAUpDw[i + 1][0]) * ERROR
                      > intervalsBUpDw[i][1] + Math.abs(intervalsBUpDw[i][1]) * ERROR) {
                ++k;
                firstUpDw[i + 1] =
                    firstDerivativesRecalculator(
                        intervals, slopesUpDw, aValuesUpDw, bValuesUpDw, i + 1);
              }
            }
          }
          if (k == 0) {
            modFirstUpDw = true;
          }
          aValuesUpDw = aValuesCalculator(slopesUpDw, firstUpDw);
          bValuesUpDw = bValuesCalculator(slopesUpDw, firstUpDw);
          intervalsAUpDw = getIntervalsA(intervals, slopesUpDw, firstUpDw, bValuesUpDw);
          intervalsBUpDw = getIntervalsB(intervals, slopesUpDw, firstUpDw, aValuesUpDw);
        }
        final double[] secondUpDw =
            secondDerivativeCalculator(initialSecondUpDw, intervalsAUpDw, intervalsBUpDw);
        firstSecondUpDw[ii] = new DoubleMatrix1D(firstUpDw);
        firstSecondUpDw[2 + ii] = new DoubleMatrix1D(secondUpDw);
      }
      for (int j = 0; j < nDataPts; ++j) {
        tmpFirst[j][l - nExtra] =
            0.5 * (firstSecondUpDw[0].getData()[j] - firstSecondUpDw[1].getData()[j]) / den;
        tmpSecond[j][l - nExtra] =
            0.5 * (firstSecondUpDw[2].getData()[j] - firstSecondUpDw[3].getData()[j]) / den;
      }
      yValuesUp[l] = yValues[l];
      yValuesDw[l] = yValues[l];
    }
    for (int i = 0; i < nDataPts; ++i) {
      firstWithSensitivity[i + 1] = new DoubleMatrix1D(tmpFirst[i]);
      secondWithSensitivity[i + 1] = new DoubleMatrix1D(tmpSecond[i]);
    }

    final DoubleMatrix2D[] resMatrix =
        _solver.solveWithSensitivity(
            yValuesSrt,
            intervals,
            slopes,
            slopesSensitivity,
            firstWithSensitivity,
            secondWithSensitivity);

    for (int l = 0; l < nDataPts; ++l) {
      DoubleMatrix2D m = resMatrix[l];
      final int rows = m.getNumberOfRows();
      final int cols = m.getNumberOfColumns();
      for (int i = 0; i < rows; ++i) {
        for (int j = 0; j < cols; ++j) {
          ArgumentChecker.isTrue(
              Doubles.isFinite(m.getEntry(i, j)), "Matrix contains a NaN or infinite");
        }
      }
    }

    final DoubleMatrix2D coefMatrix = resMatrix[0];
    final DoubleMatrix2D[] coefSenseMatrix = new DoubleMatrix2D[nDataPts - 1];
    System.arraycopy(resMatrix, 1, coefSenseMatrix, 0, nDataPts - 1);
    final int nCoefs = coefMatrix.getNumberOfColumns();

    return new PiecewisePolynomialResultsWithSensitivity(
        new DoubleMatrix1D(xValues), coefMatrix, nCoefs, 1, coefSenseMatrix);
  }
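The node-sensitivity loop above is a centered finite difference in each y value: bump y[l] up and down by a relative EPS, re-run the interpolation, and take half the difference of the resulting derivatives over the bump size. Stripped of the monotonicity machinery, the pattern looks like the sketch below; EPS, SMALL and the function f are placeholders standing in for the class fields and the full derivative recomputation, not the actual implementation.

import java.util.Arrays;
import java.util.function.Function;

public class CenteredDifferenceSketch {
  private static final double EPS = 1e-6;
  private static final double SMALL = 1e-14;

  /** sensitivity[j][l] ~= d f(y)[j] / d y[l], via a centered relative bump of y[l]. */
  static double[][] nodeSensitivity(double[] y, Function<double[], double[]> f) {
    int n = y.length;
    int m = f.apply(y).length;
    double[][] sensitivity = new double[m][n];
    for (int l = 0; l < n; ++l) {
      // Same bump convention as above: relative bump, absolute fallback near zero.
      double den = Math.abs(y[l]) < SMALL ? EPS : y[l] * EPS;
      double[] up = Arrays.copyOf(y, n);
      double[] dw = Arrays.copyOf(y, n);
      up[l] = Math.abs(y[l]) < SMALL ? EPS : y[l] * (1. + EPS);
      dw[l] = Math.abs(y[l]) < SMALL ? -EPS : y[l] * (1. - EPS);
      double[] fUp = f.apply(up);
      double[] fDw = f.apply(dw);
      for (int j = 0; j < m; ++j) {
        sensitivity[j][l] = 0.5 * (fUp[j] - fDw[j]) / den;
      }
    }
    return sensitivity;
  }

  public static void main(String[] args) {
    // Example: f(y) = (y0 * y1, y0 + y1); exact Jacobian is [[y1, y0], [1, 1]].
    double[] y = {2.0, 3.0};
    double[][] jac = nodeSensitivity(y, v -> new double[] {v[0] * v[1], v[0] + v[1]});
    System.out.println(Arrays.deepToString(jac)); // approximately [[3.0, 2.0], [1.0, 1.0]]
  }
}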