Example #1
  /**
   * Computes the Euclidean distance between a neuron and a vector
   *
   * @param v A vector
   * @return The Euclidean distance as a double
   */
  public double euclideaDist(double[] v) {
    int i;
    double aux = 0;

    for (i = 0; i < nInput; i++) aux += (v[i] - centre[i]) * (v[i] - centre[i]);
    return (Math.sqrt(aux));
  }
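
For reference, a minimal self-contained sketch of the same computation, assuming the centre vector is passed in explicitly rather than read from the enclosing neuron class (the helper name and parameters below are illustrative only):

  // Standalone sketch of the Euclidean distance; the method name and the explicit
  // `centre` parameter are illustrative, not part of the original class.
  public static double euclideanDistance(double[] v, double[] centre) {
    double sum = 0;
    for (int i = 0; i < centre.length; i++) {
      sum += (v[i] - centre[i]) * (v[i] - centre[i]);
    }
    return Math.sqrt(sum);
  }

  // Example: the distance between (3, 4) and (0, 0) is 5.0
  // euclideanDistance(new double[] {3, 4}, new double[] {0, 0});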
Example #2
  /**
   * This private method computes the distance between an example in the dataset and a cluster
   * centroid. The distance is measured as the square root of the sum of the squares of the
   * differences between the example and the centroid over all dimensions.
   *
   * @param a The example in the dataset
   * @param b The cluster centroid
   * @return The distance between a and b as a double precision value.
   */
  private static double distance(double a[], double b[]) {

    // Euclidean distance between two patterns
    double d = 0;
    for (int i = 0; i < a.length; i++) d += (a[i] - b[i]) * (a[i] - b[i]);
    return (double) Math.sqrt(d);
  }
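
A worked call, assuming the method is invoked from inside its own class (it is declared private static); the values are illustrative only:

  // (1 - 4)^2 + (5 - 1)^2 = 9 + 16 = 25, so the distance is 5.0
  double d = distance(new double[] {1, 5}, new double[] {4, 1});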
Example #3
 /**
  * Fisher's F distribution
  *
  * @param f Fisher F value
  * @param n1 First degrees of freedom
  * @param n2 Second degrees of freedom
  * @return The associated p-value
  */
 private static double FishF(double f, int n1, int n2) {
   double x = n2 / (n1 * f + n2);
   if ((n1 % 2) == 0) {
     return StatCom(1 - x, n2, n1 + n2 - 4, n2 - 2) * Math.pow(x, n2 / 2.0);
   }
   if ((n2 % 2) == 0) {
     return 1 - StatCom(x, n1, n1 + n2 - 4, n1 - 2) * Math.pow(1 - x, n1 / 2.0);
   }
   double th = Math.atan(Math.sqrt(n1 * f / (1.0 * n2)));
   double a = th / (Math.PI / 2.0);
   double sth = Math.sin(th);
   double cth = Math.cos(th);
   if (n2 > 1) {
     a = a + sth * cth * StatCom(cth * cth, 2, n2 - 3, -1) / (Math.PI / 2.0);
   }
   if (n1 == 1) {
     return 1 - a;
   }
   double c =
       4 * StatCom(sth * sth, n2 + 1, n1 + n2 - 4, n2 - 2) * sth * Math.pow(cth, n2) / Math.PI;
   if (n2 == 1) {
     return 1 - a + c / 2.0;
   }
   int k = 2;
   while (k <= (n2 - 1) / 2.0) {
     c = c * k / (k - .5);
     k = k + 1;
   }
   return 1 - a + c;
 }
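
A hedged usage sketch: `FishF` appears to return the upper-tail probability P(F > f) of an F distribution with `n1` and `n2` degrees of freedom, and it relies on the `StatCom` helper from the same source class (not shown here), so the call only works where that helper is in scope. The values are illustrative only:

 // Upper-tail p-value for an observed F statistic, e.g. F = 2.5 with
 // 4 and 20 degrees of freedom (illustrative values, called from inside the class).
 double pF = FishF(2.5, 4, 20);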
Example #4
 /**
  * Chi-square distribution
  *
  * @param x Chi^2 value
  * @param n Degrees of freedom
  * @return The associated p-value
  */
 private static double ChiSq(double x, int n) {
   if (n == 1 && x > 1000) {
     return 0;
   }
   if (x > 1000 || n > 1000) {
     double q = ChiSq((x - n) * (x - n) / (2 * n), 1) / 2;
     if (x > n) {
       return q;
     } else {
       return 1 - q;
     }
   }
   double p = Math.exp(-0.5 * x);
   if ((n % 2) == 1) {
     p = p * Math.sqrt(2 * x / Math.PI);
   }
   double k = n;
   while (k >= 2) {
     p = p * x / k;
     k = k - 2;
   }
   double t = p;
   double a = n;
   while (t > 0.0000000001 * p) {
     a = a + 2;
     t = t * x / a;
     p = p + t;
   }
   return 1 - p;
 }
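
A hedged usage sketch, mirroring how Example #6 feeds the Friedman statistic into this routine via `ChiSq(friedman, algorithmName.length - 1)`; the numbers are illustrative only:

 // Upper-tail p-value of a chi-square statistic with n degrees of freedom,
 // e.g. a Friedman statistic of 9.2 with k - 1 = 3 degrees of freedom.
 double pChi = ChiSq(9.2, 3);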
Example #5
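The source provides no documentation for this method; a hedged description, inferred from the code below:

  /**
   * Presumably computes the adjusted residual of a rule for a given class: the sum, over the
   * rule's conditions, of the standardized residual divided by the square root of the
   * maximum-likelihood estimate (description inferred from the code, not from the original
   * documentation).
   *
   * @param data The dataset
   * @param classData The class of each example
   * @param regla The rule under evaluation
   * @param clase The class index
   * @return The adjusted residual as a double
   */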
  private double computeAdjustedResidual(int data[][], int classData[], Rule regla, int clase) {

    double suma = 0;
    int i;

    for (i = 0; i < regla.getRule().length; i++) {
      suma +=
          computeStandarizedResidual(data, classData, regla.getiCondition(i), clase)
              / Math.sqrt(
                  computeMaximumLikelohoodEstimate(data, classData, regla.getiCondition(i), clase));
    }

    return suma;
  }
Example #6
  /**
   * This method runs the multiple comparison tests
   *
   * @param results Array with the results of the methods
   * @param algorithmName Array with the names of the methods employed
   * @return A string with the contents of the test in LaTeX format
   */
  private static String runMultiple(double[][] results, String algorithmName[]) {

    int i, j, k;
    int posicion;
    double mean[][];
    MultiplePair orden[][];
    MultiplePair rank[][];
    boolean encontrado;
    int ig;
    double sum;
    boolean visto[];
    Vector<Integer> porVisitar;
    double Rj[];
    double friedman;
    double sumatoria = 0;
    double termino1, termino2, termino3;
    double iman;
    boolean vistos[];
    int pos, tmp, counter;
    String cad;
    double maxVal;
    double Pi[];
    double ALPHAiHolm[];
    double ALPHAiShaffer[];
    String ordenAlgoritmos[];
    double ordenRankings[];
    int order[];
    double adjustedP[][];
    double SE;
    boolean parar;
    Vector<Integer> indices = new Vector<Integer>();
    Vector<Vector<Relation>> exhaustiveI = new Vector<Vector<Relation>>();
    boolean[][] cuadro;
    double minPi, tmpPi, maxAPi, tmpAPi;
    Relation[] parejitas;
    Vector<Integer> T;
    int Tarray[];

    DecimalFormat nf4 = (DecimalFormat) DecimalFormat.getInstance();
    nf4.setMaximumFractionDigits(4);
    nf4.setMinimumFractionDigits(0);

    DecimalFormatSymbols dfs = nf4.getDecimalFormatSymbols();
    dfs.setDecimalSeparator('.');
    nf4.setDecimalFormatSymbols(dfs);

    DecimalFormat nf6 = (DecimalFormat) DecimalFormat.getInstance();
    nf6.setMaximumFractionDigits(6);
    nf6.setMinimumFractionDigits(0);

    nf6.setDecimalFormatSymbols(dfs);

    String out = "";

    int nDatasets = Configuration.getNDatasets();

    Iman = Configuration.isIman();
    Nemenyi = Configuration.isNemenyi();
    Bonferroni = Configuration.isBonferroni();
    Holm = Configuration.isHolm();
    Hoch = Configuration.isHochberg();
    Hommel = Configuration.isHommel();
    Scha = Configuration.isShaffer();
    Berg = Configuration.isBergman();

    mean = new double[nDatasets][algorithmName.length];

    // Maximize performance
    if (Configuration.getObjective() == 1) {

      /*Compute the average performance per algorithm for each data set*/
      for (i = 0; i < nDatasets; i++) {
        for (j = 0; j < algorithmName.length; j++) {
          mean[i][j] = results[j][i];
        }
      }

    }
    // Minimize performance
    else {

      double maxValue = Double.NEGATIVE_INFINITY;

      /*Compute the average performance per algorithm for each data set*/
      for (i = 0; i < nDatasets; i++) {
        for (j = 0; j < algorithmName.length; j++) {

          if (results[j][i] > maxValue) {
            maxValue = results[j][i];
          }

          mean[i][j] = (-1.0 * results[j][i]);
        }
      }

      for (i = 0; i < nDatasets; i++) {
        for (j = 0; j < algorithmName.length; j++) {
          mean[i][j] += maxValue;
        }
      }
    }

    /*We use the MultiplePair structure to compute and order the rankings*/
    orden = new MultiplePair[nDatasets][algorithmName.length];
    for (i = 0; i < nDatasets; i++) {
      for (j = 0; j < algorithmName.length; j++) {
        orden[i][j] = new MultiplePair(j, mean[i][j]);
      }
      Arrays.sort(orden[i]);
    }

    /*Build the rankings table per algorithm and data set*/
    rank = new MultiplePair[nDatasets][algorithmName.length];
    posicion = 0;
    for (i = 0; i < nDatasets; i++) {
      for (j = 0; j < algorithmName.length; j++) {
        encontrado = false;
        for (k = 0; k < algorithmName.length && !encontrado; k++) {
          if (orden[i][k].indice == j) {
            encontrado = true;
            posicion = k + 1;
          }
        }
        rank[i][j] = new MultiplePair(posicion, orden[i][posicion - 1].valor);
      }
    }

    /*Algorithms with the same performance receive the same (average) rank*/
    for (i = 0; i < nDatasets; i++) {
      visto = new boolean[algorithmName.length];
      porVisitar = new Vector<Integer>();

      Arrays.fill(visto, false);
      for (j = 0; j < algorithmName.length; j++) {
        porVisitar.removeAllElements();
        sum = rank[i][j].indice;
        visto[j] = true;
        ig = 1;
        for (k = j + 1; k < algorithmName.length; k++) {
          if (rank[i][j].valor == rank[i][k].valor && !visto[k]) {
            sum += rank[i][k].indice;
            ig++;
            porVisitar.add(new Integer(k));
            visto[k] = true;
          }
        }
        sum /= (double) ig;
        rank[i][j].indice = sum;
        for (k = 0; k < porVisitar.size(); k++) {
          rank[i][((Integer) porVisitar.elementAt(k)).intValue()].indice = sum;
        }
      }
    }

    /*compute the average ranking for each algorithm*/
    Rj = new double[algorithmName.length];
    for (i = 0; i < algorithmName.length; i++) {
      Rj[i] = 0;
      for (j = 0; j < nDatasets; j++) {
        Rj[i] += rank[j][i].indice / ((double) nDatasets);
      }
    }

    /*Print the average ranking per algorithm*/
    out += "\n\nAverage ranks obtained by applying the Friedman procedure\n\n";

    out +=
        "\\begin{table}[!htp]\n"
            + "\\centering\n"
            + "\\begin{tabular}{|c|c|}\\hline\n"
            + "Algorithm&Ranking\\\\\\hline\n";
    for (i = 0; i < algorithmName.length; i++) {
      out += (String) algorithmName[i] + " & " + nf4.format(Rj[i]) + "\\\\\n";
    }
    out += "\\hline\n\\end{tabular}\n\\caption{Average Rankings of the algorithms}\n\\end{table}";

    /*Compute the Friedman statistic*/
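    /* Friedman statistic (k = number of algorithms, N = number of data sets):
       chi^2_F = [12N / (k(k+1))] * [sum_j R_j^2 - k(k+1)^2 / 4],
       computed below as (sumatoria - termino2) * termino1. */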
    termino1 =
        (12 * (double) nDatasets)
            / ((double) algorithmName.length * ((double) algorithmName.length + 1));
    termino2 =
        (double) algorithmName.length
            * ((double) algorithmName.length + 1)
            * ((double) algorithmName.length + 1)
            / (4.0);
    for (i = 0; i < algorithmName.length; i++) {
      sumatoria += Rj[i] * Rj[i];
    }
    friedman = (sumatoria - termino2) * termino1;
    out +=
        "\n\nFriedman statistic considering reduction performance (distributed according to chi-square with "
            + (algorithmName.length - 1)
            + " degrees of freedom: "
            + nf6.format(friedman)
            + ".\n\n";

    double pFriedman;
    pFriedman = ChiSq(friedman, (algorithmName.length - 1));

    System.out.print("P-value computed by Friedman Test: " + pFriedman + ".\\newline\n\n");

    /*Compute the Iman-Davenport statistic*/
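    /* Iman-Davenport correction of the Friedman statistic:
       F_F = [(N - 1) * chi^2_F] / [N(k - 1) - chi^2_F],
       distributed according to an F distribution with (k - 1) and (k - 1)(N - 1)
       degrees of freedom. */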
    if (Iman) {
      iman = ((nDatasets - 1) * friedman) / (nDatasets * (algorithmName.length - 1) - friedman);
      out +=
          "Iman and Davenport statistic considering reduction performance (distributed according to F-distribution with "
              + (algorithmName.length - 1)
              + " and "
              + (algorithmName.length - 1) * (nDatasets - 1)
              + " degrees of freedom: "
              + nf6.format(iman)
              + ".\n\n";

      double pIman;

      pIman = FishF(iman, (algorithmName.length - 1), (algorithmName.length - 1) * (nDatasets - 1));
      System.out.print("P-value computed by Iman and Daveport Test: " + pIman + ".\\newline\n\n");
    }
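    /* Standard error of the difference between two average ranks under the null
       hypothesis: SE = sqrt(k(k+1) / (6N)); stored in termino3 and reused below. */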

    termino3 =
        Math.sqrt(
            (double) algorithmName.length
                * ((double) algorithmName.length + 1)
                / (6.0 * (double) nDatasets));

    out += "\n\n\\pagebreak\n\n";

    /** ********** NxN COMPARISON ************* */
    out += "\\section{Post hoc comparisons}";
    out +=
        "\n\nResults achieved on post hoc comparisons for $\\alpha = 0.05$, $\\alpha = 0.10$ and adjusted p-values.\n\n";
    /*Compute the unadjusted p_i value for each comparison alpha=0.05*/
    Pi = new double[(int) combinatoria(2, algorithmName.length)];
    ALPHAiHolm = new double[(int) combinatoria(2, algorithmName.length)];
    ALPHAiShaffer = new double[(int) combinatoria(2, algorithmName.length)];
    ordenAlgoritmos = new String[(int) combinatoria(2, algorithmName.length)];
    ordenRankings = new double[(int) combinatoria(2, algorithmName.length)];
    order = new int[(int) combinatoria(2, algorithmName.length)];
    parejitas = new Relation[(int) combinatoria(2, algorithmName.length)];
    T = new Vector<Integer>();
    T = trueHShaffer(algorithmName.length);
    Tarray = new int[T.size()];
    for (i = 0; i < T.size(); i++) {
      Tarray[i] = ((Integer) T.elementAt(i)).intValue();
    }
    Arrays.sort(Tarray);

    SE = termino3;
    vistos = new boolean[(int) combinatoria(2, algorithmName.length)];
    for (i = 0, k = 0; i < algorithmName.length; i++) {
      for (j = i + 1; j < algorithmName.length; j++, k++) {
        ordenRankings[k] = Math.abs(Rj[i] - Rj[j]);
        ordenAlgoritmos[k] = (String) algorithmName[i] + " vs. " + (String) algorithmName[j];
        parejitas[k] = new Relation(i, j);
      }
    }

    Arrays.fill(vistos, false);
    for (i = 0; i < ordenRankings.length; i++) {
      for (j = 0; vistos[j] == true; j++) ;
      pos = j;
      maxVal = ordenRankings[j];
      for (j = j + 1; j < ordenRankings.length; j++) {
        if (vistos[j] == false && ordenRankings[j] > maxVal) {
          pos = j;
          maxVal = ordenRankings[j];
        }
      }
      vistos[pos] = true;
      order[i] = pos;
    }

    /*Computing the logically related hypotheses tests (Shaffer and Bergmann-Hommel)*/
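    /* For each pairwise comparison, taken in decreasing order of rank difference:
       z_i = |R_a - R_b| / SE and unadjusted p_i = 2 * Phi(-|z_i|). The step-down
       thresholds are alpha / (m - i) for Holm and alpha / t_i for Shaffer, where m is the
       number of comparisons and t_i is the maximum number of hypotheses that can be
       simultaneously true at step i (alpha = 0.05 in this block, 0.10 later). */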

    pos = 0;
    tmp = Tarray.length - 1;
    for (i = 0; i < order.length; i++) {
      Pi[i] = 2 * CDF_Normal.normp((-1) * Math.abs((ordenRankings[order[i]]) / SE));
      ALPHAiHolm[i] = 0.05 / ((double) order.length - (double) i);
      ALPHAiShaffer[i] = 0.05 / ((double) order.length - (double) Math.max(pos, i));
      if (i == pos && Pi[i] <= ALPHAiShaffer[i]) {
        tmp--;
        pos = (int) combinatoria(2, algorithmName.length) - Tarray[tmp];
      }
    }

    out += "\\subsection{P-values for $\\alpha=0.05$}\n\n";

    int count = 4;

    if (Holm) {
      count++;
    }
    if (Scha) {
      count++;
    }
    out +=
        "\\begin{table}[!htp]\n\\centering\\scriptsize\n"
            + "\\begin{tabular}{"
            + printC(count)
            + "}\n"
            + "$i$&algorithms&$z=(R_0 - R_i)/SE$&$p$";
    if (Holm) {
      out += "&Holm";
    }
    if (Scha) {
      out += "&Shaffer";
    }

    out += "\\\\\n\\hline";

    for (i = 0; i < order.length; i++) {
      out +=
          (order.length - i)
              + "&"
              + ordenAlgoritmos[order[i]]
              + "&"
              + nf6.format(Math.abs((ordenRankings[order[i]]) / SE))
              + "&"
              + nf6.format(Pi[i]);
      if (Holm) {
        out += "&" + nf6.format(ALPHAiHolm[i]);
      }
      if (Scha) {
        out += "&" + nf6.format(ALPHAiShaffer[i]);
      }
      out += "\\\\\n";
    }

    out +=
        "\\hline\n"
            + "\\end{tabular}\n\\caption{P-values Table for $\\alpha=0.05$}\n"
            + "\\end{table}";

    /*Compute the rejected hypotheses for each test*/

    if (Nemenyi) {
      out +=
          "Nemenyi's procedure rejects those hypotheses that have a p-value $\\le"
              + nf6.format(0.05 / (double) (order.length))
              + "$.\n\n";
    }

    if (Holm) {
      parar = false;
      for (i = 0; i < order.length && !parar; i++) {
        if (Pi[i] > ALPHAiHolm[i]) {
          out +=
              "Holm's procedure rejects those hypotheses that have a p-value $\\le"
                  + nf6.format(ALPHAiHolm[i])
                  + "$.\n\n";
          parar = true;
        }
      }
    }

    if (Scha) {
      parar = false;
      for (i = 0; i < order.length && !parar; i++) {
        if (Pi[i] <= ALPHAiShaffer[i]) {
          out +=
              "Shaffer's procedure rejects those hypotheses that have a p-value $\\le"
                  + nf6.format(ALPHAiShaffer[i])
                  + "$.\n\n";
          parar = true;
        }
      }
    }

    /*For Bergmann-Hommel's procedure, nine or more algorithms can involve intense computation*/
    if (algorithmName.length <= MAX_ALGORITHMS) {
      for (i = 0; i < algorithmName.length; i++) {
        indices.add(new Integer(i));
      }
      exhaustiveI = obtainExhaustive(indices);
      cuadro = new boolean[algorithmName.length][algorithmName.length];
      for (i = 0; i < algorithmName.length; i++) {
        Arrays.fill(cuadro[i], false);
      }

      for (i = 0; i < exhaustiveI.size(); i++) {
        minPi =
            2
                * CDF_Normal.normp(
                    (-1)
                        * Math.abs(
                            Rj[
                                    ((Relation)
                                            ((Vector<Relation>) exhaustiveI.elementAt(i))
                                                .elementAt(0))
                                        .i]
                                - Rj[
                                    ((Relation)
                                            ((Vector<Relation>) exhaustiveI.elementAt(i))
                                                .elementAt(0))
                                        .j])
                        / SE);

        for (j = 1; j < ((Vector<Relation>) exhaustiveI.elementAt(i)).size(); j++) {
          tmpPi =
              2
                  * CDF_Normal.normp(
                      (-1)
                          * Math.abs(
                              Rj[
                                      ((Relation)
                                              ((Vector<Relation>) exhaustiveI.elementAt(i))
                                                  .elementAt(j))
                                          .i]
                                  - Rj[
                                      ((Relation)
                                              ((Vector<Relation>) exhaustiveI.elementAt(i))
                                                  .elementAt(j))
                                          .j])
                          / SE);
          if (tmpPi < minPi) {
            minPi = tmpPi;
          }
        }
        if (minPi > (0.05 / ((double) ((Vector<Relation>) exhaustiveI.elementAt(i)).size()))) {
          for (j = 0; j < ((Vector<Relation>) exhaustiveI.elementAt(i)).size(); j++) {
            cuadro[((Relation) ((Vector<Relation>) exhaustiveI.elementAt(i)).elementAt(j)).i][
                    ((Relation) ((Vector<Relation>) exhaustiveI.elementAt(i)).elementAt(j)).j] =
                true;
          }
        }
      }

      if (Berg) {
        cad = "";
        cad += "Bergmann's procedure rejects these hypotheses:\n\n";
        cad += "\\begin{itemize}\n\n";

        counter = 0;
        for (i = 0; i < cuadro.length; i++) {
          for (j = i + 1; j < cuadro.length; j++) {
            if (cuadro[i][j] == false) {
              cad += "\\item " + algorithmName[i] + " vs. " + algorithmName[j] + "\n\n";
              counter++;
            }
          }
        }
        cad += "\\end{itemize}\n\n";

        if (counter > 0) {
          out += cad;
        } else {
          out += "Bergmann's procedure does not reject any hypotheses.\n\n";
        }
      }
    }

    out += "\\pagebreak\n\n";
    out += "\\subsection{P-values for $\\alpha=0.10$}\n\n";

    /*Compute the unadjusted p_i value for each comparison alpha=0.10*/
    Pi = new double[(int) combinatoria(2, algorithmName.length)];
    ALPHAiHolm = new double[(int) combinatoria(2, algorithmName.length)];
    ALPHAiShaffer = new double[(int) combinatoria(2, algorithmName.length)];
    ordenAlgoritmos = new String[(int) combinatoria(2, algorithmName.length)];
    ordenRankings = new double[(int) combinatoria(2, algorithmName.length)];
    order = new int[(int) combinatoria(2, algorithmName.length)];

    SE = termino3;
    vistos = new boolean[(int) combinatoria(2, algorithmName.length)];
    for (i = 0, k = 0; i < algorithmName.length; i++) {
      for (j = i + 1; j < algorithmName.length; j++, k++) {
        ordenRankings[k] = Math.abs(Rj[i] - Rj[j]);
        ordenAlgoritmos[k] = (String) algorithmName[i] + " vs. " + (String) algorithmName[j];
      }
    }

    Arrays.fill(vistos, false);
    for (i = 0; i < ordenRankings.length; i++) {
      for (j = 0; vistos[j] == true; j++) ;
      pos = j;
      maxVal = ordenRankings[j];

      for (j = j + 1; j < ordenRankings.length; j++) {
        if (vistos[j] == false && ordenRankings[j] > maxVal) {
          pos = j;
          maxVal = ordenRankings[j];
        }
      }
      vistos[pos] = true;
      order[i] = pos;
    }

    /*Computing the logically related hypotheses tests (Shaffer and Bergmann-Hommel)*/
    pos = 0;
    tmp = Tarray.length - 1;
    for (i = 0; i < order.length; i++) {
      Pi[i] = 2 * CDF_Normal.normp((-1) * Math.abs((ordenRankings[order[i]]) / SE));
      ALPHAiHolm[i] = 0.1 / ((double) order.length - (double) i);
      ALPHAiShaffer[i] = 0.1 / ((double) order.length - (double) Math.max(pos, i));
      if (i == pos && Pi[i] <= ALPHAiShaffer[i]) {
        tmp--;
        pos = (int) combinatoria(2, algorithmName.length) - Tarray[tmp];
      }
    }

    count = 4;

    if (Holm) {
      count++;
    }
    if (Scha) {
      count++;
    }
    out +=
        "\\begin{table}[!htp]\n\\centering\\scriptsize\n"
            + "\\begin{tabular}{"
            + printC(count)
            + "}\n"
            + "$i$&algorithms&$z=(R_0 - R_i)/SE$&$p$";
    if (Holm) {
      out += "&Holm";
    }
    if (Scha) {
      out += "&Shaffer";
    }
    out += "\\\\\n\\hline";

    for (i = 0; i < order.length; i++) {
      out +=
          (order.length - i)
              + "&"
              + ordenAlgoritmos[order[i]]
              + "&"
              + nf6.format(Math.abs((ordenRankings[order[i]]) / SE))
              + "&"
              + nf6.format(Pi[i]);
      if (Holm) {
        out += "&" + nf6.format(ALPHAiHolm[i]);
      }
      if (Scha) {
        out += "&" + nf6.format(ALPHAiShaffer[i]);
      }
      out += "\\\\\n";
    }

    out +=
        "\\hline\n"
            + "\\end{tabular}\n\\caption{P-values Table for $\\alpha=0.10$}\n"
            + "\\end{table}";

    /*Compute the rejected hypotheses for each test*/

    if (Nemenyi) {
      out +=
          "Nemenyi's procedure rejects those hypotheses that have a p-value $\\le"
              + nf6.format(0.10 / (double) (order.length))
              + "$.\n\n";
    }

    if (Holm) {
      parar = false;

      for (i = 0; i < order.length && !parar; i++) {
        if (Pi[i] > ALPHAiHolm[i]) {
          out +=
              "Holm's procedure rejects those hypotheses that have a p-value $\\le"
                  + nf6.format(ALPHAiHolm[i])
                  + "$.\n\n";
          parar = true;
        }
      }
    }

    if (Scha) {
      parar = false;
      for (i = 0; i < order.length && !parar; i++) {
        if (Pi[i] <= ALPHAiShaffer[i]) {
          out +=
              "Shaffer's procedure rejects those hypotheses that have a p-value $\\le"
                  + nf6.format(ALPHAiShaffer[i])
                  + "$.\n\n";
          parar = true;
        }
      }
    }

    /*For Bergmann-Hommel's procedure, nine or more algorithms can involve intense computation*/
    if (algorithmName.length <= MAX_ALGORITHMS) {

      indices.removeAllElements();
      for (i = 0; i < algorithmName.length; i++) {
        indices.add(new Integer(i));
      }

      exhaustiveI = obtainExhaustive(indices);
      cuadro = new boolean[algorithmName.length][algorithmName.length];

      for (i = 0; i < algorithmName.length; i++) {
        Arrays.fill(cuadro[i], false);
      }

      for (i = 0; i < exhaustiveI.size(); i++) {
        minPi =
            2
                * CDF_Normal.normp(
                    (-1)
                        * Math.abs(
                            Rj[
                                    ((Relation)
                                            ((Vector<Relation>) exhaustiveI.elementAt(i))
                                                .elementAt(0))
                                        .i]
                                - Rj[
                                    ((Relation)
                                            ((Vector<Relation>) exhaustiveI.elementAt(i))
                                                .elementAt(0))
                                        .j])
                        / SE);
        for (j = 1; j < ((Vector<Relation>) exhaustiveI.elementAt(i)).size(); j++) {
          tmpPi =
              2
                  * CDF_Normal.normp(
                      (-1)
                          * Math.abs(
                              Rj[
                                      ((Relation)
                                              ((Vector<Relation>) exhaustiveI.elementAt(i))
                                                  .elementAt(j))
                                          .i]
                                  - Rj[
                                      ((Relation)
                                              ((Vector<Relation>) exhaustiveI.elementAt(i))
                                                  .elementAt(j))
                                          .j])
                          / SE);
          if (tmpPi < minPi) {
            minPi = tmpPi;
          }
        }

        if (minPi > 0.1 / ((double) ((Vector<Relation>) exhaustiveI.elementAt(i)).size())) {
          for (j = 0; j < ((Vector<Relation>) exhaustiveI.elementAt(i)).size(); j++) {
            cuadro[((Relation) ((Vector<Relation>) exhaustiveI.elementAt(i)).elementAt(j)).i][
                    ((Relation) ((Vector<Relation>) exhaustiveI.elementAt(i)).elementAt(j)).j] =
                true;
          }
        }
      }

      if (Berg) {

        cad = "";
        cad += "Bergmann's procedure rejects these hypotheses:\n\n";
        cad += "\\begin{itemize}\n\n";

        counter = 0;
        for (i = 0; i < cuadro.length; i++) {
          for (j = i + 1; j < cuadro.length; j++) {
            if (cuadro[i][j] == false) {
              cad += "\\item " + algorithmName[i] + " vs. " + algorithmName[j] + "\n\n";
              counter++;
            }
          }
        }
        cad += "\\end{itemize}\n\n";

        if (counter > 0) {
          out += cad;
        } else {
          out += "Bergmann's procedure does not reject any hypotheses.\n\n";
        }
      }
    }

    out += "\\pagebreak\n\n";

    /** ********** ADJUSTED P-VALUES NxN COMPARISON ************* */
    out += "\\subsection{Adjusted p-values}\n\n";

    adjustedP = new double[Pi.length][4];
    pos = 0;
    tmp = Tarray.length - 1;

    for (i = 0; i < adjustedP.length; i++) {
      adjustedP[i][0] = Pi[i] * (double) (adjustedP.length);
      adjustedP[i][1] = Pi[i] * (double) (adjustedP.length - i);
      adjustedP[i][2] = Pi[i] * ((double) adjustedP.length - (double) Math.max(pos, i));

      if (i == pos) {
        tmp--;
        pos = (int) combinatoria(2, algorithmName.length) - Tarray[tmp];
      }

      if (algorithmName.length <= MAX_ALGORITHMS) {
        maxAPi = Double.MIN_VALUE;
        minPi = Double.MAX_VALUE;
        for (j = 0; j < exhaustiveI.size(); j++) {
          if (exhaustiveI.elementAt(j).toString().contains(parejitas[order[i]].toString())) {
            minPi =
                2
                    * CDF_Normal.normp(
                        (-1)
                            * Math.abs(
                                Rj[
                                        ((Relation)
                                                ((Vector<Relation>) exhaustiveI.elementAt(j))
                                                    .elementAt(0))
                                            .i]
                                    - Rj[
                                        ((Relation)
                                                ((Vector<Relation>) exhaustiveI.elementAt(j))
                                                    .elementAt(0))
                                            .j])
                            / SE);
            for (k = 1; k < ((Vector<Relation>) exhaustiveI.elementAt(j)).size(); k++) {
              tmpPi =
                  2
                      * CDF_Normal.normp(
                          (-1)
                              * Math.abs(
                                  Rj[
                                          ((Relation)
                                                  ((Vector<Relation>) exhaustiveI.elementAt(j))
                                                      .elementAt(k))
                                              .i]
                                      - Rj[
                                          ((Relation)
                                                  ((Vector<Relation>) exhaustiveI.elementAt(j))
                                                      .elementAt(k))
                                              .j])
                              / SE);
              if (tmpPi < minPi) {
                minPi = tmpPi;
              }
            }
            tmpAPi = minPi * (double) (((Vector<Relation>) exhaustiveI.elementAt(j)).size());
            if (tmpAPi > maxAPi) {
              maxAPi = tmpAPi;
            }
          }
        }

        adjustedP[i][3] = maxAPi;
      }
    }
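    /* Enforce monotonicity: step-down adjusted p-values may not decrease as i grows. */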

    for (i = 1; i < adjustedP.length; i++) {
      if (adjustedP[i][1] < adjustedP[i - 1][1]) adjustedP[i][1] = adjustedP[i - 1][1];
      if (adjustedP[i][2] < adjustedP[i - 1][2]) adjustedP[i][2] = adjustedP[i - 1][2];
      if (adjustedP[i][3] < adjustedP[i - 1][3]) adjustedP[i][3] = adjustedP[i - 1][3];
    }

    count = 3;

    if (Nemenyi) {
      count++;
    }
    if (Holm) {
      count++;
    }
    if (Scha) {
      count++;
    }
    if (Berg) {
      count++;
    }

    out +=
        "\\begin{table}[!htp]\n\\centering\\scriptsize\n"
            + "\\begin{tabular}{"
            + printC(count)
            + "}\n"
            + "i&hypothesis&unadjusted $p$";

    if (Nemenyi) {
      out += "&$p_{Neme}$";
    }
    if (Holm) {
      out += "&$p_{Holm}$";
    }
    if (Scha) {
      out += "&$p_{Shaf}$";
    }
    if (Berg) {
      out += "&$p_{Berg}$";
    }

    out += "\\\\\n\\hline";

    for (i = 0; i < Pi.length; i++) {
      out +=
          (i + 1)
              + "&"
              + algorithmName[parejitas[order[i]].i]
              + " vs ."
              + algorithmName[parejitas[order[i]].j]
              + "&"
              + nf6.format(Pi[i]);
      if (Nemenyi) {
        out += "&" + nf6.format(adjustedP[i][0]);
      }
      if (Holm) {
        out += "&" + nf6.format(adjustedP[i][1]);
      }
      if (Scha) {
        out += "&" + nf6.format(adjustedP[i][2]);
      }
      if (Berg) {
        out += "&" + nf6.format(adjustedP[i][3]);
      }

      out += "\\\\\n";
    }

    out += "\\hline\n" + "\\end{tabular}\n\\caption{Adjusted $p$-values}\n" + "\\end{table}\n\n";
    out += "\\end{landscape}\n\\end{document}";

    return out;
  } // end-method
Example #7
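The source provides no documentation for this method either; a hedged description, inferred from the code below:

  /**
   * Presumably computes the standardized residual of a condition for a given class:
   * (observed count - expected count) / sqrt(expected count), where the expected count comes
   * from computeEAipAjq (description inferred from the code, not from the original
   * documentation).
   *
   * @param data The dataset
   * @param classData The class of each example
   * @param cond The condition under evaluation
   * @param clase The class index
   * @return The standardized residual as a double
   */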
  private double computeStandarizedResidual(
      int data[][], int classData[], Condition cond, int clase) {
    double tmp = computeEAipAjq(data, classData, cond, clase);

    return (computeCountAipAjq(data, classData, cond, clase) - tmp) / Math.sqrt(tmp);
  }