/**
 * Computes the gradient of the MKL objective with respect to each kernel
 * weight (beta): grad[i] = 1/2 * sum_{x,y} y_x * y_y * alpha_x * alpha_y * K_i(x, y).
 *
 * @param kernels cached kernels, one entry per beta
 * @param l training samples; labels and the SVM alphas are indexed consistently with it
 * @return one partial derivative per kernel
 */
  private double[] computeGradBeta(
      ArrayList<SimpleCacheKernel<T>> kernels, List<TrainingSample<T>> l) {
    double[] grad = new double[kernels.size()];
    // the alphas do not depend on the kernel index: fetch them once instead
    // of once per kernel
    double[] a = svm.getAlphas();

    for (int i = 0; i < kernels.size(); i++) {
      double[][] matrix = kernels.get(i).getKernelMatrix(l);

      for (int x = 0; x < matrix.length; x++) {
        int l1 = l.get(x).label;
        for (int y = 0; y < matrix.length; y++) {
          int l2 = l.get(y).label;
          grad[i] += 0.5 * l1 * l2 * a[x] * a[y] * matrix[x][y];
        }
      }
    }

    debug.print(3, "gradDir : " + Arrays.toString(grad));

    return grad;
  }
  /**
   * Performs one p-norm MKL update step: computes candidate kernel weights
   * from the current gradient, normalizes them to unit p-norm, applies a
   * small regularization pulling them away from sparsity, renormalizes, and
   * stores the result back into {@code weights}.
   *
   * @param suma sum of the alphas of the current SVM solution
   * @param grad gradient of the objective w.r.t. each kernel weight
   * @param kernels kernel list (same size and order as {@code weights})
   * @param weights current kernel weights; overwritten with the new ones
   * @param l training list (not read here; kept for signature symmetry)
   * @return ratio of new objective to old objective, or -1 on a degenerate
   *     normalization or regularization
   */
  private double performMKLStep(
      double suma,
      double[] grad,
      ArrayList<SimpleCacheKernel<T>> kernels,
      ArrayList<Double> weights,
      List<TrainingSample<T>> l) {
    debug.print(2, ".");
    // objective before the step
    double oldObjective = mklObjective(suma, grad, weights);
    debug.println(3, "oldObjective : " + oldObjective + " sumAlpha : " + suma);

    // optimal (unnormalized) step for each weight:
    // beta_i = (grad_i * w_i^2 / p)^(1 / (1 + p))
    double[] newBeta = new double[grad.length];
    for (int i = 0; i < grad.length; i++) {
      if (grad[i] >= 0 && weights.get(i) >= 0) {
        newBeta[i] = grad[i] * weights.get(i) * weights.get(i) / p_norm;
        newBeta[i] = Math.pow(newBeta[i], 1 / ((double) 1 + p_norm));
      } else {
        newBeta[i] = 0;
      }
    }

    // normalize to unit p-norm
    if (!normalizePNorm(newBeta)) {
      return -1;
    }

    // regularize: shift every weight by R (proportional to the distance
    // between old and new weights), clean out tiny values, then renormalize
    double R = 0;
    for (int i = 0; i < kernels.size(); i++) {
      R += Math.pow(weights.get(i) - newBeta[i], 2);
    }
    R = Math.sqrt(R / (double) p_norm) * eps_regul;
    // R < 0 only if eps_regul is negative; also reject NaN explicitly
    if (R < 0 || Double.isNaN(R)) {
      debug.println(1, "Error regularization, R < 0");
      return -1;
    }
    for (int i = 0; i < kernels.size(); i++) {
      newBeta[i] += R;
      if (newBeta[i] < num_cleaning) {
        newBeta[i] = 0;
      }
    }
    if (!normalizePNorm(newBeta)) {
      return -1;
    }

    // store new weights
    for (int i = 0; i < weights.size(); i++) {
      weights.set(i, newBeta[i]);
    }

    // objective after the step
    double objective = mklObjective(suma, grad, weights);
    debug.println(3, "objective : " + objective + " sumAlpha : " + suma);

    // return objective evolution
    return objective / oldObjective;
  }

  /** MKL objective value: suma - sum_i w_i * grad_i. */
  private double mklObjective(double suma, double[] grad, List<Double> weights) {
    double obj = suma;
    for (int i = 0; i < grad.length; i++) {
      obj -= weights.get(i) * grad[i];
    }
    return obj;
  }

  /**
   * Scales {@code beta} in place to unit p-norm. Returns false when the norm
   * is degenerate (zero, NaN or infinite) and leaves {@code beta} untouched
   * in that case. BUGFIX: the previous "norm &lt; 0" test was dead code —
   * Math.pow of a non-negative sum is never negative — so a zero norm slipped
   * through and turned every weight into NaN/Infinity instead of triggering
   * the error path.
   */
  private boolean normalizePNorm(double[] beta) {
    double norm = 0;
    for (int i = 0; i < beta.length; i++) {
      norm += Math.pow(beta[i], p_norm);
    }
    // !(norm > 0) also catches NaN
    if (!(norm > 0) || Double.isInfinite(norm)) {
      debug.println(1, "Error normalization, degenerate norm");
      return false;
    }
    double scale = Math.pow(norm, -1 / (double) p_norm);
    for (int i = 0; i < beta.length; i++) {
      beta[i] *= scale;
    }
    return true;
  }