/** Creates the total selector's set for get all the possible rules */ private Complejo hazSelectores(Dataset train) { Complejo almacenSelectores; int nClases = train.getnclases(); almacenSelectores = new Complejo(nClases); // Aqui voy a almacenar los selectores (numVariable,operador,valor) Attribute[] atributos = null; int num_atributos, type; Vector nominalValues; atributos = Attributes.getAttributes(); num_atributos = Attributes.getNumAttributes(); Selector s; for (int i = 0; i < train.getnentradas(); i++) { type = atributos[i].getType(); switch (type) { case 0: // NOMINAL nominalValues = atributos[i].getNominalValuesList(); // System.out.print("{"); for (int j = 0; j < nominalValues.size(); j++) { // System.out.print ((String)nominalValues.elementAt(j)+" "); s = new Selector(i, 0, (String) nominalValues.elementAt(j), true); // [atr,op,valor] // incluimos tb los valores en double para facilitar algunas funciones s.setValor((double) j); almacenSelectores.addSelector(s); // s.print(); } // System.out.println("}"); break; } // System.out.println(num_atributos); } return almacenSelectores; }
/** * Process a dataset file for a clustering problem. * * @param nfexamples Name of the dataset file * @param train The dataset file is for training or for test * @throws java.io.IOException if there is any semantical, lexical or sintactical error in the * input file. */ public void processClusterDataset(String nfexamples, boolean train) throws IOException { try { // Load in memory a dataset that contains a classification problem IS.readSet(nfexamples, train); nData = IS.getNumInstances(); nInputs = Attributes.getInputNumAttributes(); nVariables = nInputs + Attributes.getOutputNumAttributes(); if (Attributes.getOutputNumAttributes() != 0) { System.out.println("This algorithm can not process datasets with outputs"); System.out.println("All outputs will be removed"); } // Initialize and fill our own tables X = new double[nData][nInputs]; missing = new boolean[nData][nInputs]; // Maximum and minimum of inputs iMaximum = new double[nInputs]; iMinimum = new double[nInputs]; // Maximum and minimum for output data oMaximum = 0; oMinimum = 0; // All values are casted into double/integer nClasses = 0; for (int i = 0; i < X.length; i++) { Instance inst = IS.getInstance(i); for (int j = 0; j < nInputs; j++) { X[i][j] = IS.getInputNumericValue(i, j); missing[i][j] = inst.getInputMissingValues(j); if (X[i][j] > iMaximum[j] || i == 0) { iMaximum[j] = X[i][j]; } if (X[i][j] < iMinimum[j] || i == 0) { iMinimum[j] = X[i][j]; } } } } catch (Exception e) { System.out.println("DBG: Exception in readSet"); e.printStackTrace(); } }
/**
 * Method interface for Automatic Branch and Bound: runs the ABB algorithm, builds a
 * textual report (selected features and train/test error rates), writes it to the
 * extra output file and generates the reduced training/test datasets containing only
 * the selected features.
 */
public void ejecutar() {
  // Accumulate the report in a StringBuilder instead of repeated String concatenation
  StringBuilder resultado = new StringBuilder();
  Date d = new Date();
  resultado
      .append("RESULTS generated at ")
      .append(d)
      .append(" \n--------------------------------------------------\n")
      .append("Algorithm Name: ")
      .append(params.nameAlgorithm)
      .append("\n");
  /* call of ABB algorithm */
  runABB();
  resultado
      .append("\nPARTITION Filename: ")
      .append(params.trainFileNameInput)
      .append("\n---------------\n\n")
      .append("Features selected: \n");
  int numFeatures = 0;
  for (int i = 0; i < features.length; i++) {
    if (features[i]) { // feature kept by ABB
      resultado.append(Attributes.getInputAttribute(i).getName()).append(" - ");
      numFeatures++;
    }
  }
  resultado
      .append("\n\n")
      .append(numFeatures)
      .append(" features of ")
      .append(Attributes.getInputNumAttributes())
      .append("\n\n");
  resultado
      .append("Error in test (using train for prediction): ")
      .append(data.validacionCruzada(features))
      .append("\n");
  resultado
      .append("Error in test (using test for prediction): ")
      .append(data.LVOTest(features))
      .append("\n");
  resultado.append("---------------\n");
  System.out.println("Experiment completed successfully");
  /* creates the new training and test datasets only with the selected features */
  Files.writeFile(params.extraFileNameOutput, resultado.toString());
  data.generarFicherosSalida(params.trainFileNameOutput, params.testFileNameOutput, features);
}
/** * Fill TableVar with the characteristics of the variables and creates characteristics and * intervals for the fuzzy sets * * @param size Number of variables of the dataset */ public void Load(int size) { num_vars = size; // Stores the number of variables of the dataset var = new TypeVar[num_vars]; // Creates space for the structure // For each variable of the dataset for (int i = 0; i < num_vars; i++) { var[i] = new TypeVar(); // Creates space for the variable chars var[i].setName(Attributes.getInputAttribute(i).getName()); if (Attributes.getInputAttribute(i).getType() == Attribute.NOMINAL) { var[i].setType('e'); var[i].setContinuous(false); var[i].initValues(Attributes.getInputAttribute(i).getNominalValuesList()); var[i].setMin( 0); // Enumerated values are translated into values from 0 to number of elements - 1 var[i].setMax(Attributes.getInputAttribute(i).getNumNominalValues() - 1); var[i].setNLabels(Attributes.getInputAttribute(i).getNumNominalValues()); // Update max number of values for discrete vars if (var[i].getNLabels() > MaxValores) MaxValores = var[i].getNLabels(); } else if (Attributes.getInputAttribute(i).getType() == Attribute.REAL) { // Real: Continuous type var[i].setType('r'); var[i].setContinuous(true); var[i].setMin((float) Attributes.getInputAttribute(i).getMinAttribute()); var[i].setMax((float) Attributes.getInputAttribute(i).getMaxAttribute()); var[i].setNLabels(n_etiq); // Update the max number of labels for cont variables and number of values if (var[i].getNLabels() > MaxEtiquetas) MaxEtiquetas = var[i].getNLabels(); if (var[i].getNLabels() > MaxValores) MaxValores = var[i].getNLabels(); } else { // Integer: Continuous type var[i].setType('i'); var[i].setContinuous(true); var[i].setMin((float) Attributes.getInputAttribute(i).getMinAttribute()); var[i].setMax((float) Attributes.getInputAttribute(i).getMaxAttribute()); var[i].setNLabels(n_etiq); // Update the max number of labels for cont variables and number of values if 
(var[i].getNLabels() > MaxEtiquetas) MaxEtiquetas = var[i].getNLabels(); if (var[i].getNLabels() > MaxValores) MaxValores = var[i].getNLabels(); } } // Creates "Fuzzy" characteristics and intervals BaseDatos = new Fuzzy[num_vars][MaxValores]; for (int x = 0; x < num_vars; x++) for (int y = 0; y < MaxValores; y++) BaseDatos[x][y] = new Fuzzy(); }
/**
 * Creates a boolean array with all values set to true.
 *
 * @return a boolean vector with one slot per input attribute, all true
 */
private boolean[] startSolution() {
  boolean[] fv = new boolean[Attributes.getInputNumAttributes()];
  java.util.Arrays.fill(fv, true);
  return fv;
}
/**
 * Builds the data matrices for the test set and normalizes its input values.
 *
 * <p>Nominal inputs are scaled by dividing by (number of nominal values - 1); numeric
 * inputs are min-max normalized with the attribute's declared range. Missing input
 * values are imputed with 0 before normalization. If the dataset does not correspond
 * to a classification problem (no output, several outputs, or a real-valued output)
 * the program exits with -1.
 *
 * <p>Constant attributes (max == min, or a single nominal value) would produce NaN;
 * in that case the raw value is kept, mirroring the guard used by {@code normalizar()}
 * for the training set.
 */
private void normalizarTest() {
  /* Check if the dataset corresponds to a classification problem */
  if (Attributes.getOutputNumAttributes() < 1) {
    System.err.println(
        "This dataset haven´t outputs, so it not corresponding to a classification problem.");
    System.exit(-1);
  } else if (Attributes.getOutputNumAttributes() > 1) {
    System.err.println("This dataset have more of one output.");
    System.exit(-1);
  }
  if (Attributes.getOutputAttribute(0).getType() == Attribute.REAL) {
    System.err.println(
        "This dataset have an input attribute with floating values, so it not corresponding to a classification problem.");
    System.exit(-1);
  }
  datosTest = new double[test.getNumInstances()][Attributes.getInputNumAttributes()];
  clasesTest = new int[test.getNumInstances()];
  double[] caja;
  for (int i = 0; i < test.getNumInstances(); i++) {
    Instance temp = test.getInstance(i);
    boolean[] nulls = temp.getInputMissingValues();
    datosTest[i] = test.getInstance(i).getAllInputValues();
    // Missing inputs are imputed with 0
    for (int j = 0; j < nulls.length; j++) if (nulls[j]) datosTest[i][j] = 0.0;
    caja = test.getInstance(i).getAllOutputValues();
    clasesTest[i] = (int) caja[0];
    for (int k = 0; k < datosTest[i].length; k++) {
      double raw = datosTest[i][k]; // pre-normalization value, kept if the scaling degenerates
      if (Attributes.getInputAttribute(k).getType() == Attribute.NOMINAL) {
        datosTest[i][k] /= Attributes.getInputAttribute(k).getNominalValuesList().size() - 1;
      } else {
        datosTest[i][k] -= Attributes.getInputAttribute(k).getMinAttribute();
        datosTest[i][k] /=
            Attributes.getInputAttribute(k).getMaxAttribute()
                - Attributes.getInputAttribute(k).getMinAttribute();
      }
      // Constant attribute -> 0/0 = NaN; fall back to the raw value (as normalizar() does)
      if (Double.isNaN(datosTest[i][k])) {
        datosTest[i][k] = raw;
      }
    }
  }
}
/**
 * Builds the data matrices for the reference set and normalizes its input values.
 *
 * <p>Nominal inputs are divided by (number of nominal values - 1); numeric inputs are
 * min-max normalized with the attribute's declared range. Missing input values are
 * imputed with 0 before normalization.
 *
 * @throws CheckException if the dataset does not correspond to a classification
 *     problem (no output attribute, more than one output, or a real-valued output)
 */
private void normalizarReferencia() throws CheckException {
  // NOTE(review): cont, hecho and tokens are declared but never used in this method
  int i, j, cont = 0, k;
  Instance temp;
  boolean hecho;
  double caja[];
  StringTokenizer tokens;
  boolean nulls[];
  /* Check if the dataset corresponds to a classification problem */
  if (Attributes.getOutputNumAttributes() < 1) {
    throw new CheckException(
        "This dataset haven´t outputs, so it not corresponding to a classification problem.");
  } else if (Attributes.getOutputNumAttributes() > 1) {
    throw new CheckException("This dataset have more of one output.");
  }
  if (Attributes.getOutputAttribute(0).getType() == Attribute.REAL) {
    throw new CheckException(
        "This dataset have an input attribute with floating values, so it not corresponding to a classification problem.");
  }
  datosReferencia = new double[referencia.getNumInstances()][Attributes.getInputNumAttributes()];
  clasesReferencia = new int[referencia.getNumInstances()];
  caja = new double[1];
  /* Fill the data and class arrays, imputing missing inputs with 0 */
  for (i = 0; i < referencia.getNumInstances(); i++) {
    temp = referencia.getInstance(i);
    nulls = temp.getInputMissingValues();
    datosReferencia[i] = referencia.getInstance(i).getAllInputValues();
    for (j = 0; j < nulls.length; j++) if (nulls[j]) datosReferencia[i][j] = 0.0;
    // The class label is the (single) output value, cast to int
    caja = referencia.getInstance(i).getAllOutputValues();
    clasesReferencia[i] = (int) caja[0];
    for (k = 0; k < datosReferencia[i].length; k++) {
      if (Attributes.getInputAttribute(k).getType() == Attribute.NOMINAL) {
        // Scale nominal index into [0,1]
        datosReferencia[i][k] /= Attributes.getInputAttribute(k).getNominalValuesList().size() - 1;
      } else {
        // Min-max normalization for numeric attributes
        // NOTE(review): if max == min this yields NaN; normalizar() guards this
        // for the train set — confirm whether the same guard is needed here
        datosReferencia[i][k] -= Attributes.getInputAttribute(k).getMinAttribute();
        datosReferencia[i][k] /=
            Attributes.getInputAttribute(k).getMaxAttribute()
                - Attributes.getInputAttribute(k).getMinAttribute();
      }
    }
  }
}
/** * Computes the distance between two instances (without previous normalization) * * @param i First instance * @param j Second instance * @return The Euclidean distance between i and j */ private double distance(Instance i, Instance j) { double dist = 0; int in = 0; int out = 0; for (int l = 0; l < nvariables; l++) { Attribute a = Attributes.getAttribute(l); direccion = a.getDirectionAttribute(); tipo = a.getType(); if (direccion == Attribute.INPUT) { if (tipo != Attribute.NOMINAL && !i.getInputMissingValues(in)) { // real value, apply euclidean distance dist += (i.getInputRealValues(in) - j.getInputRealValues(in)) * (i.getInputRealValues(in) - j.getInputRealValues(in)); } else { if (!i.getInputMissingValues(in) && i.getInputNominalValues(in) != j.getInputNominalValues(in)) dist += 1; } in++; } else { if (direccion == Attribute.OUTPUT) { if (tipo != Attribute.NOMINAL && !i.getOutputMissingValues(out)) { dist += (i.getOutputRealValues(out) - j.getOutputRealValues(out)) * (i.getOutputRealValues(out) - j.getOutputRealValues(out)); } else { if (!i.getOutputMissingValues(out) && i.getOutputNominalValues(out) != j.getOutputNominalValues(out)) dist += 1; } out++; } } } return dist; }
/**
 * Runs the LVQ (Learning Vector Quantization) algorithm end to end: determines the
 * number of classes, shuffles the training set, selects an initial neuron set (at
 * least one per class when possible), iteratively attracts/repels the nearest neuron
 * for each training instance until convergence or T iterations, then classifies the
 * reference and test sets with a 1-NN rule over the trained neurons and writes the
 * classification results and the network to the output files.
 */
public void ejecutar() {
  int i, j, l, m;
  double alfai;
  int nClases;
  int claseObt;
  boolean marcas[];
  boolean notFound;
  int init;
  int clasSel[];
  int baraje[];
  int pos, tmp;
  String instanciasIN[];
  String instanciasOUT[];
  long tiempo = System.currentTimeMillis();

  /* Getting the number of different classes (labels assumed to be 0..max) */
  nClases = 0;
  for (i = 0; i < clasesTrain.length; i++) if (clasesTrain[i] > nClases) nClases = clasesTrain[i];
  nClases++;

  /* Shuffle the train set (Fisher-Yates over the index permutation "baraje") */
  baraje = new int[datosTrain.length];
  Randomize.setSeed(semilla);
  for (i = 0; i < datosTrain.length; i++) baraje[i] = i;
  for (i = 0; i < datosTrain.length; i++) {
    pos = Randomize.Randint(i, datosTrain.length - 1);
    tmp = baraje[i];
    baraje[i] = baraje[pos];
    baraje[pos] = tmp;
  }

  /*
   * Inicialization of the flagged instaces vector for a posterior
   * elimination
   */
  marcas = new boolean[datosTrain.length];
  for (i = 0; i < datosTrain.length; i++) marcas[i] = false;
  if (datosTrain.length > 0) {
    // marcas[baraje[0]] = true; //the first instance is included always
    // Number of neurons: n_p, but never fewer than the number of classes
    nSel = n_p;
    if (nSel < nClases) nSel = nClases;
  } else {
    System.err.println("Input dataset is empty");
    nSel = 0;
  }
  clasSel = new int[nClases];
  System.out.print("Selecting initial neurons... ");

  // at least, there must be 1 neuron of each class at the beginning
  init = nClases;
  for (i = 0; i < nClases && i < datosTrain.length; i++) {
    // Linear probing from a random start until an unmarked instance of class i is found
    pos = Randomize.Randint(0, datosTrain.length - 1);
    tmp = 0;
    while ((clasesTrain[pos] != i || marcas[pos]) && tmp < datosTrain.length) {
      pos = (pos + 1) % datosTrain.length;
      tmp++;
    }
    // If no instance of class i exists, one fewer initial neuron is used
    if (tmp < datosTrain.length) marcas[pos] = true;
    else init--;
    // clasSel[i] = i;
  }
  // Fill the remaining neuron slots with random unmarked instances of any class
  for (i = init; i < Math.min(nSel, datosTrain.length); i++) {
    tmp = 0;
    pos = Randomize.Randint(0, datosTrain.length - 1);
    while (marcas[pos]) {
      pos = (pos + 1) % datosTrain.length;
      tmp++;
    }
    // if(i<nClases){
    // notFound = true;
    // do{
    // for(j=i-1;j>=0 && notFound;j--){
    // if(clasSel[j] == clasesTrain[pos])
    // notFound = false;
    // }
    // if(!notFound)
    // pos = Randomize.Randint (0, datosTrain.length-1);
    // }while(!notFound);
    // }
    // clasSel[i] = clasesTrain[pos];
    marcas[pos] = true;
    init++;
  }
  nSel = init;
  System.out.println("Initial neurons selected: " + nSel);

  /* Building of the S set (neuron positions and classes) from the flags */
  conjS = new double[nSel][datosTrain[0].length];
  clasesS = new int[nSel];
  for (m = 0, l = 0; m < datosTrain.length; m++) {
    if (marcas[m]) { // the instance must be copied to the solution
      for (j = 0; j < datosTrain[0].length; j++) {
        conjS[l][j] = datosTrain[m][j];
      }
      clasesS[l] = clasesTrain[m];
      l++;
    }
  }

  alfai = alpha; // remember the initial learning rate; it is reset every epoch
  boolean change = true;
  /* Body of the LVQ algorithm. */
  // Train the network: stop after T epochs or when an epoch makes no repulsion
  for (int it = 0; it < T && change; it++) {
    change = false;
    alpha = alfai;
    // NOTE(review): index starts at 1, so the first shuffled instance of each
    // epoch is skipped — confirm whether this is intentional
    for (i = 1; i < datosTrain.length; i++) {
      // search for the nearest neuron to training instance
      pos = NN(nSel, conjS, datosTrain[baraje[i]]);
      // nearest neuron labels correctly the class of training instance?
      if (clasesS[pos] != clasesTrain[baraje[i]]) { // NO - repel the neuron
        for (j = 0; j < conjS[pos].length; j++) {
          conjS[pos][j] = conjS[pos][j] - alpha * (datosTrain[baraje[i]][j] - conjS[pos][j]);
        }
        change = true;
      } else { // YES - migrate the neuron towards the input vector
        for (j = 0; j < conjS[pos].length; j++) {
          conjS[pos][j] = conjS[pos][j] + alpha * (datosTrain[baraje[i]][j] - conjS[pos][j]);
        }
      }
      // Decay the learning rate after every presented instance
      alpha = nu * alpha;
    }
    // Shuffle again the training partition
    baraje = new int[datosTrain.length];
    for (i = 0; i < datosTrain.length; i++) baraje[i] = i;
    for (i = 0; i < datosTrain.length; i++) {
      pos = Randomize.Randint(i, datosTrain.length - 1);
      tmp = baraje[i];
      baraje[i] = baraje[pos];
      baraje[pos] = tmp;
    }
  }
  System.out.println(
      "LVQ " + relation + " " + (double) (System.currentTimeMillis() - tiempo) / 1000.0 + "s");

  // Classify the train (reference) data set with 1-NN over the trained neurons
  instanciasIN = new String[datosReferencia.length];
  instanciasOUT = new String[datosReferencia.length];
  for (i = 0; i < datosReferencia.length; i++) {
    /* Classify the instance selected in this iteration */
    Attribute a = Attributes.getOutputAttribute(0);
    int tipo = a.getType();
    claseObt = KNN.evaluacionKNN2(1, conjS, clasesS, datosReferencia[i], nClases);
    if (tipo != Attribute.NOMINAL) {
      // Numeric output: emit the raw class indices
      instanciasIN[i] = new String(String.valueOf(clasesReferencia[i]));
      instanciasOUT[i] = new String(String.valueOf(claseObt));
    } else {
      // Nominal output: translate class indices back to their nominal labels
      instanciasIN[i] = new String(a.getNominalValue(clasesReferencia[i]));
      instanciasOUT[i] = new String(a.getNominalValue(claseObt));
    }
  }
  escribeSalida(
      ficheroSalida[0], instanciasIN, instanciasOUT, entradas, salida, nEntradas, relation);

  // Classify the test data set (normalized on demand here)
  normalizarTest();
  instanciasIN = new String[datosTest.length];
  instanciasOUT = new String[datosTest.length];
  for (i = 0; i < datosTest.length; i++) {
    /* Classify the instance selected in this iteration */
    Attribute a = Attributes.getOutputAttribute(0);
    int tipo = a.getType();
    claseObt = KNN.evaluacionKNN2(1, conjS, clasesS, datosTest[i], nClases);
    if (tipo != Attribute.NOMINAL) {
      instanciasIN[i] = new String(String.valueOf(clasesTest[i]));
      instanciasOUT[i] = new String(String.valueOf(claseObt));
    } else {
      instanciasIN[i] = new String(a.getNominalValue(clasesTest[i]));
      instanciasOUT[i] = new String(a.getNominalValue(claseObt));
    }
  }
  escribeSalida(
      ficheroSalida[1], instanciasIN, instanciasOUT, entradas, salida, nEntradas, relation);

  // Print the network to a file
  printNetworkToFile(ficheroSalida[2], referencia.getHeader());
}
/**
 * This function builds the data matrices for the training set and normalizes its
 * input values, then builds the (unnormalized) test matrices.
 *
 * <p>For the training set: nominal inputs are divided by (number of nominal values
 * - 1), numeric inputs are min-max normalized with the attribute's range; when the
 * range is degenerate (max == min) the division yields NaN and the raw value is
 * restored. Missing inputs are imputed with 0 and flagged in {@code nulosTrain}.
 * The raw nominal index and raw real value are kept in {@code nominalTrain} /
 * {@code realTrain}. For the test set only the matrices, classes and missing
 * imputations are built — inputs are NOT normalized here (presumably done
 * elsewhere, e.g. by normalizarTest(); confirm against callers).
 *
 * @throws keel.Algorithms.Preprocess.Basic.CheckException Can not be normalized.
 */
protected void normalizar() throws CheckException {
  int i, j, k;
  Instance temp;
  double caja[];
  StringTokenizer tokens;
  boolean nulls[];
  /*Check if dataset corresponding with a classification problem*/
  if (Attributes.getOutputNumAttributes() < 1) {
    throw new CheckException(
        "This dataset haven?t outputs, so it not corresponding to a classification problem.");
  } else if (Attributes.getOutputNumAttributes() > 1) {
    throw new CheckException("This dataset have more of one output.");
  }
  if (Attributes.getOutputAttribute(0).getType() == Attribute.REAL) {
    throw new CheckException(
        "This dataset have an input attribute with floating values, so it not corresponding to a classification problem.");
  }
  entradas = Attributes.getInputAttributes();
  salida = Attributes.getOutputAttribute(0);
  nEntradas = Attributes.getInputNumAttributes();
  // The relation name is the second token of the header ("@relation <name>")
  tokens = new StringTokenizer(training.getHeader(), " \n\r");
  tokens.nextToken();
  relation = tokens.nextToken();
  datosTrain = new double[training.getNumInstances()][Attributes.getInputNumAttributes()];
  clasesTrain = new int[training.getNumInstances()];
  caja = new double[1];
  // Parallel tables: missing flags, raw nominal indices and raw real values
  nulosTrain = new boolean[training.getNumInstances()][Attributes.getInputNumAttributes()];
  nominalTrain = new int[training.getNumInstances()][Attributes.getInputNumAttributes()];
  realTrain = new double[training.getNumInstances()][Attributes.getInputNumAttributes()];
  for (i = 0; i < training.getNumInstances(); i++) {
    temp = training.getInstance(i);
    nulls = temp.getInputMissingValues();
    datosTrain[i] = training.getInstance(i).getAllInputValues();
    // Impute missing inputs with 0 and remember which ones were missing
    for (j = 0; j < nulls.length; j++)
      if (nulls[j]) {
        datosTrain[i][j] = 0.0;
        nulosTrain[i][j] = true;
      }
    // Class label: the single output value, cast to int
    caja = training.getInstance(i).getAllOutputValues();
    clasesTrain[i] = (int) caja[0];
    for (k = 0; k < datosTrain[i].length; k++) {
      if (Attributes.getInputAttribute(k).getType() == Attribute.NOMINAL) {
        // Keep the raw nominal index, then scale into [0,1]
        nominalTrain[i][k] = (int) datosTrain[i][k];
        datosTrain[i][k] /= Attributes.getInputAttribute(k).getNominalValuesList().size() - 1;
      } else {
        // Keep the raw real value, then min-max normalize
        realTrain[i][k] = datosTrain[i][k];
        datosTrain[i][k] -= Attributes.getInputAttribute(k).getMinAttribute();
        datosTrain[i][k] /=
            Attributes.getInputAttribute(k).getMaxAttribute()
                - Attributes.getInputAttribute(k).getMinAttribute();
        // Degenerate range (max == min) produces NaN: restore the raw value
        if (Double.isNaN(datosTrain[i][k])) {
          datosTrain[i][k] = realTrain[i][k];
        }
      }
    }
  }
  // Test set: matrices and classes only; inputs are not normalized in this method
  datosTest = new double[test.getNumInstances()][Attributes.getInputNumAttributes()];
  clasesTest = new int[test.getNumInstances()];
  caja = new double[1];
  for (i = 0; i < test.getNumInstances(); i++) {
    temp = test.getInstance(i);
    nulls = temp.getInputMissingValues();
    datosTest[i] = test.getInstance(i).getAllInputValues();
    for (j = 0; j < nulls.length; j++)
      if (nulls[j]) {
        datosTest[i][j] = 0.0;
      }
    caja = test.getInstance(i).getAllOutputValues();
    clasesTest[i] = (int) caja[0];
  }
} // end-method
/** * Process a dataset file for a classification problem. * * @param nfejemplos Name of the dataset file * @param train The dataset file is for training or for test * @throws java.io.IOException if there is any semantical, lexical or sintactical error in the * input file. */ public void processClassifierDataset(String nfejemplos, boolean train) throws IOException { try { // Load in memory a dataset that contains a classification problem IS.readSet(nfejemplos, train); nData = IS.getNumInstances(); nInputs = Attributes.getInputNumAttributes(); nVariables = nInputs + Attributes.getOutputNumAttributes(); // Check that there is only one output variable and // it is nominal if (Attributes.getOutputNumAttributes() > 1) { System.out.println("This algorithm can not process MIMO datasets"); System.out.println("All outputs but the first one will be removed"); } boolean noOutputs = false; if (Attributes.getOutputNumAttributes() < 1) { System.out.println("This algorithm can not process datasets without outputs"); System.out.println("Zero-valued output generated"); noOutputs = true; } // Initialize and fill our own tables X = new double[nData][nInputs]; missing = new boolean[nData][nInputs]; C = new int[nData]; // Maximum and minimum of inputs iMaximum = new double[nInputs]; iMinimum = new double[nInputs]; // Maximum and minimum for output data oMaximum = 0; oMinimum = 0; // All values are casted into double/integer nClasses = 0; for (int i = 0; i < X.length; i++) { Instance inst = IS.getInstance(i); for (int j = 0; j < nInputs; j++) { X[i][j] = IS.getInputNumericValue(i, j); missing[i][j] = inst.getInputMissingValues(j); if (X[i][j] > iMaximum[j] || i == 0) { iMaximum[j] = X[i][j]; } if (X[i][j] < iMinimum[j] || i == 0) { iMinimum[j] = X[i][j]; } } if (noOutputs) { C[i] = 0; } else { C[i] = (int) IS.getOutputNumericValue(i, 0); } if (C[i] > nClasses) { nClasses = C[i]; } } nClasses++; System.out.println("Number of classes=" + nClasses); } catch (Exception e) { 
System.out.println("DBG: Exception in readSet"); e.printStackTrace(); } }
/**
 * Processes the training and test files provided in the parameters file to the
 * constructor: imputes missing values via k-means clustering. K centers are chosen
 * from (preferably complete) random instances, instances are assigned to clusters
 * and centers recomputed until the error stops improving or maxIter is reached;
 * each missing value is then replaced by its cluster center's value. The same
 * trained centers are reused to impute the test file.
 */
public void process() {
  // declarations
  double[] outputs;
  double[] outputs2;
  Instance neighbor;
  double dist, mean;
  int actual;
  Randomize rnd = new Randomize();
  Instance ex;
  gCenter kmeans = null;
  int iterations = 0;
  double E;
  double prevE;
  int totalMissing = 0;
  boolean allMissing = true;
  rnd.setSeed(semilla);
  // PROCESS
  try {
    // Load in memory a dataset that contains a classification problem
    IS.readSet(input_train_name, true);
    int in = 0;
    int out = 0;
    ndatos = IS.getNumInstances();
    nvariables = Attributes.getNumAttributes();
    nentradas = Attributes.getInputNumAttributes();
    nsalidas = Attributes.getOutputNumAttributes();
    X = new String[ndatos][nvariables]; // matrix with transformed data
    kmeans = new gCenter(K, ndatos, nvariables);
    timesSeen = new FreqList[nvariables];
    mostCommon = new String[nvariables];
    // first, we choose k 'means' randomly from all instances;
    // if every instance has a missing value, any instance may serve as a center
    totalMissing = 0;
    for (int i = 0; i < ndatos; i++) {
      Instance inst = IS.getInstance(i);
      if (inst.existsAnyMissingValue()) totalMissing++;
    }
    if (totalMissing == ndatos) allMissing = true;
    else allMissing = false;
    for (int numMeans = 0; numMeans < K; numMeans++) {
      do {
        actual = (int) (ndatos * rnd.Rand());
        ex = IS.getInstance(actual);
      } while (ex.existsAnyMissingValue() && !allMissing);
      kmeans.copyCenter(ex, numMeans);
    }
    // now, iterate adjusting clusters' centers and instances to them
    prevE = 0;
    iterations = 0;
    do {
      // Assign every instance to its nearest cluster
      for (int i = 0; i < ndatos; i++) {
        Instance inst = IS.getInstance(i);
        kmeans.setClusterOf(inst, i);
      }
      // set new centers
      kmeans.recalculateCenters(IS);
      // compute RMSE
      E = 0;
      for (int i = 0; i < ndatos; i++) {
        Instance inst = IS.getInstance(i);
        E += kmeans.distance(inst, kmeans.getClusterOf(i));
      }
      iterations++;
      // System.out.println(iterations+"\t"+E);
      // No improvement at all -> force termination on the next check
      if (Math.abs(prevE - E) == 0) iterations = maxIter;
      else prevE = E;
    } while (E > minError && iterations < maxIter);
    // Emit each value as text; missing values are replaced by the value of the
    // instance's cluster center
    for (int i = 0; i < ndatos; i++) {
      Instance inst = IS.getInstance(i);
      in = 0;
      out = 0;
      for (int j = 0; j < nvariables; j++) {
        Attribute a = Attributes.getAttribute(j);
        direccion = a.getDirectionAttribute();
        tipo = a.getType();
        if (direccion == Attribute.INPUT) {
          if (tipo != Attribute.NOMINAL && !inst.getInputMissingValues(in)) {
            X[i][j] = new String(String.valueOf(inst.getInputRealValues(in)));
          } else {
            if (!inst.getInputMissingValues(in)) X[i][j] = inst.getInputNominalValues(in);
            else {
              // Missing input: impute with the cluster center's value
              actual = kmeans.getClusterOf(i);
              X[i][j] = new String(kmeans.valueAt(actual, j));
            }
          }
          in++;
        } else {
          if (direccion == Attribute.OUTPUT) {
            if (tipo != Attribute.NOMINAL && !inst.getOutputMissingValues(out)) {
              X[i][j] = new String(String.valueOf(inst.getOutputRealValues(out)));
            } else {
              if (!inst.getOutputMissingValues(out)) X[i][j] = inst.getOutputNominalValues(out);
              else {
                actual = kmeans.getClusterOf(i);
                X[i][j] = new String(kmeans.valueAt(actual, j));
              }
            }
            out++;
          }
        }
      }
    }
  } catch (Exception e) {
    System.out.println("Dataset exception = " + e);
    e.printStackTrace();
    System.exit(-1);
  }
  write_results(output_train_name);
  /** ************************************************************************************ */
  // does a test file associated exist?
  if (input_train_name.compareTo(input_test_name) != 0) {
    try {
      // Load in memory a dataset that contains a classification problem
      IStest.readSet(input_test_name, false);
      int in = 0;
      int out = 0;
      // NOTE(review): X is not reallocated here — if the test set has more
      // instances than the training set this indexes out of bounds; confirm
      ndatos = IStest.getNumInstances();
      nvariables = Attributes.getNumAttributes();
      nentradas = Attributes.getInputNumAttributes();
      nsalidas = Attributes.getOutputNumAttributes();
      for (int i = 0; i < ndatos; i++) {
        Instance inst = IStest.getInstance(i);
        in = 0;
        out = 0;
        for (int j = 0; j < nvariables; j++) {
          Attribute a = Attributes.getAttribute(j);
          direccion = a.getDirectionAttribute();
          tipo = a.getType();
          if (direccion == Attribute.INPUT) {
            if (tipo != Attribute.NOMINAL && !inst.getInputMissingValues(in)) {
              X[i][j] = new String(String.valueOf(inst.getInputRealValues(in)));
            } else {
              if (!inst.getInputMissingValues(in)) X[i][j] = inst.getInputNominalValues(in);
              else {
                // NOTE(review): getClusterOf(i) looks up the cluster assigned to
                // TRAINING instance i, not this test instance — verify intent
                actual = kmeans.getClusterOf(i);
                X[i][j] = new String(kmeans.valueAt(actual, j));
              }
            }
            in++;
          } else {
            if (direccion == Attribute.OUTPUT) {
              if (tipo != Attribute.NOMINAL && !inst.getOutputMissingValues(out)) {
                X[i][j] = new String(String.valueOf(inst.getOutputRealValues(out)));
              } else {
                if (!inst.getOutputMissingValues(out)) X[i][j] = inst.getOutputNominalValues(out);
                else {
                  actual = kmeans.getClusterOf(i);
                  X[i][j] = new String(kmeans.valueAt(actual, j));
                }
              }
              out++;
            }
          }
        }
      }
    } catch (Exception e) {
      System.out.println("Dataset exception = " + e);
      e.printStackTrace();
      System.exit(-1);
    }
    write_results(output_test_name);
  }
}