/**
 * This method is the access point to the planning procedure. Initially, it adds all variables
 * from axioms to the set of found vars, then performs linear planning. If linear planning does
 * not solve the problem and there are subtasks, goal-driven recursive planning with backtracking
 * is invoked. Planning is performed until no new variables are introduced into the algorithm.
 */
public EvaluationAlgorithm invokePlaning(Problem problem, boolean _computeAll) {
  long startTime = System.currentTimeMillis();

  computeAll = _computeAll;

  EvaluationAlgorithm algorithm = new EvaluationAlgorithm();
  PlanningContext context = problem.getCurrentContext();

  // add all axioms at the beginning of the algorithm
  Collection<Var> flattened = new HashSet<Var>();
  for (Iterator<Rel> axiomIter = problem.getAxioms().iterator(); axiomIter.hasNext(); ) {
    Rel rel = axiomIter.next();
    unfoldVarsToSet(rel.getOutputs(), flattened);

    // do not overwrite values of variables that come via args of compute() or as inputs of
    // independent subtasks
    if (!problem.getAssumptions().containsAll(flattened)
    // do not overwrite values of already known variables.
    // typically this is the case when a value of a variable
    // is given in a scheme via a properties window
    // && !problem.getKnownVars().containsAll( flattened )
    ) {
      algorithm.addRel(rel);
    }
    axiomIter.remove();
    context.getKnownVars().addAll(flattened);
    flattened.clear();
  }

  context.getFoundVars().addAll(context.getKnownVars());

  // remove all known vars with no relations
  for (Iterator<Var> varIter = context.getKnownVars().iterator(); varIter.hasNext(); ) {
    if (varIter.next().getRels().isEmpty()) {
      varIter.remove();
    }
  }

  // start planning
  if (problem.getRelsWithSubtasks().isEmpty()
      && linearForwardSearch(context, algorithm, computeAll)) {
    if (isLinearLoggingOn()) logger.debug("Problem solved without subtasks");
  } else if (!problem.getRelsWithSubtasks().isEmpty() && subtaskPlanning(problem, algorithm)) {
    if (isLinearLoggingOn()) logger.debug("Problem solved with subtasks");
  } else if (!computeAll) {
    if (isLinearLoggingOn()) logger.debug("Problem not solved");
  }

  if (!nested) {
    logger.info("Planning time: " + (System.currentTimeMillis() - startTime) + "ms.");
  }
  return algorithm;
}
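/**
 * Usage sketch (not part of the original source): an illustrative call to the planner entry
 * point above. The wrapper method name is hypothetical; the call itself mirrors the one made
 * for independent subtasks in subtaskPlanningImpl below.
 */
private static EvaluationAlgorithm planExample(Problem problem) {
  DepthFirstPlanner planner = new DepthFirstPlanner();
  // presumably, false means "compute only the specified goals" rather than everything computable
  return planner.invokePlaning(problem, false);
}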
public static void MLalgo() {
  try {
    Problem problem = new Problem();
    problem.l = train_count;       // number of training examples
    problem.n = max_feature_count; // number of features
    problem.x = train_matrix;      // feature nodes
    problem.y = ylable;            // target values

    SolverType solver = SolverType.L2R_LR; // -s 0
    double C = 1.0;    // cost of constraints violation
    double eps = 0.01; // stopping criterion

    Parameter parameter = new Parameter(solver, C, eps);
    model = Linear.train(problem, parameter);

    File modelFile = new File("model");
    model.save(modelFile);
    // load the model or use it directly
    model = Model.load(modelFile);
  } catch (Exception e) {
    e.printStackTrace();
  }
}
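/**
 * Prediction sketch (not part of the original source): once the model has been trained and
 * loaded as in MLalgo above, a single instance can be classified with liblinear's
 * Linear.predict. The feature indices and values below are made-up placeholders; liblinear
 * expects them in ascending index order.
 */
public static double predictExample() {
  Feature[] instance =
      new Feature[] {
        new FeatureNode(1, 0.5), // feature #1 with value 0.5 (placeholder)
        new FeatureNode(3, 1.0)  // feature #3 with value 1.0 (placeholder)
      };
  return Linear.predict(model, instance);
}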
void getInfo() {
  Scanner console = new Scanner(System.in);
  int maxCost = 0, maxSols = 0;
  Problem prob = null;
  Solver solve = null;
  ArrStack stk = new ArrStack();

  System.out.print("Enter problem type, solution type, " + "max cost and max # of solutions: ");
  try {
    prob = (Problem) Class.forName(console.next()).newInstance();
    solve = (Solver) Class.forName(console.next()).newInstance();
    maxCost = console.nextInt();
    maxSols = console.nextInt();
  } catch (Exception e) {
    System.out.println("" + e);
  }

  try {
    prob.read(console);
  } catch (Exception e) {
    System.out.println("Read error: " + e);
    return;
  }

  Solver.Solution[] sols;
  sols = solve.solveProblem(prob, maxCost, maxSols);
  if (sols == null) {
    System.out.println("No solutions");
    return;
  }

  System.out.println("Answers are: ");
  for (int ans = 0; ans < sols.length && sols[ans] != null; ans++) {
    System.out.println("Answer " + ans + " with cost " + (sols[ans].mSteps.length - 1));
    for (int stepNdx = 1; stepNdx < sols[ans].mSteps.length; stepNdx++)
      System.out.println("   " + sols[ans].mSteps[stepNdx]);
  }
}
private void mainClassifierFunction(int option, String trainFile, String testFile, String ddgFile)
    throws IOException {
  // SentimentClassifierHindi this = new SentimentClassifierHindi();
  // int finalSize = this.SentimentClassifierHindi();
  int finalSize = this.generateFeature(option, trainFile, testFile, ddgFile);
  System.out.println("Hello aspectCategorizationSemEval2016!");

  // Create features
  Problem problem = new Problem();

  // Save X to problem
  double a[] = new double[this.trainingFeature.size()];
  File file = new File(rootDirectory + "\\dataset\\trainingLabels.txt");
  BufferedReader reader = new BufferedReader(new FileReader(file));
  String read;
  int count = 0;
  while ((read = reader.readLine()) != null) {
    // System.out.println(read);
    a[count++] = Double.parseDouble(read.toString());
  }

  // Feature[][] f = new Feature[][]{ {}, {}, {}, {}, {}, {} };
  // trainingFeature = trainingObject.getList();
  Feature[][] trainFeatureVector = new Feature[trainingFeature.size()][finalSize];

  System.out.println("Training Instances: " + trainingFeature.size());
  System.out.println("Feature Length: " + finalSize);
  System.out.println("Test Instances: " + testFeature.size());

  for (int i = 0; i < trainingFeature.size(); i++) {
    // System.out.println(trainingFeature.get(i));
    System.out.println(i + " trained.");
    for (int j = 0; j < finalSize; j++) {
      // System.out.print(trainingFeature.get(i).get(j + 1) + " ");
      if (trainingFeature.get(i).containsKey(j + 1)) {
        // System.out.print(j + 1 + ", ");
        trainFeatureVector[i][j] = new FeatureNode(j + 1, trainingFeature.get(i).get(j + 1));
      } else {
        trainFeatureVector[i][j] = new FeatureNode(j + 1, 0.0);
      }
    }
  }

  problem.l = trainingFeature.size(); // number of training examples
  problem.n = finalSize;              // number of features
  problem.x = trainFeatureVector;     // feature nodes
  problem.y = a;                      // target values

  BasicParser bp = new BasicParser();

  SolverType solver = SolverType.L2R_LR; // -s 7
  double C = 0.75;     // cost of constraints violation
  double eps = 0.0001; // stopping criterion

  Parameter parameter = new Parameter(solver, C, eps);
  Model model = Linear.train(problem, parameter);
  File modelFile = new File("model");
  model.save(modelFile);

  // PrintWriter write = new PrintWriter(new BufferedWriter(
  //     new FileWriter(rootDirectory + "\\dataset\\predictedLabels.txt")));
  PrintWriter write =
      new PrintWriter(
          new BufferedWriter(
              new FileWriter(
                  rootDirectory
                      + "\\dataset\\dataset_aspectCategorization\\predictedHotelsLabels.txt")));

  if (option == 1) {
    BufferedReader trainReader =
        new BufferedReader(
            new FileReader(
                new File(
                    rootDirectory + "\\dataset\\dataset_aspectCategorization\\" + trainFile)));
    HashMap<String, Integer> id = new HashMap<String, Integer>();
    HashMap<String, String> review = new HashMap<String, String>();
    double[] val = new double[trainingFeature.size()];
    double[] tempVal = new double[trainingFeature.size()];
    LinearCopy.crossValidation(problem, parameter, 5, val, tempVal);

    for (int i = 0; i < trainingFeature.size(); i++) {
      int flag = 0;
      String tokens[] = trainReader.readLine().split("\\|");
      if (id.containsKey(tokens[1]) == true || tokens[2].compareToIgnoreCase("True") == 0) {
      } else {
        // System.out.println(tokens[1]);
        /*int max = -1;
        double probMax = -1.0;
        for (int j = 0; j < 13; j++) {
          if (probMax < val[i][j]) {
            probMax = val[i][j];
            max = j;
          }
        }*/
        // System.out.println(tempVal[i]);
        write.println((int) (val[i]));
        write.println("next");
        id.put(tokens[1], 1);
        System.out.println(tokens[1] + "\t" + (int) (val[i]));
        if (review.containsKey(tokens[1])) {
          System.out.println(tokens[3]);
          System.out.println(review.get(tokens[1]));
        } else {
          review.put(tokens[1], tokens[3]);
        }
      }
      /*else {
        for (int j = 0; j < 13; j++) {
          // System.out.print(val[i][j] + ", ");
          if (val[i] >= 0.185) {
            flag = 1;
            // System.out.println("i");
            write.println(j + 1);
          }
        }
        if (flag == 1) {
          write.println("next");
        } else {
          write.println("-1");
          write.println("next");
        }
        // write.println(prediction);
        id.put(tokens[1], 1);
        // System.out.println();
      }*/
    }
    write.close();
    return;
  }

  if (option == 3) {
    System.out.println(rootDirectory);
    BufferedReader testReader =
        new BufferedReader(
            new FileReader(
                new File(
                    rootDirectory + "\\dataset\\dataset_aspectCategorization\\" + testFile)));
    HashMap<String, Integer> id = new HashMap<String, Integer>();
    model = Model.load(modelFile);
    int countNext = 0;

    for (int i = 0; i < testFeature.size(); i++) {
      // System.out.println(i + ", " + testFeature.size() + ", " + testFeature.get(i).size());
      Feature[] instance = new Feature[testFeature.get(i).size()];
      int j = 0;
      for (Map.Entry<Integer, Double> entry : testFeature.get(i).entrySet()) {
        // System.out.print(entry.getKey() + ": " + entry.getValue() + "; ");
        // listOfMaps.get(i).put(start + entry.getKey(), entry.getValue());
        instance[j++] = new FeatureNode(entry.getKey(), entry.getValue());
      }

      // double d = LinearCopy.predict(model, instance);
      double[] predict = new double[85];
      double prediction = LinearCopy.predictProbability(model, instance, predict);

      int[] labelMap = model.getLabels();
      for (int ar = 0; ar < labelMap.length; ar++) {
        System.out.println("********************** " + ar + ": " + labelMap[ar]);
      }
      // System.out.println(prediction);
      // Arrays.sort(predict, Collections.reverseOrder());
      // double prediction = LinearCopy.predict(model, instance);

      String tokens[] = testReader.readLine().split("\\|");
      // System.out.println(tokens[1]);
      int flag = -1;
      if (id.containsKey(tokens[1]) == true || tokens[2].compareToIgnoreCase("True") == 0) {
        flag = 4;
        // System.out.println("OutofScope: " + tokens[1]);
      } else if (tokens[3].compareToIgnoreCase("abc") == 0) {
        flag = 2;
        System.out.println(tokens[1]);
        write.println("-1");
        write.println("next");
        countNext++;
        id.put(tokens[1], 1);
      } else {
        flag = 0;
        for (int p = 0; p < 85; p++) {
          if (predict[p] >= 0.128) {
            flag = 1;
            write.println(labelMap[p]);
          }
        }
        if (flag == 1) {
          countNext++;
          write.println("next");
        } else {
          countNext++;
          write.println("-1");
          write.println("next");
        }
        // write.println((int) d);
        // write.println("next");
        /*write.println(prediction);
        write.println("next");*/
        id.put(tokens[1], 1);
      }
      if (flag == -1) {
        System.out.println("-1, " + tokens[1]);
      }
    }
    write.close();
    System.out.println("count " + countNext);
  }
  write.close();
}
/**
 * Goal-driven recursive (depth-first, exhaustive) search with backtracking.
 *
 * @param context
 * @param relsWithSubtasks
 * @param algorithm
 * @param subtaskRelsInPath
 * @param depth
 */
private boolean subtaskPlanningImpl(
    PlanningContext context,
    Set<Rel> relsWithSubtasks,
    EvaluationAlgorithm algorithm,
    LinkedList<Rel> subtaskRelsInPath,
    int depth) {

  Set<Rel> relsWithSubtasksCopy = new LinkedHashSet<Rel>(relsWithSubtasks);
  Set<Rel> relsWithSubtasksToRemove = new LinkedHashSet<Rel>();
  boolean firstMLB = true;

  // start building a Maximal Linear Branch (MLB)
  MLB:
  while (!relsWithSubtasksCopy.isEmpty()) {

    if (isSubtaskLoggingOn()) {
      String print = p(depth) + "Starting new MLB with: ";
      for (Rel rel : relsWithSubtasksCopy) {
        print +=
            "\n" + p(depth) + "  " + rel.getParent().getFullName() + " : " + rel.getDeclaration();
      }
      /*
      print += "\n" + p( depth ) + " All remaining rels in problem:";
      for ( Rel rel : problem.getAllRels() ) {
        print += "\n" + p( depth ) + " " + rel.getParentObjectName() + " : " + rel.getDeclaration();
      }
      print += "\n" + p( depth ) + "All found variables: ";
      for ( Var var : problem.getFoundVars() ) {
        print += "\n" + p( depth ) + " " + var.toString();
      }
      */
      logger.debug(print);
    }

    // if this is the first attempt to construct an MLB to solve a subtask (i.e. depth > 0),
    // do not invoke linear planning because it has already been done
    if ((depth == 0) || !firstMLB) {
      boolean solvedIntermediately = linearForwardSearch(context, algorithm, true);

      // Having constructed some MLBs, the (sub)problem may already be solved
      // and there is no need to waste precious time planning unnecessary branches
      if (solvedIntermediately
          && ( // on the top level optimize only if computing goals
          (depth == 0 && !computeAll)
              // otherwise (inside subtasks) always optimize
              || (depth != 0))) {
        // If the problem is solved, optimize and return
        if (!isOptDisabled) Optimizer.optimize(context, algorithm);
        return true;
      }
    } else {
      firstMLB = false;
    }

    // OR branch: alternative rels with subtasks
    OR:
    for (Iterator<Rel> subtaskRelIterator = relsWithSubtasksCopy.iterator();
        subtaskRelIterator.hasNext(); ) {

      Rel subtaskRel = subtaskRelIterator.next();

      if (isSubtaskLoggingOn())
        logger.debug(
            p(depth)
                + "OR: depth: "
                + (depth + 1)
                + " rel - "
                + subtaskRel.getParent().getFullName()
                + " : "
                + subtaskRel.getDeclaration());

      if (subtaskRel.equals(subtaskRelsInPath.peekLast())
          || (!context.isRelReadyToUse(subtaskRel))
          || context.getFoundVars().containsAll(subtaskRel.getOutputs())
          || (!isSubtaskRepetitionAllowed && subtaskRelsInPath.contains(subtaskRel))) {

        if (isSubtaskLoggingOn()) {
          logger.debug(p(depth) + "skipped");
          if (!context.isRelReadyToUse(subtaskRel)) {
            logger.debug(p(depth) + "because it has unknown inputs"); // TODO print unknown
          } else if (context.getFoundVars().containsAll(subtaskRel.getOutputs())) {
            logger.debug(p(depth) + "because all outputs in FoundVars");
          } else if (subtaskRel.equals(subtaskRelsInPath.peekLast())) {
            logger.debug(p(depth) + "because it is nested in itself");
          } else if (!isSubtaskRepetitionAllowed && subtaskRelsInPath.contains(subtaskRel)) {
            logger.debug(
                p(depth)
                    + "This rel with subtasks is already in use, path: "
                    + subtaskRelsInPath);
          }
        }
        continue OR;
      }

      LinkedList<Rel> newPath = new LinkedList<Rel>(subtaskRelsInPath);
      newPath.add(subtaskRel);

      PlanningResult result = new PlanningResult(subtaskRel, true);

      // this is true if all subtasks are solvable
      boolean allSolved = true;

      // AND branch: every subtask of the chosen rel has to be solved
      AND:
      for (SubtaskRel subtask : subtaskRel.getSubtasks()) {
        if (isSubtaskLoggingOn()) logger.debug(p(depth) + "AND: subtask - " + subtask);

        EvaluationAlgorithm sbtAlgorithm = null;

        ////////////////////// INDEPENDENT SUBTASK ////////////////////////////////////////
        if (subtask.isIndependent()) {
          if (isSubtaskLoggingOn()) logger.debug("Independent!!!");

          if (subtask.isSolvable() == null) {
            if (isSubtaskLoggingOn())
              logger.debug("Start solving independent subtask " + subtask.getDeclaration());
            // an independent subtask is solved only once
            Problem problemContext = subtask.getContext();
            DepthFirstPlanner planner = new DepthFirstPlanner();
            planner.indSubtasks = indSubtasks;
            planner.nested = true;
            sbtAlgorithm = planner.invokePlaning(problemContext, isOptDisabled);
            PlanningContext indCntx = problemContext.getCurrentContext();
            boolean solved = indCntx.getFoundVars().containsAll(indCntx.getAllGoals());
            if (solved) {
              subtask.setSolvable(Boolean.TRUE);
              indSubtasks.put(subtask, sbtAlgorithm);
              if (isSubtaskLoggingOn()) logger.debug("Solved " + subtask.getDeclaration());
            } else {
              subtask.setSolvable(Boolean.FALSE);
              if (RuntimeProperties.isLogInfoEnabled()) {
                logger.debug("Unable to solve " + subtask.getDeclaration());
              }
            }
            allSolved &= solved;
          } else if (subtask.isSolvable() == Boolean.TRUE) {
            if (isSubtaskLoggingOn()) logger.debug("Already solved");
            allSolved &= true;
            sbtAlgorithm = indSubtasks.get(subtask);
          } else {
            if (isSubtaskLoggingOn()) logger.debug("Not solvable");
            allSolved &= false;
          }
          if (isSubtaskLoggingOn()) logger.debug("End of independent subtask " + subtask);

          if (!allSolved) {
            continue OR;
          }
          assert sbtAlgorithm != null;
          result.addSubtaskAlgorithm(subtask, sbtAlgorithm);
        }
        ////////////////////// DEPENDENT SUBTASK //////////////////////////////////////
        else {
          // let's clone the environment
          PlanningContext newContext = prepareNewContext(context, subtask);

          sbtAlgorithm = new EvaluationAlgorithm();

          // during linear planning, if some goals are found, they are removed from the set "goals"
          boolean solved =
              linearForwardSearch(
                  newContext,
                  sbtAlgorithm,
                  // do not optimize here, because the solution may require additional rels with
                  // subtasks
                  true);

          if (solved) {
            if (isSubtaskLoggingOn()) logger.debug(p(depth) + "SOLVED subtask: " + subtask);
            if (!isOptDisabled) {
              // if a subtask has been solved, optimize its algorithm
              Optimizer.optimize(newContext, sbtAlgorithm);
            }
            result.addSubtaskAlgorithm(subtask, sbtAlgorithm);
            allSolved &= solved;
            continue AND;
          } else if (!solved && (depth == maxDepth)) {
            if (isSubtaskLoggingOn())
              logger.debug(p(depth) + "NOT SOLVED and cannot go any deeper, subtask: " + subtask);
            continue OR;
          }

          if (isSubtaskLoggingOn()) logger.debug(p(depth) + "Recursing deeper");

          solved =
              subtaskPlanningImpl(newContext, relsWithSubtasks, sbtAlgorithm, newPath, depth + 1);

          if (isSubtaskLoggingOn()) logger.debug(p(depth) + "Back to depth " + (depth + 1));

          // linear planning has already been performed at the end of the MLB at depth + 1;
          // if the problem was solved there, there is no need to run linear planning again
          if ((solved || (solved = linearForwardSearch(newContext, sbtAlgorithm, true)))
              && !isOptDisabled) {
            // if solved, optimize here with the full list of goals in order to get rid of
            // unnecessary subtask instances and other relations
            Optimizer.optimize(newContext, sbtAlgorithm);
          }

          if (isSubtaskLoggingOn())
            logger.debug(p(depth) + (solved ? "" : "NOT") + " SOLVED subtask: " + subtask);

          allSolved &= solved;

          // if at least one subtask is not solvable, try another branch
          if (!allSolved) {
            continue OR;
          }
          result.addSubtaskAlgorithm(subtask, sbtAlgorithm);
        }
      } // AND

      if (allSolved) {
        algorithm.add(result);

        Set<Var> newVars = new LinkedHashSet<Var>();
        unfoldVarsToSet(subtaskRel.getOutputs(), newVars);
        context.getKnownVars().addAll(newVars);
        context.getFoundVars().addAll(newVars);

        subtaskRelIterator.remove();

        if (isSubtaskLoggingOn()) {
          logger.debug(
              p(depth)
                  + "SOLVED ALL SUBTASKS for "
                  + subtaskRel.getParent().getFullName()
                  + " : "
                  + subtaskRel.getDeclaration());
          logger.debug(p(depth) + "Updating the problem graph and continuing building new MLB");
        }

        // this is used for incremental dfs
        if (depth == 0) {
          relsWithSubtasksToRemove.add(subtaskRel);
        }

        continue MLB;
      }

      if (isSubtaskLoggingOn())
        logger.debug(
            p(depth)
                + "NOT SOLVED ALL subtasks, removing from path "
                + subtaskRel.getParent().getFullName()
                + " : "
                + subtaskRel.getDeclaration());
      newPath.remove(subtaskRel);
    } // end OR

    // exit the loop because there are no more rels with subtasks to be applied
    // (i.e. no more rels can introduce new variables into the algorithm)
    if (isSubtaskLoggingOn()) logger.debug(p(depth) + "No more MLB can be constructed");
    break MLB;
  }

  // incremental dfs, remove solved subtasks
  if (depth == 0) {
    relsWithSubtasks.removeAll(relsWithSubtasksToRemove);
  }

  return false;
}
private boolean subtaskPlanning(Problem problem, EvaluationAlgorithm algorithm) {

  if (isSubtaskLoggingOn())
    logger.debug("!!!--------- Starting Planning With Subtasks ---------!!!");

  final int maxDepthBackup = maxDepth;

  if (isSubtaskLoggingOn())
    logger.debug(
        "maxDepthBackup:" + maxDepthBackup + " sbt: " + problem.getRelsWithSubtasks().size());

  PlanningContext context = problem.getCurrentContext();

  try {
    Set<Rel> relsWithSubtasks = new LinkedHashSet<Rel>(problem.getRelsWithSubtasks());

    if (isIncremental) {
      int incrementalDepth = 0;

      while (incrementalDepth
          <= (isSubtaskRepetitionAllowed
              ? maxDepthBackup
              : problem.getRelsWithSubtasks().size() - 1)) {
        if (isSubtaskLoggingOn())
          logger.debug(
              "Incremental dfs, with max depth "
                  + (incrementalDepth + 1)
                  + " and "
                  + problem.getRelsWithSubtasks().size()
                  + " subtasks to solve");

        maxDepth = incrementalDepth++;

        // if only specific goals need to be computed, the problem may already be solved after
        // reaching a certain depth (not necessarily the maximal one), and there is no need to
        // go any deeper.
        if (subtaskPlanningImpl(context, relsWithSubtasks, algorithm, new LinkedList<Rel>(), 0)) {
          if (isSubtaskLoggingOn())
            logger.debug("The problem was solved during idfs after some intermediate MLB");
          return true;
        }

        if (isSubtaskLoggingOn())
          logger.debug("Unsolved subtasks left: " + problem.getRelsWithSubtasks().size());
      }

      if (isSubtaskLoggingOn()) logger.debug("Finished incremental dfs");

    } else {
      if (!isSubtaskRepetitionAllowed) {
        maxDepth = problem.getRelsWithSubtasks().size() - 1;
      }

      if (isSubtaskLoggingOn())
        logger.debug("Starting subtask dfs with maxDepth: " + (maxDepth + 1));

      if (subtaskPlanningImpl(context, relsWithSubtasks, algorithm, new LinkedList<Rel>(), 0)) {
        if (isSubtaskLoggingOn())
          logger.debug("The problem was solved during dfs after some intermediate MLB");
        return true;
      }
    }
  } finally {
    if (isSubtaskLoggingOn()) logger.debug("Finished dfs");

    maxDepth = maxDepthBackup;
    indSubtasks.clear();
  }

  if (isSubtaskLoggingOn()) logger.debug("Invoking final linear planning");

  return linearForwardSearch(context, algorithm, computeAll);
}