public void printTopWords(int numWords, boolean useNewLines) {
  class WordProb implements Comparable<WordProb> {
    int wi;
    double p;

    public WordProb(int wi, double p) {
      this.wi = wi;
      this.p = p;
    }

    // Sort in descending order of probability.
    public final int compareTo(WordProb o2) {
      if (p > o2.p) return -1;
      else if (p == o2.p) return 0;
      else return 1;
    }
  }

  WordProb[] wp = new WordProb[numTypes];
  for (int ti = 0; ti < numTopics; ti++) {
    for (int wi = 0; wi < numTypes; wi++)
      wp[wi] = new WordProb(wi, ((double) typeTopicCounts[wi][ti]) / tokensPerTopic[ti]);
    Arrays.sort(wp);
    if (useNewLines) {
      System.out.println("\nTopic " + ti);
      for (int i = 0; i < numWords; i++)
        System.out.println(
            ilist.getDataAlphabet().lookupObject(wp[i].wi).toString() + " " + wp[i].p);
    } else {
      System.out.print("Topic " + ti + ": ");
      for (int i = 0; i < numWords; i++)
        System.out.print(ilist.getDataAlphabet().lookupObject(wp[i].wi).toString() + " ");
      System.out.println();
    }
  }
}
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
  int version = in.readInt(); // format version; read to advance the stream, currently unused
  ilist = (InstanceList) in.readObject();
  numTopics = in.readInt();
  alpha = in.readDouble();
  beta = in.readDouble();
  tAlpha = in.readDouble();
  vBeta = in.readDouble();
  int numDocs = ilist.size();
  topics = new int[numDocs][];
  for (int di = 0; di < ilist.size(); di++) {
    int docLen = ((FeatureSequence) ilist.get(di).getData()).getLength();
    topics[di] = new int[docLen];
    for (int si = 0; si < docLen; si++) topics[di][si] = in.readInt();
  }
  docTopicCounts = new int[numDocs][numTopics];
  for (int di = 0; di < ilist.size(); di++)
    for (int ti = 0; ti < numTopics; ti++) docTopicCounts[di][ti] = in.readInt();
  int numTypes = ilist.getDataAlphabet().size();
  typeTopicCounts = new int[numTypes][numTopics];
  for (int fi = 0; fi < numTypes; fi++)
    for (int ti = 0; ti < numTopics; ti++) typeTopicCounts[fi][ti] = in.readInt();
  tokensPerTopic = new int[numTopics];
  for (int ti = 0; ti < numTopics; ti++) tokensPerTopic[ti] = in.readInt();
}
public void printDocumentTopics(PrintWriter pw, double threshold, int max) {
  pw.println("#doc source topic proportion ...");
  int docLen;
  double[] topicDist = new double[numTopics]; // one entry per topic
  for (int di = 0; di < topics.length; di++) {
    pw.print(di);
    pw.print(' ');
    if (ilist.get(di).getSource() != null) {
      pw.print(ilist.get(di).getSource().toString());
    } else {
      pw.print("null-source");
    }
    pw.print(' ');
    docLen = topics[di].length;
    for (int ti = 0; ti < numTopics; ti++)
      topicDist[ti] = ((double) docTopicCounts[di][ti]) / docLen;
    if (max < 0) max = numTopics;
    // Greedily print up to `max` of the largest topic proportions above `threshold`.
    for (int tp = 0; tp < max; tp++) {
      double maxvalue = 0;
      int maxindex = -1;
      for (int ti = 0; ti < numTopics; ti++)
        if (topicDist[ti] > maxvalue) {
          maxvalue = topicDist[ti];
          maxindex = ti;
        }
      if (maxindex == -1 || topicDist[maxindex] < threshold) break;
      pw.print(maxindex + " " + topicDist[maxindex] + " ");
      topicDist[maxindex] = 0;
    }
    pw.println(' ');
  }
}
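// Usage sketch, not from the original source: assumes `lda` is an instance of
// this class whose estimate(...) has already run; the output path is
// illustrative. Writes each document's topics with proportion >= 0.05,
// at most 10 topics per document.
PrintWriter pw = new PrintWriter(new FileWriter("doc-topics.txt"));
lda.printDocumentTopics(pw, 0.05, 10);
pw.close();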
public static ElementInMove_c resolveInstance(
    ModelRoot modelRoot,
    java.util.UUID p_m_elementid,
    java.util.UUID p_m_diagramid,
    float p_m_startingx,
    float p_m_startingy) {
  InstanceList instances = modelRoot.getInstanceList(ElementInMove_c.class);
  ElementInMove_c source = null;
  synchronized (instances) {
    Object[] key = {p_m_elementid, p_m_diagramid};
    source = (ElementInMove_c) instances.get(key);
    if (source != null && !modelRoot.isCompareRoot()) {
      source.convertFromProxy();
      source.batchUnrelate();
      // pre-process the uuid so that we re-use the null uuid instance rather than creating a new one
      source.m_elementid = IdAssigner.preprocessUUID(p_m_elementid);
      // pre-process the uuid so that we re-use the null uuid instance rather than creating a new one
      source.m_diagramid = IdAssigner.preprocessUUID(p_m_diagramid);
      source.m_startingx = p_m_startingx;
      source.m_startingy = p_m_startingy;
      return source;
    }
  }
  // there is no instance matching the id
  ElementInMove_c new_inst =
      new ElementInMove_c(modelRoot, p_m_elementid, p_m_diagramid, p_m_startingx, p_m_startingy);
  return new_inst;
}
private InstanceList readFile() throws IOException {
  Scanner scanner = new Scanner(new FileInputStream(fileName), encoding);
  ArrayList<Pipe> pipeList = new ArrayList<Pipe>();
  pipeList.add(new CharSequence2TokenSequence(Pattern.compile("\\p{L}\\p{L}+")));
  pipeList.add(new TokenSequence2FeatureSequence());
  InstanceList testing = new InstanceList(new SerialPipes(pipeList));
  // Each line is expected to be "id,label,text"; the id is recorded in docIds
  // and the text is piped into the instance list.
  Pattern pattern = Pattern.compile("^(.*?),(.*?),(.*)$");
  try {
    while (scanner.hasNextLine()) {
      String text = scanner.nextLine();
      text = text.replaceAll("\\x0d", ""); // strip carriage returns
      Matcher matcher = pattern.matcher(text);
      if (matcher.find()) {
        docIds.add(matcher.group(1));
        testing.addThruPipe(new Instance(matcher.group(3), null, "test instance", null));
      }
    }
  } finally {
    scanner.close();
  }
  return testing;
}
public static InstanceList readFromFile(String inputfile) throws IOException {
  BufferedReader in = new BufferedReader(new FileReader(inputfile));
  InstanceList instanceList = new InstanceList();
  String line;
  // Each line is "label attr:value attr:value ..." (sparse SVMlight-style format).
  while ((line = in.readLine()) != null) {
    String[] tokens = line.split(" ");
    int label = Integer.valueOf(tokens[0]);
    Instance ins = new Instance(label);
    for (int i = 1; i < tokens.length; i++) {
      String[] pair = tokens[i].split(":");
      int attr = Integer.valueOf(pair[0]);
      double value = Double.valueOf(pair[1]);
      ins.addAttribute(attr, value);
    }
    // Add the instance once, after all of its attributes have been set.
    instanceList.addInstance(ins);
  }
  in.close();
  return instanceList;
}
/** This is (mostly) copied from CRF4.java */
public boolean[][] labelConnectionsIn(
    Alphabet outputAlphabet, InstanceList trainingSet, String start) {
  int numLabels = outputAlphabet.size();
  boolean[][] connections = new boolean[numLabels][numLabels];
  for (int i = 0; i < trainingSet.size(); i++) {
    Instance instance = trainingSet.getInstance(i);
    FeatureSequence output = (FeatureSequence) instance.getTarget();
    for (int j = 1; j < output.size(); j++) {
      int sourceIndex = outputAlphabet.lookupIndex(output.get(j - 1));
      int destIndex = outputAlphabet.lookupIndex(output.get(j));
      assert (sourceIndex >= 0 && destIndex >= 0);
      connections[sourceIndex][destIndex] = true;
    }
  }
  // Handle start state
  if (start != null) {
    int startIndex = outputAlphabet.lookupIndex(start);
    for (int j = 0; j < outputAlphabet.size(); j++) {
      connections[startIndex][j] = true;
    }
  }
  return connections;
}
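// Usage sketch, not from the original source: assumes `outputAlphabet` and
// `trainingSet` are already built, and uses "O" as an illustrative start label.
// The boolean matrix marks which label-to-label transitions were observed in
// training; here we just print the allowed transitions.
boolean[][] connections = labelConnectionsIn(outputAlphabet, trainingSet, "O");
for (int i = 0; i < connections.length; i++)
  for (int j = 0; j < connections[i].length; j++)
    if (connections[i][j])
      System.out.println(outputAlphabet.lookupObject(i) + " -> " + outputAlphabet.lookupObject(j));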
public static void clearInstances(ModelRoot modelRoot) {
  InstanceList instances = modelRoot.getInstanceList(InformalArgument_c.class);
  synchronized (instances) {
    // Iterate backwards so deletions do not shift the remaining indices.
    for (int i = instances.size() - 1; i >= 0; i--) {
      ((NonRootModelElement) instances.get(i)).delete_unchecked();
    }
  }
}
public static BridgeInvocation_c createProxy(
    ModelRoot modelRoot,
    java.util.UUID p_m_statement_id,
    java.util.UUID p_m_brg_id,
    int p_m_bridgenamelinenumber,
    int p_m_bridgenamecolumn,
    int p_m_externalentitykeyletterslinenumber,
    int p_m_externalentitykeyletterscolumn,
    String p_contentPath,
    IPath p_localPath) {
  ModelRoot resolvedModelRoot = ModelRoot.findModelRoot(modelRoot, p_contentPath, p_localPath);
  // if a model root was not resolved it is most likely
  // due to a missing file of the proxy; default back to
  // the original model root
  if (resolvedModelRoot != null) modelRoot = resolvedModelRoot;
  InstanceList instances = modelRoot.getInstanceList(BridgeInvocation_c.class);
  BridgeInvocation_c new_inst = null;
  synchronized (instances) {
    Object[] key = {p_m_statement_id};
    new_inst = (BridgeInvocation_c) instances.get(key);
  }
  String contentPath = PersistenceUtil.resolveRelativePath(p_localPath, new Path(p_contentPath));
  if (modelRoot.isNewCompareRoot()) {
    // for comparisons we do not want to change
    // the content path
    contentPath = p_contentPath;
  }
  if (new_inst != null && !modelRoot.isCompareRoot()) {
    PersistableModelComponent pmc = new_inst.getPersistableComponent();
    if (pmc == null) {
      // dangling reference, redo this instance
      new_inst.batchUnrelate();
      // pre-process the uuid so that we re-use the null uuid instance rather than creating a new one
      new_inst.m_statement_id = IdAssigner.preprocessUUID(p_m_statement_id);
      // pre-process the uuid so that we re-use the null uuid instance rather than creating a new one
      new_inst.m_brg_id = IdAssigner.preprocessUUID(p_m_brg_id);
      new_inst.m_bridgenamelinenumber = p_m_bridgenamelinenumber;
      new_inst.m_bridgenamecolumn = p_m_bridgenamecolumn;
      new_inst.m_externalentitykeyletterslinenumber = p_m_externalentitykeyletterslinenumber;
      new_inst.m_externalentitykeyletterscolumn = p_m_externalentitykeyletterscolumn;
    }
  }
  if (new_inst == null) {
    // there is no instance matching the id, create a proxy;
    // if the resource doesn't exist then this will be a dangling reference
    new_inst =
        new BridgeInvocation_c(
            modelRoot,
            p_m_statement_id,
            p_m_brg_id,
            p_m_bridgenamelinenumber,
            p_m_bridgenamecolumn,
            p_m_externalentitykeyletterslinenumber,
            p_m_externalentitykeyletterscolumn);
    new_inst.m_contentPath = contentPath;
  }
  return new_inst;
}
public void count() {
  TIntIntHashMap docCounts = new TIntIntHashMap();
  int index = 0;
  if (instances.size() == 0) {
    logger.info("Instance list is empty");
    return;
  }
  if (instances.get(0).getData() instanceof FeatureSequence) {
    for (Instance instance : instances) {
      FeatureSequence features = (FeatureSequence) instance.getData();
      // Count within-document occurrences first, then fold them into the global
      // totals so each feature's document frequency rises by at most one per document.
      for (int i = 0; i < features.getLength(); i++) {
        docCounts.adjustOrPutValue(features.getIndexAtPosition(i), 1, 1);
      }
      int[] keys = docCounts.keys();
      for (int i = 0; i < keys.length; i++) {
        int feature = keys[i];
        featureCounts[feature] += docCounts.get(feature);
        documentFrequencies[feature]++;
      }
      docCounts = new TIntIntHashMap();
      index++;
      if (index % 1000 == 0) {
        System.err.println(index);
      }
    }
  } else if (instances.get(0).getData() instanceof FeatureVector) {
    for (Instance instance : instances) {
      FeatureVector features = (FeatureVector) instance.getData();
      for (int location = 0; location < features.numLocations(); location++) {
        int feature = features.indexAtLocation(location);
        double value = features.valueAtLocation(location);
        documentFrequencies[feature]++;
        featureCounts[feature] += value;
      }
      index++;
      if (index % 1000 == 0) {
        System.err.println(index);
      }
    }
  } else {
    logger.info("Unsupported data class: " + instances.get(0).getData().getClass().getName());
  }
}
public static void batchRelateAll(
    ModelRoot modelRoot, boolean notifyChanges, boolean searchAllRoots, boolean relateProxies) {
  InstanceList instances = modelRoot.getInstanceList(InformalArgument_c.class);
  synchronized (instances) {
    Iterator<NonRootModelElement> cursor = instances.iterator();
    while (cursor.hasNext()) {
      final InformalArgument_c inst = (InformalArgument_c) cursor.next();
      inst.batchRelate(modelRoot, relateProxies, notifyChanges, searchAllRoots);
    }
  }
}
public void estimate(
    InstanceList documents,
    int numIterations,
    int showTopicsInterval,
    int outputModelInterval,
    String outputModelFilename,
    Randoms r) {
  ilist = documents.shallowClone();
  numTypes = ilist.getDataAlphabet().size();
  int numDocs = ilist.size();
  topics = new int[numDocs][];
  docTopicCounts = new int[numDocs][numTopics];
  typeTopicCounts = new int[numTypes][numTopics];
  tokensPerTopic = new int[numTopics];
  tAlpha = alpha * numTopics;
  vBeta = beta * numTypes;
  long startTime = System.currentTimeMillis();

  // Initialize with random assignments of tokens to topics
  // and finish allocating this.topics and this.tokens
  int topic, seqLen;
  FeatureSequence fs;
  for (int di = 0; di < numDocs; di++) {
    try {
      fs = (FeatureSequence) ilist.get(di).getData();
    } catch (ClassCastException e) {
      System.err.println(
          "LDA and other topic models expect FeatureSequence data, not FeatureVector data. "
              + "With text2vectors, you can obtain such data with --keep-sequence or --keep-bisequence.");
      throw e;
    }
    seqLen = fs.getLength();
    numTokens += seqLen;
    topics[di] = new int[seqLen];
    // Randomly assign tokens to topics
    for (int si = 0; si < seqLen; si++) {
      topic = r.nextInt(numTopics);
      topics[di][si] = topic;
      docTopicCounts[di][topic]++;
      typeTopicCounts[fs.getIndexAtPosition(si)][topic]++;
      tokensPerTopic[topic]++;
    }
  }

  this.estimate(
      0, numDocs, numIterations, showTopicsInterval, outputModelInterval, outputModelFilename, r);
  // 124.5 seconds
  // 144.8 seconds after using FeatureSequence instead of tokens[][] array
  // 121.6 seconds after putting "final" on FeatureSequence.getIndexAtPosition()
  // 106.3 seconds after avoiding array lookup in inner loop with a temporary variable
}
private static Graphnode_c findGraphnodeInstance(
    ModelRoot modelRoot, ClassQueryInterface_c test, boolean loadComponent) {
  InstanceList instances = modelRoot.getInstanceList(Graphnode_c.class);
  synchronized (instances) {
    for (int i = 0; i < instances.size(); ++i) {
      Graphnode_c x = (Graphnode_c) instances.get(i);
      if (test == null || test.evaluate(x)) {
        return x;
      }
    }
  }
  return null;
}
private static InformalArgument_c findInformalArgumentInstance(
    ModelRoot modelRoot, ClassQueryInterface_c test, boolean loadComponent) {
  InstanceList instances = modelRoot.getInstanceList(InformalArgument_c.class);
  synchronized (instances) {
    for (int i = 0; i < instances.size(); ++i) {
      InformalArgument_c x = (InformalArgument_c) instances.get(i);
      if (test == null || test.evaluate(x)) {
        if (x.ensureLoaded(loadComponent)) return x;
      }
    }
  }
  return null;
}
public InstanceList readArray(String[] cleanTexts) {
  StringArrayIterator iterator = new StringArrayIterator(cleanTexts);
  // Construct a new instance list, passing it the pipe we want to use to
  // process instances.
  InstanceList instances = new InstanceList(pipe);
  // Process each instance provided by the iterator, then name and label the
  // resulting instances (the list is empty until addThruPipe has run).
  instances.addThruPipe(iterator);
  int index = 0;
  for (Instance inst : instances) {
    inst.setName(name_id.get(index));
    inst.setTarget("english");
    index++;
  }
  return instances;
}
public void generateTestInference() {
  if (lda == null) {
    System.out.println("Should run lda estimation first.");
    System.exit(1);
  }
  if (testTopicDistribution == null) testTopicDistribution = new double[test.size()][];
  TopicInferencer infer = lda.getInferencer();
  int iterations = 800;
  int thinning = 5;
  int burnIn = 100;
  for (int ti = 0; ti < test.size(); ti++) {
    testTopicDistribution[ti] =
        infer.getSampledDistribution(test.get(ti), iterations, thinning, burnIn);
  }
}
public FeatureCountTool(InstanceList instances) {
  this.instances = instances;
  numFeatures = instances.getDataAlphabet().size();
  featureCounts = new double[numFeatures];
  documentFrequencies = new int[numFeatures];
}
// Just for testing. Recommended instead: mallet/bin/vectors2topics
public static void main(String[] args) {
  InstanceList ilist = InstanceList.load(new File(args[0]));
  int numIterations = args.length > 1 ? Integer.parseInt(args[1]) : 1000;
  int numTopWords = args.length > 2 ? Integer.parseInt(args[2]) : 20;
  System.out.println("Data loaded.");
  TopicalNGrams tng = new TopicalNGrams(10);
  tng.estimate(ilist, numIterations, 1, 0, null, new Randoms());
  tng.printTopWords(numTopWords, true);
}
private InstanceList generateInstanceList() throws Exception {
  ArrayList<Pipe> pipeList = new ArrayList<Pipe>();
  pipeList.add(new CharSequence2TokenSequence(Pattern.compile("\\p{L}\\p{L}+")));
  pipeList.add(new TokenSequence2FeatureSequence());
  Reader fileReader = new InputStreamReader(new FileInputStream(new File(fileName)), "UTF-8");
  InstanceList instances = new InstanceList(new SerialPipes(pipeList));
  instances.addThruPipe(
      new CsvIterator(
          fileReader,
          Pattern.compile("^(\\S*)[\\s,]*(\\S*)[\\s,]*(.*)$"),
          3, 2, 1)); // data, label, name fields
  return instances;
}
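// Usage sketch, not from the original source: assumes `fileName` points at a
// file whose lines look like "doc01 en some text ...", so the regex above maps
// capture groups 3, 2, 1 to the data, label, and name fields of each Instance.
InstanceList instances = generateInstanceList();
System.out.println("Loaded " + instances.size() + " instances.");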
public static ComponentInComponent_c createProxy(
    ModelRoot modelRoot,
    java.util.UUID p_m_id,
    java.util.UUID p_m_parent_id,
    String p_contentPath,
    IPath p_localPath) {
  ModelRoot resolvedModelRoot = ModelRoot.findModelRoot(modelRoot, p_contentPath, p_localPath);
  // if a model root was not resolved it is most likely
  // due to a missing file of the proxy; default back to
  // the original model root
  if (resolvedModelRoot != null) modelRoot = resolvedModelRoot;
  InstanceList instances = modelRoot.getInstanceList(ComponentInComponent_c.class);
  ComponentInComponent_c new_inst = null;
  synchronized (instances) {
    Object[] key = {p_m_id};
    new_inst = (ComponentInComponent_c) instances.get(key);
  }
  String contentPath = PersistenceUtil.resolveRelativePath(p_localPath, new Path(p_contentPath));
  if (modelRoot.isNewCompareRoot()) {
    // for comparisons we do not want to change
    // the content path
    contentPath = p_contentPath;
  }
  if (new_inst != null && !modelRoot.isCompareRoot()) {
    PersistableModelComponent pmc = new_inst.getPersistableComponent();
    if (pmc == null) {
      // dangling reference, redo this instance
      new_inst.batchUnrelate();
      // pre-process the uuid so that we re-use the null uuid instance rather than creating a new one
      new_inst.m_id = IdAssigner.preprocessUUID(p_m_id);
      // extract 28 bit value only
      new_inst.m_idLongBased = 0xfffffff & p_m_id.getLeastSignificantBits();
      // pre-process the uuid so that we re-use the null uuid instance rather than creating a new one
      new_inst.m_parent_id = IdAssigner.preprocessUUID(p_m_parent_id);
    }
  }
  if (new_inst == null) {
    // there is no instance matching the id, create a proxy;
    // if the resource doesn't exist then this will be a dangling reference
    new_inst = new ComponentInComponent_c(modelRoot, p_m_id, p_m_parent_id);
    new_inst.m_contentPath = contentPath;
  }
  return new_inst;
}
public static InformalArgument_c resolveInstance(ModelRoot modelRoot, java.util.UUID p_m_arg_id) {
  InstanceList instances = modelRoot.getInstanceList(InformalArgument_c.class);
  InformalArgument_c source = null;
  synchronized (instances) {
    Object[] key = {p_m_arg_id};
    source = (InformalArgument_c) instances.get(key);
    if (source != null && !modelRoot.isCompareRoot()) {
      source.convertFromProxy();
      source.batchUnrelate();
      // pre-process the uuid so that we re-use the null uuid instance rather than creating a new one
      source.m_arg_id = IdAssigner.preprocessUUID(p_m_arg_id);
      return source;
    }
  }
  // there is no instance matching the id
  InformalArgument_c new_inst = new InformalArgument_c(modelRoot, p_m_arg_id);
  return new_inst;
}
public void doInference() {
  try {
    ParallelTopicModel model = ParallelTopicModel.read(new File(inferencerFile));
    TopicInferencer inferencer = model.getInferencer();
    readFile(); // populates docIds from the input file
    InstanceList testing = generateInstanceList();
    for (int i = 0; i < testing.size(); i++) {
      StringBuilder probabilities = new StringBuilder();
      double[] testProbabilities = inferencer.getSampledDistribution(testing.get(i), 10, 1, 5);
      ArrayList<Pair<Integer, Double>> probabilityList =
          new ArrayList<Pair<Integer, Double>>();
      for (int j = 0; j < testProbabilities.length; j++) {
        probabilityList.add(new Pair<Integer, Double>(j, testProbabilities[j]));
      }
      Collections.sort(probabilityList, new CustomComparator());
      // Emit the topN most probable topics as "topic,probability" pairs.
      for (int j = 0; j < testProbabilities.length && j < topN; j++) {
        if (j > 0) probabilities.append(" ");
        probabilities.append(
            probabilityList.get(j).getFirst().toString()
                + ","
                + probabilityList.get(j).getSecond().toString());
      }
      System.out.println(docIds.get(i) + "," + probabilities.toString());
    }
  } catch (Exception e) {
    e.printStackTrace();
    System.err.println(e.getMessage());
  }
}
// Recommended to use mallet/bin/vectors2topics instead.
public static void main(String[] args) throws IOException {
  InstanceList ilist = InstanceList.load(new File(args[0]));
  int numIterations = args.length > 1 ? Integer.parseInt(args[1]) : 1000;
  int numTopWords = args.length > 2 ? Integer.parseInt(args[2]) : 20;
  System.out.println("Data loaded.");
  LDA lda = new LDA(10);
  lda.estimate(ilist, numIterations, 50, 0, null, new Randoms()); // should be 1100
  lda.printTopWords(numTopWords, true);
  lda.printDocumentTopics(new File(args[0] + ".lda"));
}
public static Graphnode_c[] GraphnodeInstances(
    ModelRoot modelRoot, ClassQueryInterface_c test, boolean loadComponent) {
  InstanceList instances = modelRoot.getInstanceList(Graphnode_c.class);
  Vector matches = new Vector();
  synchronized (instances) {
    for (int i = 0; i < instances.size(); ++i) {
      Graphnode_c x = (Graphnode_c) instances.get(i);
      if (test == null || test.evaluate(x)) {
        matches.add(x);
      }
    }
    if (matches.size() > 0) {
      Graphnode_c[] ret_set = new Graphnode_c[matches.size()];
      matches.copyInto(ret_set);
      return ret_set;
    } else {
      return new Graphnode_c[0];
    }
  }
}
public void printState(PrintWriter pw) {
  Alphabet a = ilist.getDataAlphabet();
  pw.println("#doc pos typeindex type topic");
  for (int di = 0; di < topics.length; di++) {
    FeatureSequence fs = (FeatureSequence) ilist.get(di).getData();
    for (int si = 0; si < topics[di].length; si++) {
      int type = fs.getIndexAtPosition(si);
      pw.print(di);
      pw.print(' ');
      pw.print(si);
      pw.print(' ');
      pw.print(type);
      pw.print(' ');
      pw.print(a.lookupObject(type));
      pw.print(' ');
      pw.print(topics[di][si]);
      pw.println();
    }
  }
}
public static void main(String[] args) throws Exception {
  CommandOption.setSummary(
      FeatureCountTool.class,
      "Print feature counts and instances per feature (e.g. document frequencies) in an instance list");
  CommandOption.process(FeatureCountTool.class, args);
  InstanceList instances = InstanceList.load(new File(inputFile.value));
  FeatureCountTool counter = new FeatureCountTool(instances);
  counter.count();
  counter.printCounts();
}
public void test() throws Exception {
  ParallelTopicModel model = ParallelTopicModel.read(new File(inferencerFile));
  TopicInferencer inferencer = model.getInferencer();
  ArrayList<Pipe> pipeList = new ArrayList<Pipe>();
  pipeList.add(new CharSequence2TokenSequence(Pattern.compile("\\p{L}\\p{L}+")));
  pipeList.add(new TokenSequence2FeatureSequence());
  InstanceList instances = new InstanceList(new SerialPipes(pipeList));
  Reader fileReader = new InputStreamReader(new FileInputStream(new File(fileName)), "UTF-8");
  instances.addThruPipe(
      new CsvIterator(
          fileReader,
          Pattern.compile("^(\\S*)[\\s,]*(\\S*)[\\s,]*(.*)$"),
          3, 2, 1)); // data, label, name fields
  double[] testProbabilities = inferencer.getSampledDistribution(instances.get(1), 10, 1, 5);
  // One probability per topic.
  for (int i = 0; i < testProbabilities.length; i++)
    System.out.println(i + ": " + testProbabilities[i]);
}
/* One iteration of Gibbs sampling, across all documents. */
public void sampleTopicsForAllDocs(Randoms r) {
  double[] topicWeights = new double[numTopics];
  // Loop over every document in the corpus
  for (int di = 0; di < topics.length; di++) {
    sampleTopicsForOneDoc(
        (FeatureSequence) ilist.get(di).getData(),
        topics[di],
        docTopicCounts[di],
        topicWeights,
        r);
  }
}
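// Usage sketch, not from the original source: assumes the model's count arrays
// (docTopicCounts, typeTopicCounts, tokensPerTopic) were already initialized,
// e.g. by estimate(...). Runs 1000 full Gibbs sweeps over the corpus.
Randoms r = new Randoms();
for (int iteration = 0; iteration < 1000; iteration++)
  sampleTopicsForAllDocs(r);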
public static void main(String[] args) {
  // String malletFile = "dataset/vlc_lectures.all.en.f8.mallet";
  // String simFile = "dataset/vlc/sim5p.csv";
  // String solutionFile = "dataset/vlc/task1_solution.en.f8.lm.txt";
  // String queryFile = "dataset/task1_query.en.f8.txt";
  // String targetFile = "dataset/task1_target.en.f8.txt";
  String malletFile = "dataset/vlc/folds/all.0.4189.mallet";
  String trainMalletFile = "dataset/vlc/folds/training.0.mallet";
  String testMalletFile = "dataset/vlc/folds/test.0.mallet";
  String queryFile = "dataset/vlc/folds/query.0.csv";
  String linkFile = "dataset/vlc/folds/trainingPairs.0.csv";
  String targetFile = "dataset/vlc/folds/target.0.csv";
  String solutionFile = "dataset/vlc/task1_solution.en.f8.lm.txt";
  int numTopics = 160;
  int numIterations = 200;
  double alpha = 0.0016;
  double beta = 0.0001;
  InstanceList train = InstanceList.load(new File(trainMalletFile));
  InstanceList test = InstanceList.load(new File(testMalletFile));
  SeparateParallelLda spl = new SeparateParallelLda(train, test);
  spl.trainDocuments(numTopics, numIterations, alpha, beta);
  spl.generateTestInference();
  spl.lda.printTopWords(System.out, 10, true);
  BasicTask1Solution solver = new Task1SolutionWithSeparateData(spl);
  double precision;
  try {
    solver.retrieveTask1Solution(queryFile, solutionFile);
    precision = Task1Solution.evaluateResult(targetFile, solutionFile);
    System.out.println(
        String.format(
            "SeparateParallelLda: iteration: %d, precision: %f", numIterations, precision));
  } catch (Exception e) {
    e.printStackTrace();
  }
}
public static BridgeInvocation_c resolveInstance(
    ModelRoot modelRoot,
    java.util.UUID p_m_statement_id,
    java.util.UUID p_m_brg_id,
    int p_m_bridgenamelinenumber,
    int p_m_bridgenamecolumn,
    int p_m_externalentitykeyletterslinenumber,
    int p_m_externalentitykeyletterscolumn) {
  InstanceList instances = modelRoot.getInstanceList(BridgeInvocation_c.class);
  BridgeInvocation_c source = null;
  synchronized (instances) {
    Object[] key = {p_m_statement_id};
    source = (BridgeInvocation_c) instances.get(key);
    if (source != null && !modelRoot.isCompareRoot()) {
      source.convertFromProxy();
      source.batchUnrelate();
      // pre-process the uuid so that we re-use the null uuid instance rather than creating a new one
      source.m_statement_id = IdAssigner.preprocessUUID(p_m_statement_id);
      // pre-process the uuid so that we re-use the null uuid instance rather than creating a new one
      source.m_brg_id = IdAssigner.preprocessUUID(p_m_brg_id);
      source.m_bridgenamelinenumber = p_m_bridgenamelinenumber;
      source.m_bridgenamecolumn = p_m_bridgenamecolumn;
      source.m_externalentitykeyletterslinenumber = p_m_externalentitykeyletterslinenumber;
      source.m_externalentitykeyletterscolumn = p_m_externalentitykeyletterscolumn;
      return source;
    }
  }
  // there is no instance matching the id
  BridgeInvocation_c new_inst =
      new BridgeInvocation_c(
          modelRoot,
          p_m_statement_id,
          p_m_brg_id,
          p_m_bridgenamelinenumber,
          p_m_bridgenamecolumn,
          p_m_externalentitykeyletterslinenumber,
          p_m_externalentitykeyletterscolumn);
  return new_inst;
}