public void testShortestPath() {
  graph.prettyPrint();

  IndexedWord word1 = graph.getNodeByIndex(10);
  IndexedWord word2 = graph.getNodeByIndex(14);
  System.out.println("word1: " + word1);
  System.out.println("word1: " + word1.hashCode());
  System.out.println("word2: " + word2);
  System.out.println("word2: " + word2.hashCode());
  System.out.println("word eq: " + word1.equals(word2));
  System.out.println("word eq: " + (word1.hashCode() == word2.hashCode()));
  System.out.println("word eq: " + (word1.toString().equals(word2.toString())));

  // a shortest undirected path between two distinct nodes should exist
  List<SemanticGraphEdge> edges = graph.getShortestUndirectedPathEdges(word1, word2);
  System.out.println("path: " + edges);
  assertNotNull(edges);

  // the node path should start at word1 and end at word2
  List<IndexedWord> nodes = graph.getShortestUndirectedPathNodes(word1, word2);
  System.out.println("path: " + nodes);
  assertNotNull(nodes);
  assertEquals(word1, nodes.get(0));
  assertEquals(word2, nodes.get(nodes.size() - 1));

  // the path from a node to itself contains no edges ...
  edges = graph.getShortestUndirectedPathEdges(word1, word1);
  System.out.println("path: " + edges);
  assertNotNull(edges);
  assertEquals(0, edges.size());

  // ... and exactly one node, the node itself
  nodes = graph.getShortestUndirectedPathNodes(word1, word1);
  System.out.println("path: " + nodes);
  assertNotNull(nodes);
  assertEquals(1, nodes.size());
  assertEquals(word1, nodes.get(0));
}
private void testParseTree() {
  try {
    Properties props = new Properties();
    props.setProperty("annotators", "tokenize, ssplit, pos, lemma, ner, parse, dcoref");
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

    // read some text in the text variable
    String text = "Give me a list of all bandleaders that play trumpet.";

    // create an empty Annotation just with the given text
    Annotation document = new Annotation(text);

    // run all Annotators on this text
    pipeline.annotate(document);

    // these are all the sentences in this document
    // a CoreMap is essentially a Map that uses class objects as keys and has values with custom types
    List<CoreMap> sentences = document.get(SentencesAnnotation.class);

    for (CoreMap sentence : sentences) {
      // this is the parse tree of the current sentence
      Tree tree = sentence.get(TreeAnnotation.class);

      // this is the Stanford dependency graph of the current sentence
      SemanticGraph dependencies = sentence.get(CollapsedCCProcessedDependenciesAnnotation.class);
      Set<IndexedWord> vertices = dependencies.vertexSet();
      List<SemanticGraphEdge> edges = dependencies.edgeListSorted();

      // print every dependency edge and every node of the graph
      for (SemanticGraphEdge e : edges) {
        System.out.println(e.toString());
      }
      for (IndexedWord i : vertices) {
        System.out.println(i.toString());
      }
    }
  } catch (Exception e) {
    // don't swallow errors silently; at least report them
    e.printStackTrace();
  }
}
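
// A minimal sketch (not part of the original code) tying the two methods together: it takes a
// SemanticGraph obtained as in testParseTree() and applies the same shortest-path API exercised
// in testShortestPath(). The method name, parameters, and printed messages are illustrative
// assumptions; only the SemanticGraph calls (getNodeByIndexSafe, getShortestUndirectedPathNodes,
// getShortestUndirectedPathEdges) come from the CoreNLP API already used above.
private void printShortestDependencyPath(SemanticGraph dependencies, int index1, int index2) {
  // Token indices are 1-based; getNodeByIndexSafe returns null instead of throwing
  // when no node with that index exists in the graph.
  IndexedWord word1 = dependencies.getNodeByIndexSafe(index1);
  IndexedWord word2 = dependencies.getNodeByIndexSafe(index2);
  if (word1 == null || word2 == null) {
    System.out.println("at least one of the requested token indices is not a node in the graph");
    return;
  }
  // Undirected shortest path, i.e. edge direction is ignored, as in testShortestPath().
  List<IndexedWord> nodes = dependencies.getShortestUndirectedPathNodes(word1, word2);
  List<SemanticGraphEdge> edges = dependencies.getShortestUndirectedPathEdges(word1, word2);
  System.out.println("path nodes: " + nodes);
  System.out.println("path edges: " + edges);
}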