/** Computes the next step of the Power Method. */
public void step() throws IOException {
    double[] oldRank = rank, newRank = previousRank;
    DoubleArrays.fill(newRank, 0.0);

    // For each node, compute its outdegree and redistribute its rank among its successors.
    double accum = 0.0;
    progressLogger.expectedUpdates = numNodes;
    progressLogger.start("Iteration " + (++iterationNumber) + "...");

    final ArcLabelledNodeIterator nodeIterator = g.nodeIterator();
    int i, outdegree, j, n = numNodes;
    int[] succ;
    Label[] lab;

    while (n-- != 0) {
        i = nodeIterator.nextInt();
        outdegree = nodeIterator.outdegree();
        // Dangling nodes and nodes in buckets hand their rank over to the whole graph.
        if (outdegree == 0 || buckets != null && buckets.get(i)) accum += oldRank[i];
        else {
            j = outdegree;
            succ = nodeIterator.successorArray();
            lab = nodeIterator.labelArray();
            while (j-- != 0) newRank[succ[j]] += (oldRank[i] * lab[j].getFloat()) / sumoutweight[i];
        }
        progressLogger.update();
    }
    progressLogger.done();

    final double accumOverNumNodes = accum / numNodes;
    final double oneOverNumNodes = 1.0 / numNodes;

    // Apply damping and distribute the accumulated rank according to the preference and adjustment vectors.
    if (preference != null)
        if (preferentialAdjustment == null)
            for (i = numNodes; i-- != 0;) newRank[i] = alpha * newRank[i] + (1 - alpha) * preference.getDouble(i) + alpha * accumOverNumNodes;
        else
            for (i = numNodes; i-- != 0;) newRank[i] = alpha * newRank[i] + (1 - alpha) * preference.getDouble(i) + alpha * accum * preferentialAdjustment.getDouble(i);
    else
        if (preferentialAdjustment == null)
            for (i = numNodes; i-- != 0;) newRank[i] = alpha * newRank[i] + (1 - alpha) * oneOverNumNodes + alpha * accumOverNumNodes;
        else
            for (i = numNodes; i-- != 0;) newRank[i] = alpha * newRank[i] + (1 - alpha) * oneOverNumNodes + alpha * accum * preferentialAdjustment.getDouble(i);

    // Make the rank just computed the new rank.
    rank = newRank;
    previousRank = oldRank;

    // Compute derivatives: update each requested order using the difference between consecutive ranks.
    n = iterationNumber;
    if (subset == null) {
        for (i = 0; i < order.length; i++) {
            final int k = order[i];
            final double alphak = Math.pow(alpha, k);
            final double nFallingK = Util.falling(n, k);
            for (j = 0; j < numNodes; j++) derivative[i][j] += nFallingK * (rank[j] - previousRank[j]) / alphak;
        }
    }
    else {
        for (i = 0; i < order.length; i++) {
            final int k = order[i];
            final double alphak = Math.pow(alpha, k);
            final double nFallingK = Util.falling(n, k);
            for (int t : subset) derivative[i][t] += nFallingK * (rank[t] - previousRank[t]) / alphak;
        }
    }

    // Compute coefficients, if required.
    if (coeffBasename != null) {
        final DataOutputStream coefficients = new DataOutputStream(
                new FastBufferedOutputStream(new FileOutputStream(coeffBasename + "-" + iterationNumber)));
        final double alphaN = Math.pow(alpha, n);
        for (i = 0; i < numNodes; i++) coefficients.writeDouble((rank[i] - previousRank[i]) / alphaN);
        coefficients.close();
    }
}
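/*
 * A minimal, self-contained sketch of the same weighted update on a hand-built three-node graph,
 * without the graph/iterator machinery used above. The class name, the toy graph, and the damping
 * factor are illustrative assumptions, not part of the surrounding code. It shows only the two
 * pieces of one iteration: spreading oldRank[i] * weight / sumOutWeight[i] along each arc, then
 * applying damping and the uniform redistribution of the rank accumulated by dangling nodes.
 */
public final class WeightedPowerStepSketch {
    public static void main(String[] args) {
        final double alpha = 0.85;
        // Successor lists and arc weights: node 0 -> {1, 2}, node 1 -> {2}, node 2 is dangling.
        final int[][] succ = { { 1, 2 }, { 2 }, {} };
        final double[][] weight = { { 1.0, 3.0 }, { 2.0 }, {} };
        final int n = succ.length;

        double[] oldRank = new double[n];
        java.util.Arrays.fill(oldRank, 1.0 / n);
        double[] newRank = new double[n];

        double accum = 0; // rank held by dangling nodes
        for (int i = 0; i < n; i++) {
            if (succ[i].length == 0) { accum += oldRank[i]; continue; }
            double sumOutWeight = 0;
            for (double w : weight[i]) sumOutWeight += w;
            for (int j = 0; j < succ[i].length; j++)
                newRank[succ[i][j]] += oldRank[i] * weight[i][j] / sumOutWeight;
        }
        // Uniform preference vector: damping plus uniform redistribution of the dangling rank.
        for (int i = 0; i < n; i++)
            newRank[i] = alpha * newRank[i] + (1 - alpha) / n + alpha * accum / n;

        System.out.println(java.util.Arrays.toString(newRank));
    }
}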
/**
 * For a specific subset of blocks (child nodes), finds a "base" subset of parents for which the
 * block's log-likelihood is not -Infinity, using a greedy set-cover heuristic.
 *
 * @param candidateParentsPerNode the candidate parents of each node.
 * @param chosenArcsPerNode output map that will receive, for each node, the arcs chosen by the heuristic.
 * @param setOfBlocks the blocks (child nodes) to initialize.
 * @param logLPerNode if not {@code null}, an output map that will receive the log-likelihood of each node.
 * @return the total log-likelihood of the processed blocks.
 */
protected double getOutOfMinusInfinity(Int2ObjectOpenHashMap<IntOpenHashSet> candidateParentsPerNode,
        Int2ObjectOpenHashMap<ObjectOpenHashSet<Arc>> chosenArcsPerNode, IntOpenHashSet setOfBlocks,
        TIntDoubleHashMap logLPerNode) {
    double totalLogL = 0;

    ProgressLogger pl = new ProgressLogger(LOGGER, ProgressLogger.TEN_SECONDS, "blocks");
    pl.start("Begin initializing, to avoid zero likelihood, using set-cover heuristic");
    pl.expectedUpdates = setOfBlocks.size();

    int nArcs = 0;
    for (int v : setOfBlocks) {
        pl.update();

        IntOpenHashSet vParents = candidateParentsPerNode.get(v);
        Int2ObjectOpenHashMap<IntOpenHashSet> parentActions = new Int2ObjectOpenHashMap<IntOpenHashSet>();
        Int2ObjectOpenHashMap<IntArrayList> cPlusV = auxiliary.getCplusOnline(v);
        Int2ObjectOpenHashMap<IntArrayList> cMinusV = auxiliary.getCminusOnline(v);

        if (cPlusV != null) {
            IntSet actions = cPlusV.keySet();
            // Heuristic: first add the parents that participate in A+ for most actions.
            for (int action : actions) {
                for (int u : cPlusV.get(action)) {
                    if (!parentActions.containsKey(u)) parentActions.put(u, new IntOpenHashSet());
                    parentActions.get(u).add(action);
                }
            }
        }

        KeepMaximum km = new KeepMaximum();
        km.addAllKey2Listsize(parentActions);

        IntOpenHashSet baseSetOfParents = new IntOpenHashSet();
        double logL = Double.NEGATIVE_INFINITY;
        // Greedily add the parent covering the most actions until the block's log-likelihood becomes finite.
        while (logL == Double.NEGATIVE_INFINITY && (km.getMaximumKey() != -1)) {
            int u = km.getMaximumKey();
            if (baseSetOfParents.contains(u)) throw new IllegalStateException("Attempted to add the same parent twice");
            baseSetOfParents.add(u);
            logL = blockLogLikelihood(v, cPlusV, cMinusV, baseSetOfParents);

            // Remove the actions covered by u from the remaining candidates and recompute the maximum.
            IntOpenHashSet uActions = parentActions.get(u);
            for (int parent : vParents) parentActions.get(parent).removeAll(uActions);
            vParents.remove(u);
            parentActions.remove(u);
            km.reset();
            km.addAllKey2Listsize(parentActions);
        }

        // Keep track of the likelihood.
        totalLogL += logL;
        if (logLPerNode != null) logLPerNode.put(v, logL);

        chosenArcsPerNode.put(v, new ObjectOpenHashSet<Arc>());
        for (int u : baseSetOfParents) {
            nArcs++;
            chosenArcsPerNode.get(v).add(new Arc(u, v));
        }
    }

    pl.stop("Done initialization. Added " + nArcs + " arcs, logLikelihood=" + totalLogL);
    return totalLogL;
}
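/*
 * A toy, standalone illustration of the greedy choice made above: repeatedly pick the candidate
 * parent that still covers the most actions (the role KeepMaximum plays over parentActions) and
 * discard the actions it covers. The stopping condition here is full coverage rather than a finite
 * blockLogLikelihood, and the class name, data, and node/action ids are made up for illustration;
 * this is a sketch of the heuristic, not the surrounding class's API.
 */
import java.util.*;

public final class GreedyParentCoverSketch {
    public static void main(String[] args) {
        // Candidate parent -> set of actions in which that parent precedes the child node.
        Map<Integer, Set<Integer>> parentActions = new HashMap<>();
        parentActions.put(10, new HashSet<>(Arrays.asList(1, 2, 3)));
        parentActions.put(11, new HashSet<>(Arrays.asList(3, 4)));
        parentActions.put(12, new HashSet<>(Arrays.asList(5)));

        Set<Integer> uncovered = new HashSet<>(Arrays.asList(1, 2, 3, 4, 5));
        List<Integer> baseSetOfParents = new ArrayList<>();

        while (!uncovered.isEmpty() && !parentActions.isEmpty()) {
            // Pick the parent covering the most still-uncovered actions.
            int best = -1, bestSize = -1;
            for (Map.Entry<Integer, Set<Integer>> e : parentActions.entrySet()) {
                Set<Integer> covered = new HashSet<>(e.getValue());
                covered.retainAll(uncovered);
                if (covered.size() > bestSize) { bestSize = covered.size(); best = e.getKey(); }
            }
            if (bestSize <= 0) break; // no remaining parent covers anything new
            baseSetOfParents.add(best);
            uncovered.removeAll(parentActions.remove(best));
        }
        System.out.println("chosen parents: " + baseSetOfParents + ", uncovered left: " + uncovered);
    }
}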