private void optimize() { logger.info("optimizing ... (" + Helper.getMemInfo() + ")"); graph.optimize(); logger.info("finished optimize (" + Helper.getMemInfo() + ")"); // move this into the GraphStorage.optimize method? if (sortGraph) { logger.info("sorting ... (" + Helper.getMemInfo() + ")"); GraphStorage newGraph = GHUtility.newStorage(graph); GHUtility.sortDFS(graph, newGraph); graph = newGraph; } }
/** Logs the memory footprint of the node-id map once node processing is done. */
@Override
void startWayProcessing() {
    // Rough per-slot byte estimate for the map — presumably 12-byte entry + 1 byte
    // overhead; TODO confirm against the map implementation.
    int mapMB = (int) (osmIdToIndexMap.capacity() * (12f + 1) / Helper.MB);
    LoggerFactory.getLogger(getClass())
            .info("finished node processing. osmIdMap:" + mapMB + "MB, " + Helper.getMemInfo());
}
/** Flushes the in-memory graph state to the underlying storage. */
private void flush() {
    // Fix: the original message ended with a stray ")" and no matching "(" —
    // every sibling method logs "(" + Helper.getMemInfo() + ")", so do the same here.
    logger.info("flushing graph " + graph.toString() + ", details:" + graph.toDetailsString()
            + ", (" + Helper.getMemInfo() + ")");
    graph.flush();
}
/**
 * Runs the configured preparation step (e.g. contraction hierarchies) if enabled,
 * and records in the graph properties whether it was executed.
 */
public void prepare() {
    boolean doRun = doPrepare && prepare != null;
    graph.getProperties().put("prepare.done", doRun);
    if (!doRun)
        return;

    // CH preparation currently only supports a single vehicle profile.
    boolean isCH = prepare instanceof PrepareContractionHierarchies;
    if (isCH && encodingManager.getVehicleCount() > 1)
        throw new IllegalArgumentException("Contraction hierarchies preparation "
                + "requires (at the moment) only one vehicle. But was:" + encodingManager);

    logger.info("calling prepare.doWork ... (" + Helper.getMemInfo() + ")");
    prepare.doWork();
}
/**
 * Preprocessing of OSM file to select nodes which are used for highways. This allows a more
 * compact graph data structure.
 */
@Override
public void preProcess(InputStream osmXml) {
    // Pre-size the pillar coordinate arrays to a fraction of the expected node count.
    pillarLats.create(Math.max(expectedNodes / 50, 100));
    pillarLons.create(Math.max(expectedNodes / 50, 100));
    if (osmXml == null)
        throw new AssertionError("Stream cannot be empty");

    // NOTE(review): the factory is used with default settings — DTD/external-entity
    // resolution is not explicitly disabled. Consider hardening against XXE if the
    // OSM input can come from untrusted sources.
    XMLInputFactory factory = XMLInputFactory.newInstance();
    XMLStreamReader sReader = null;
    try {
        sReader = factory.createXMLStreamReader(osmXml, "UTF-8");
        long tmpCounter = 1;
        // Stream through the whole document; only <way> start elements are of interest.
        for (int event = sReader.next();
                event != XMLStreamConstants.END_DOCUMENT;
                event = sReader.next(), tmpCounter++) {
            // Progress logging every 50M parse events.
            if (tmpCounter % 50000000 == 0)
                logger.info(nf(tmpCounter) + " (preprocess), osmIdMap:"
                        + nf(osmIdToIndexMap.size()) + " ("
                        + nf(osmIdToIndexMap.capacity()) + ") " + Helper.getMemInfo());

            switch (event) {
                case XMLStreamConstants.START_ELEMENT:
                    if ("way".equals(sReader.getLocalName())) {
                        boolean valid = parseWay(sReader);
                        if (valid) {
                            // Mark every node referenced by an accepted way so the
                            // main pass knows it belongs to a highway.
                            int s = wayNodes.size();
                            for (int index = 0; index < s; index++) {
                                setHasHighways(wayNodes.get(index));
                            }
                        }
                    }
                    break;
            }
        }
    } catch (XMLStreamException ex) {
        throw new RuntimeException("Problem while parsing file", ex);
    } finally {
        // Null-safe close of the stream reader.
        Helper7.close(sReader);
    }
}
protected void cleanUp() { int prev = graph.getNodes(); PrepareRoutingSubnetworks preparation = new PrepareRoutingSubnetworks(graph, encodingManager); logger.info("start finding subnetworks, " + Helper.getMemInfo()); preparation.doWork(); int n = graph.getNodes(); // calculate remaining subnetworks int remainingSubnetworks = preparation.findSubnetworks().size(); logger.info( "edges: " + graph.getAllEdges().getMaxId() + ", nodes " + n + ", there were " + preparation.getSubNetworks() + " subnetworks. removed them => " + (prev - n) + " less nodes. Remaining subnetworks:" + remainingSubnetworks); }
/**
 * Parses the given OSM file into the already-initialized graph.
 *
 * @param _osmFile path of the OSM file to import
 * @return the reader that performed the import
 * @throws IllegalStateException if the graph is not initialized or the file is missing
 */
protected OSMReader importOSM(String _osmFile) throws IOException {
    if (graph == null)
        throw new IllegalStateException("Load or init graph before import OSM data");

    setOSMFile(_osmFile);
    File inputFile = new File(osmFile);
    if (!inputFile.exists())
        throw new IllegalStateException(
                "Your specified OSM file does not exist:" + inputFile.getAbsolutePath());

    logger.info("start creating graph from " + osmFile);
    OSMReader reader = new OSMReader(graph, expectedCapacity)
            .setWorkerThreads(workerThreads)
            .setEncodingManager(encodingManager)
            .setWayPointMaxDistance(wayPointMaxDistance)
            .setEnableInstructions(enableInstructions);
    logger.info("using " + graph.toString() + ", memory:" + Helper.getMemInfo());
    reader.doOSM2Graph(inputFile);
    return reader;
}
/**
 * Contracts all nodes of the graph in priority order (node-ordering phase of
 * contraction hierarchies): repeatedly polls the lowest-priority node, adds the
 * shortcuts required to bypass it, assigns it a level, and optionally refreshes
 * the priorities of its uncontracted neighbors. Periodic and lazy priority
 * updates are controlled by the *Percentage fields.
 */
void contractNodes() {
    // NOTE(review): integer division — if meanDegree is a floating-point field the
    // fractional part is dropped here; also divides by getNodes() without a zero
    // guard. TODO confirm both are intended.
    meanDegree = g.getAllEdges().getMaxId() / g.getNodes();
    int level = 1;
    counter = 0;
    int initSize = sortedNodes.getSize();
    // NOTE(review): size/100*pct — integer division first, so this can round to 0
    // for graphs with fewer than 100 nodes. TODO confirm acceptable.
    int logSize = (int) Math.round(Math.max(10, sortedNodes.getSize() / 100 * logMessagesPercentage));
    if (logMessagesPercentage == 0)
        logSize = Integer.MAX_VALUE;

    // preparation takes longer but queries are slightly faster with preparation
    // => enable it but call not so often
    boolean periodicUpdate = true;
    StopWatch periodSW = new StopWatch();
    int updateCounter = 0;
    int periodicUpdatesCount = Math.max(10, sortedNodes.getSize() / 100 * periodicUpdatesPercentage);
    if (periodicUpdatesPercentage == 0)
        periodicUpdate = false;

    // disable as preparation is slower and query time does not benefit
    int lastNodesLazyUpdates = lastNodesLazyUpdatePercentage == 0
            ? 0
            : sortedNodes.getSize() / 100 * lastNodesLazyUpdatePercentage;
    StopWatch lazySW = new StopWatch();

    // Recompute priority of uncontracted neighbors.
    // Without neighborupdates preparation is faster but we need them
    // to slightly improve query time. Also if not applied too often it decreases the shortcut
    // number.
    boolean neighborUpdate = true;
    if (neighborUpdatePercentage == 0)
        neighborUpdate = false;

    StopWatch neighborSW = new StopWatch();
    LevelGraphStorage lg = ((LevelGraphStorage) g);
    while (!sortedNodes.isEmpty()) {
        // periodically update priorities of ALL nodes
        if (periodicUpdate && counter > 0 && counter % periodicUpdatesCount == 0) {
            periodSW.start();
            sortedNodes.clear();
            int len = g.getNodes();
            for (int node = 0; node < len; node++) {
                // level != 0 means the node is already contracted — skip it
                if (g.getLevel(node) != 0)
                    continue;

                int priority = oldPriorities[node] = calculatePriority(node);
                sortedNodes.insert(node, priority);
            }
            periodSW.stop();
            updateCounter++;
        }

        if (counter % logSize == 0) {
            // TODO necessary?
            System.gc();
            logger.info(Helper.nf(counter) + ", updates:" + updateCounter
                    + ", nodes: " + Helper.nf(sortedNodes.getSize())
                    + ", shortcuts:" + Helper.nf(newShortcuts)
                    + ", dijkstras:" + Helper.nf(dijkstraCount)
                    + ", t(dijk):" + (int) dijkstraSW.getSeconds()
                    + ", t(period):" + (int) periodSW.getSeconds()
                    + ", t(lazy):" + (int) lazySW.getSeconds()
                    + ", t(neighbor):" + (int) neighborSW.getSeconds()
                    + ", meanDegree:" + (long) meanDegree
                    + ", algo:" + algo.getMemoryUsageAsString()
                    + ", " + Helper.getMemInfo());
            // reset the interval stopwatches so each log line reports per-interval times
            dijkstraSW = new StopWatch();
            periodSW = new StopWatch();
            lazySW = new StopWatch();
            neighborSW = new StopWatch();
        }

        counter++;
        int polledNode = sortedNodes.pollKey();
        // lazy update phase for the tail end of the queue: re-check the polled
        // node's priority before contracting it
        if (sortedNodes.getSize() < lastNodesLazyUpdates) {
            lazySW.start();
            int priority = oldPriorities[polledNode] = calculatePriority(polledNode);
            if (!sortedNodes.isEmpty() && priority > sortedNodes.peekValue()) {
                // current node got more important => insert as new value and contract it later
                sortedNodes.insert(polledNode, priority);
                lazySW.stop();
                continue;
            }
            lazySW.stop();
        }

        // contract!
        newShortcuts += addShortcuts(polledNode);
        g.setLevel(polledNode, level);
        level++;

        EdgeSkipIterator iter = vehicleAllExplorer.setBaseNode(polledNode);
        while (iter.next()) {
            int nn = iter.getAdjNode();
            if (g.getLevel(nn) != 0)
                // already contracted no update necessary
                continue;

            // probabilistically refresh the neighbor's priority
            if (neighborUpdate && rand.nextInt(100) < neighborUpdatePercentage) {
                neighborSW.start();
                int oldPrio = oldPriorities[nn];
                int priority = oldPriorities[nn] = calculatePriority(nn);
                if (priority != oldPrio)
                    sortedNodes.update(nn, oldPrio, priority);

                neighborSW.stop();
            }

            if (removesHigher2LowerEdges)
                lg.disconnect(vehicleAllTmpExplorer, iter);
        }
    }

    // Preparation works only once so we can release temporary data.
    // The preparation object itself has to be intact to create the algorithm.
    close();
    logger.info("took:" + (int) allSW.stop().getSeconds()
            + ", new shortcuts: " + newShortcuts
            + ", " + prepareWeighting
            + ", " + prepareEncoder
            + ", removeHigher2LowerEdges:" + removesHigher2LowerEdges
            + ", dijkstras:" + dijkstraCount
            + ", t(dijk):" + (int) dijkstraSW.getSeconds()
            + ", t(period):" + (int) periodSW.getSeconds()
            + ", t(lazy):" + (int) lazySW.getSeconds()
            + ", t(neighbor):" + (int) neighborSW.getSeconds()
            + ", meanDegree:" + (long) meanDegree
            + ", initSize:" + initSize
            + ", periodic:" + periodicUpdatesPercentage
            + ", lazy:" + lastNodesLazyUpdatePercentage
            + ", neighbor:" + neighborUpdatePercentage);
}