/**
 * Builds a {@link NodeStore} pre-populated with {@code nodeCount} freshly created nodes.
 *
 * <p>Node ids are the decimal strings {@code "0"} .. {@code "nodeCount - 1"}, produced through
 * the singleton {@link GraphFactoryImpl}.
 *
 * @param nodeCount number of nodes to create and add
 * @return the populated store
 */
public static NodeStore generateNodeStore(int nodeCount) {
  final GraphFactoryImpl graphFactory = GraphFactoryImpl.getInstance();
  final NodeStore store = new NodeStore();
  for (int index = 0; index < nodeCount; index++) {
    store.add(graphFactory.newNode(String.valueOf(index)));
  }
  return store;
}
/**
 * Reports whether this store and every sub-store are in an OK (consistent) state.
 *
 * <p>Checks are evaluated in the same order as before (this store first, then relationship-type,
 * property, relationship and node stores) and stop at the first failing store.
 *
 * @return {@code true} only if all five checks pass
 */
public boolean isStoreOk() {
  if (!getStoreOk()) {
    return false;
  }
  if (!relTypeStore.getStoreOk()) {
    return false;
  }
  if (!propStore.getStoreOk()) {
    return false;
  }
  if (!relStore.getStoreOk()) {
    return false;
  }
  return nodeStore.getStoreOk();
}
/**
 * Refreshes the high-id / id-generator state of this store and its sub-stores.
 *
 * <p>NOTE(review): relTypeStore and propStore are refreshed recursively via
 * {@code updateIdGenerators()}, while relStore and nodeStore only get
 * {@code updateHighId()} — presumably because the latter two have no nested
 * sub-stores of their own; confirm this asymmetry is intentional.
 */
public void updateIdGenerators() {
  this.updateHighId();
  relTypeStore.updateIdGenerators();
  propStore.updateIdGenerators();
  relStore.updateHighId();
  nodeStore.updateHighId();
}
/**
 * Writes an id-usage report for each sub-store to the given line logger.
 *
 * <p>Emits a header line, delegates one line per store (node, relationship,
 * relationship-type, property), then flushes the underlying logger so the
 * report is persisted immediately.
 *
 * @param msgLog destination for the per-store id-usage lines
 */
public void logIdUsage(StringLogger.LineLogger msgLog) {
  msgLog.logLine("Id usage:");
  nodeStore.logIdUsage(msgLog);
  relStore.logIdUsage(msgLog);
  relTypeStore.logIdUsage(msgLog);
  propStore.logIdUsage(msgLog);
  stringLogger.flush();
}
/**
 * Rebuilds the id generators of every sub-store, then this store's own via
 * {@code super.rebuildIdGenerators()}. Sub-stores are handled first; the
 * superclass call comes last.
 */
@Override
public void rebuildIdGenerators() {
  relTypeStore.rebuildIdGenerators();
  propStore.rebuildIdGenerators();
  relStore.rebuildIdGenerators();
  nodeStore.rebuildIdGenerators();
  super.rebuildIdGenerators();
}
/**
 * Bulk-inserts the buffered nodes into the node store, records timing stats,
 * and clears the buffer.
 *
 * <p>On any failure the error is logged, counted against the stats, loader
 * errors are cleared on the store, and the buffer is discarded — the method
 * never rethrows (best-effort batch load).
 *
 * @param nodeLoadBuffer batch of nodes to insert; emptied on both success and failure
 */
private void loadNodes(ArrayList<Node> nodeLoadBuffer) {
  long actualIds[] = null;
  long timestart = System.nanoTime();
  try {
    // Bulk insert; the store returns the ids it actually assigned.
    actualIds = nodeStore.bulkAddNodes(dbid, nodeLoadBuffer);
    long timetaken = (System.nanoTime() - timestart);
    nodesLoaded += nodeLoadBuffer.size();

    // Check that expected ids were allocated
    assert (actualIds.length == nodeLoadBuffer.size());
    for (int i = 0; i < actualIds.length; i++) {
      if (nodeLoadBuffer.get(i).id != actualIds[i]) {
        // Mismatch is only warned about, not treated as fatal.
        logger.warn(
            "Expected ID of node: "
                + nodeLoadBuffer.get(i).id
                + " != "
                + actualIds[i]
                + " the actual ID");
      }
    }
    nodeLoadBuffer.clear();

    // convert to microseconds
    // NOTE(review): only addStats gets microseconds (timetaken / 1000);
    // recordLatency receives raw nanoseconds — presumably each sink expects
    // a different unit; confirm against their APIs.
    stats.addStats(LinkBenchOp.LOAD_NODE_BULK, timetaken / 1000, false);
    latencyStats.recordLatency(loaderId, LinkBenchOp.LOAD_NODE_BULK, timetaken);

    // Periodic progress report once nodesLoaded crosses the next threshold.
    if (nodesLoaded >= nextReport) {
      double totalTimeTaken = (System.currentTimeMillis() - startTime_ms) / 1000.0;
      logger.debug(
          String.format(
              "Loader #%d: %d/%d nodes loaded at %f nodes/sec",
              loaderId, nodesLoaded, totalNodes, nodesLoaded / totalTimeTaken));
      nextReport += REPORT_INTERVAL;
    }
  } catch (Throwable e) { // Catch exception if any
    long endtime2 = System.nanoTime();
    long timetaken2 = (endtime2 - timestart) / 1000; // microseconds
    logger.error("Error: " + e.getMessage(), e);
    // Record the failed operation's duration with the error flag set.
    stats.addStats(LinkBenchOp.LOAD_NODE_BULK, timetaken2, true);
    nodeStore.clearErrors(loaderId);
    nodeLoadBuffer.clear();
    return;
  }
}
/**
 * Collects the window-pool statistics of all sub-stores into one list.
 *
 * <p>Order is preserved from the original implementation: node, property,
 * relationship, then relationship-type store stats.
 *
 * @return a new mutable list aggregating every sub-store's stats
 */
@Override
public List<WindowPoolStats> getAllWindowPoolStats() {
  final List<WindowPoolStats> combined =
      new ArrayList<WindowPoolStats>(nodeStore.getAllWindowPoolStats());
  combined.addAll(propStore.getAllWindowPoolStats());
  combined.addAll(relStore.getAllWindowPoolStats());
  combined.addAll(relTypeStore.getAllWindowPoolStats());
  return combined;
}
/**
 * Marks every sub-store, then this store (via super), as OK, and flags the
 * store as started. Sub-stores are handled before the superclass call.
 */
@Override
public void makeStoreOk() {
  relTypeStore.makeStoreOk();
  propStore.makeStoreOk();
  relStore.makeStoreOk();
  nodeStore.makeStoreOk();
  super.makeStoreOk();
  isStarted = true;
}
/**
 * Flushes every sub-store to disk.
 *
 * <p>If any sub-store reference is {@code null} (store not fully initialized
 * or already closed), nothing is flushed at all — same all-or-nothing guard
 * as before, with null checks evaluated in the same order.
 */
@Override
public void flushAll() {
  final boolean allStoresPresent =
      relTypeStore != null && propStore != null && relStore != null && nodeStore != null;
  if (!allStoresPresent) {
    return;
  }
  relTypeStore.flushAll();
  propStore.flushAll();
  relStore.flushAll();
  nodeStore.flushAll();
}
/**
 * Loader-thread entry point: initializes the node store, resets ids to the
 * configured start, then generates and bulk-loads nodes for the id range
 * [MIN_ID, MAX_ID), reporting progress periodically.
 *
 * <p>NOTE(review): on the reset-failure path this returns without calling
 * {@code nodeStore.close()}, and the final {@code close()} is not in a
 * finally block — confirm whether the store is expected to survive / be
 * cleaned up elsewhere in those cases.
 */
@Override
public void run() {
  logger.info("Starting loader thread #" + loaderId + " loading nodes");

  try {
    this.nodeStore.initialize(props, Phase.LOAD, loaderId);
  } catch (Exception e) {
    // Initialization failure is fatal for this thread.
    logger.error("Error while initializing store", e);
    throw new RuntimeException(e);
  }

  try {
    // Set up ids to start at desired range
    nodeStore.resetNodeStore(dbid, ConfigUtil.getLong(props, Config.MIN_ID));
  } catch (Exception e) {
    // Cannot load nodes with unknown id state; abort this loader quietly.
    logger.error("Error while resetting IDs, cannot proceed with " + "node loading", e);
    return;
  }

  int bulkLoadBatchSize = nodeStore.bulkLoadBatchSize();
  ArrayList<Node> nodeLoadBuffer = new ArrayList<Node>(bulkLoadBatchSize);

  long maxId = ConfigUtil.getLong(props, Config.MAX_ID);
  long startId = ConfigUtil.getLong(props, Config.MIN_ID);
  totalNodes = maxId - startId;
  nextReport = startId + REPORT_INTERVAL;
  startTime_ms = System.currentTimeMillis();
  lastDisplayTime_ms = startTime_ms;

  // Generate nodes one id at a time; genNode buffers them and (presumably)
  // triggers a bulk load when the buffer reaches bulkLoadBatchSize — confirm
  // against genNode's definition.
  for (long id = startId; id < maxId; id++) {
    genNode(rng, id, nodeLoadBuffer, bulkLoadBatchSize);
    long now = System.currentTimeMillis();
    if (lastDisplayTime_ms + displayFreq_ms <= now) {
      displayAndResetStats();
    }
  }
  // Load any remaining data
  loadNodes(nodeLoadBuffer);

  logger.info("Loading of nodes [" + startId + "," + maxId + ") done");
  displayAndResetStats();
  nodeStore.close();
}
/**
 * Writes the version of this store and every sub-store to the given line
 * logger, then flushes the underlying logger so the output is persisted.
 *
 * @param msgLog destination for the per-store version lines
 */
@Override
public void logVersions(StringLogger.LineLogger msgLog) {
  msgLog.logLine("Store versions:");
  super.logVersions(msgLog);
  nodeStore.logVersions(msgLog);
  relStore.logVersions(msgLog);
  relTypeStore.logVersions(msgLog);
  propStore.logVersions(msgLog);
  stringLogger.flush();
}
/**
 * Generates {@code edgeCount} distinct random edges between nodes of the
 * given store, using a fixed seed (124) for reproducibility.
 *
 * <p>A small set of "leaf" nodes — at most min(10, 5% of nodeCount) — is
 * chosen up front and excluded from participating in any edge. Candidate
 * edges are rejection-sampled until enough edges with unique long ids have
 * been collected.
 *
 * <p>NOTE(review): if {@code edgeCount} exceeds the number of distinct
 * admissible edges (after leaf exclusion / self-loop policy), the while loop
 * never terminates — presumably callers keep edgeCount small relative to
 * nodeCount; confirm.
 *
 * @param nodeStore source of candidate endpoint nodes
 * @param edgeCount number of edges to produce
 * @param type edge type (currently unused in construction below — verify)
 * @param directed whether the produced edges are directed
 * @param allowSelfLoops whether source == target edges are admitted
 * @return the generated edges as an array
 */
public static EdgeImpl[] generateEdgeList(
    NodeStore nodeStore, int edgeCount, int type, boolean directed, boolean allowSelfLoops) {
  int nodeCount = nodeStore.size();
  final List<EdgeImpl> edgeList = new ArrayList<>();
  LongSet idSet = new LongOpenHashSet();
  Random r = new Random(124); // fixed seed: deterministic test data
  IntSet leafs = new IntOpenHashSet();
  if (nodeCount > 10) {
    // Pick distinct random "leaf" node ids; on a duplicate draw, retry (i--).
    for (int i = 0; i < Math.min(10, (int) (nodeCount * .05)); i++) {
      int id = r.nextInt(nodeCount);
      if (leafs.contains(id)) {
        i--;
      } else {
        leafs.add(id);
      }
    }
  }
  long cnt = 0; // monotonically increasing edge id
  while (idSet.size() < edgeCount) {
    int sourceId = r.nextInt(nodeCount);
    int targetId = r.nextInt(nodeCount);
    Node source = nodeStore.get(sourceId);
    Node target = nodeStore.get(targetId);
    EdgeImpl edge = new EdgeImpl(cnt++, source, target, 1.0, directed);
    // Accept only edges that avoid leafs, respect the self-loop policy,
    // and have a not-yet-seen long id.
    if (!leafs.contains(sourceId)
        && !leafs.contains(targetId)
        && (allowSelfLoops || (!allowSelfLoops && source != target))
        && !idSet.contains(edge.getLongId())) {
      edgeList.add(edge);
      idSet.add(edge.getLongId());
    }
  }
  return edgeList.toArray(new EdgeImpl[0]);
}
/**
 * Closes the node, relationship, property and relationship-type stores.
 *
 * <p>Each reference is nulled after closing so repeated calls are safe
 * (idempotent teardown). The last-committed-tx-id setter is closed first.
 */
@Override
protected void closeStorage() {
  if (lastCommittedTxIdSetter != null) lastCommittedTxIdSetter.close();
  if (relTypeStore != null) {
    relTypeStore.close();
    relTypeStore = null;
  }
  if (propStore != null) {
    propStore.close();
    propStore = null;
  }
  if (relStore != null) {
    relStore.close();
    relStore = null;
  }
  if (nodeStore != null) {
    nodeStore.close();
    nodeStore = null;
  }
}
/**
 * Writes twice as many node records as the initially memory-mapped region can
 * hold, then reads them all back — verifying the store grows the file while
 * continuing to map new regions.
 */
@Test
public void shouldGrowAFileWhileContinuingToMemoryMapNewRegions() throws Exception {
  // don't run on windows because memory mapping doesn't work properly there
  assumeTrue(!osIsWindows());

  // given
  int NUMBER_OF_RECORDS = 1000000;
  File storeDir = TargetDirectory.forTest(getClass()).makeGraphDbDir();
  // Mapped memory is sized for NUMBER_OF_RECORDS, but we will write 2x that.
  Config config =
      new Config(
          stringMap(
              mapped_memory_total_size.name(),
              mmapSize(NUMBER_OF_RECORDS, NodeStore.RECORD_SIZE),
              Configuration.store_dir.name(),
              storeDir.getPath()),
          NodeStore.Configuration.class);
  DefaultIdGeneratorFactory idGeneratorFactory = new DefaultIdGeneratorFactory();
  Monitors monitors = new Monitors();
  DefaultFileSystemAbstraction fileSystemAbstraction = new DefaultFileSystemAbstraction();
  PageCache pageCache = pageCacheRule.getPageCache(fileSystemAbstraction, config);
  StoreFactory storeFactory =
      new StoreFactory(
          config,
          idGeneratorFactory,
          pageCache,
          fileSystemAbstraction,
          StringLogger.DEV_NULL,
          monitors);
  File fileName = new File(storeDir, NeoStore.DEFAULT_NAME + ".nodestore.db");
  storeFactory.createEmptyStore(
      fileName, storeFactory.buildTypeDescriptorAndVersion(NodeStore.TYPE_DESCRIPTOR));
  NodeStore nodeStore =
      new NodeStore(
          fileName,
          config,
          idGeneratorFactory,
          pageCache,
          fileSystemAbstraction,
          StringLogger.DEV_NULL,
          null,
          StoreVersionMismatchHandler.THROW_EXCEPTION,
          monitors);

  // when: write 2x NUMBER_OF_RECORDS records, forcing the file to grow past
  // the initially mapped size. Each record's nextRel field is set to i.
  int iterations = 2 * NUMBER_OF_RECORDS;
  long startingId = nodeStore.nextId();
  long nodeId = startingId;
  for (int i = 0; i < iterations; i++) {
    NodeRecord record = new NodeRecord(nodeId, false, i, 0);
    record.setInUse(true);
    nodeStore.updateRecord(record);
    nodeId = nodeStore.nextId();
  }

  // then: read every record back and check in-use flag and nextRel value.
  // NOTE(review): setId uses startingId + i but getRecord is called with i —
  // these only agree if startingId == 0 (fresh empty store); confirm that
  // assumption or pass startingId + i to getRecord as well.
  NodeRecord record = new NodeRecord(0, false, 0, 0);
  for (int i = 0; i < iterations; i++) {
    record.setId(startingId + i);
    nodeStore.getRecord(i, record);
    assertTrue("record[" + i + "] should be in use", record.inUse());
    assertThat(
        "record[" + i + "] should have nextRelId of " + i, record.getNextRel(), is((long) i));
  }

  nodeStore.close();
}