@Override
public void generate(String dbPath) {
  GraphDatabaseService graphDb = new EmbeddedGraphDatabase(dbPath);
  Index<Node> nodeIndex = graphDb.index()
      .forNodes("nodes", MapUtil.stringMap("provider", "lucene", "type", "fulltext"));
  Index<Relationship> relationshipIndex = graphDb.index()
      .forRelationships("relationships", MapUtil.stringMap("provider", "lucene", "type", "fulltext"));
  Transaction tx = graphDb.beginTx();
  try {
    Node n = graphDb.createNode();
    Node n2 = graphDb.createNode();
    Relationship rel = n.createRelationshipTo(n2, REL_TYPE);
    nodeIndex.add(n, "name", "alpha bravo");
    nodeIndex.add(n2, "name", "charlie delta");
    relationshipIndex.add(rel, "name", "echo foxtrot");
    tx.success();
  } finally {
    tx.finish();
  }
  graphDb.shutdown();
}
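// A minimal query sketch (not part of the original source) showing how the
// fulltext indexes populated by generate(String) above could be searched with
// Lucene query syntax; the database path is an assumed placeholder.
public void queryGeneratedIndexes(String dbPath) {
  GraphDatabaseService graphDb = new EmbeddedGraphDatabase(dbPath);
  try {
    Index<Node> nodeIndex = graphDb.index().forNodes("nodes");
    // Fulltext indexes tokenize values, so the single term "alpha" matches the
    // node indexed under "alpha bravo".
    for (Node hit : nodeIndex.query("name", "alpha")) {
      System.out.println("hit: " + hit.getId());
    }
  } finally {
    graphDb.shutdown();
  }
}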
@Test
public void shouldSendAMessageFromAClientWhichIsReceivedByAServer() throws Exception {
  // given
  CountDownLatch latch = new CountDownLatch(1);
  LifeSupport life = new LifeSupport();
  Server server1 = new Server(latch, MapUtil.stringMap(
      ClusterSettings.cluster_server.name(), "localhost:1234",
      ClusterSettings.server_id.name(), "1",
      ClusterSettings.initial_hosts.name(), "localhost:1234,localhost:1235"));
  life.add(server1);
  Server server2 = new Server(latch, MapUtil.stringMap(
      ClusterSettings.cluster_server.name(), "localhost:1235",
      ClusterSettings.server_id.name(), "2",
      ClusterSettings.initial_hosts.name(), "localhost:1234,localhost:1235"));
  life.add(server2);
  life.start();

  // when
  server1.process(Message.to(TestMessage.helloWorld, URI.create("cluster://127.0.0.1:1235"), "Hello World"));

  // then
  latch.await(5, TimeUnit.SECONDS);
  assertTrue("server1 should have processed the message", server1.processedMessage());
  assertTrue("server2 should have processed the message", server2.processedMessage());
  life.shutdown();
}
private MasterTxIdGenerator newGenerator(
    int slaveCount, int replication, SlavePriority slavePriority, boolean... failingSlaves)
    throws Exception {
  slaves = instantiateSlaves(slaveCount, failingSlaves);
  dataSource = new FakeDataSource();
  log = new FakeStringLogger();
  Config config = new Config(MapUtil.stringMap(HaSettings.tx_push_factor.name(), "" + replication));
  Neo4jJobScheduler scheduler = new Neo4jJobScheduler();
  MasterTxIdGenerator result = new MasterTxIdGenerator(
      MasterTxIdGenerator.from(config, slavePriority),
      log,
      new Slaves() {
        @Override
        public Iterable<Slave> getSlaves() {
          return slaves;
        }
      },
      new CommitPusher(scheduler));
  // Life
  try {
    scheduler.init();
    scheduler.start();
    result.init();
    result.start();
  } catch (Throwable e) {
    throw Exceptions.launderedException(e);
  }
  return result;
}
// Imports people ("pessoas") from a semicolon-separated resource file into the batch index.
public void importa(BatchInserter inserter, LuceneBatchInserterIndexProvider indexProvider) {
  BatchInserterIndex pessoas = indexProvider.nodeIndex("pessoas", MapUtil.stringMap("type", "fulltext"));
  Scanner sc = new Scanner(ImportadorPessoas.class.getResourceAsStream("/import/pessoas"));
  System.out.println("Início da importação das pessoas"); // "Start of the people import"
  while (sc.hasNextLine()) {
    String linha = sc.nextLine();
    String[] informacoesDePessoa = linha.split(";"); // id;name;age
    long id = Long.parseLong(informacoesDePessoa[0]);
    String nome = informacoesDePessoa[1];
    Integer idade = Integer.parseInt(informacoesDePessoa[2]);
    Map<String, Object> propriedades = MapUtil.map("nome", nome, "idade", idade);
    inserter.createNode(id, propriedades);
    pessoas.add(id, MapUtil.map("nome", nome));
    inserter.createRelationship(0, id, Relacionamentos.PESSOA, null);
  }
  pessoas.flush();
  System.out.println("Fim da importação das pessoas"); // "End of the people import"
}
@Before
public void before() throws Exception {
  dir = TargetDirectory.forTest(getClass()).directory("db", true);
  Map<String, String> configParams = MapUtil.stringMap();
  Config config = new Config(configParams);
  DefaultIdGeneratorFactory idGeneratorFactory = new DefaultIdGeneratorFactory();
  DefaultFileSystemAbstraction fs = new DefaultFileSystemAbstraction();
  StoreFactory factory = new StoreFactory(
      config,
      idGeneratorFactory,
      new DefaultWindowPoolFactory(),
      fs,
      StringLogger.DEV_NULL,
      new DefaultTxHook());
  String fileName = new File(dir, "arraystore").getAbsolutePath();
  factory.createDynamicArrayStore(fileName, 120);
  arrayStore = new DynamicArrayStore(
      fileName,
      config,
      IdType.ARRAY_BLOCK,
      idGeneratorFactory,
      new DefaultWindowPoolFactory(),
      fs,
      StringLogger.DEV_NULL);
}
public void importFromFile(String filePath) throws IOException {
  Map<String, Long> cache = new HashMap<String, Long>(COUNT);
  final File storeDir = new File(this.path);
  org.apache.commons.io.FileUtils.deleteDirectory(storeDir);
  BatchInserter batchInserter = new BatchInserterImpl(storeDir.getAbsolutePath());
  final BatchInserterIndexProvider indexProvider = new LuceneBatchInserterIndexProvider(batchInserter);
  final BatchInserterIndex index = indexProvider.nodeIndex("nodes", MapUtil.stringMap("type", "exact"));
  BufferedReader reader = new BufferedReader(new FileReader(filePath));
  String line = null;
  int nodes = 0;
  long time = System.currentTimeMillis();
  long batchTime = time;
  while ((line = reader.readLine()) != null) {
    final String[] nodeNames = line.split("\\|");
    final String name = nodeNames[0];
    final Map<String, Object> props = MapUtil.map("name", name);
    final long node = batchInserter.createNode(props);
    index.add(node, props);
    cache.put(name, node);
    nodes++;
    if ((nodes % REPORT_COUNT) == 0) {
      System.out.printf("%d nodes created. Took %d ms%n", nodes, (System.currentTimeMillis() - batchTime));
      batchTime = System.currentTimeMillis();
    }
  }
  System.out.println("Creating nodes took " + (System.currentTimeMillis() - time) / 1000 + " s");
  index.flush();
  reader.close();

  reader = new BufferedReader(new FileReader(filePath));
  int rels = 0;
  time = System.currentTimeMillis();
  batchTime = time;
  String relationshipType = "KNOWS";
  while ((line = reader.readLine()) != null) {
    final String[] nodeNames = line.split("\\|");
    final String name = nodeNames[0];
    // final Long from = index.get("name", name).getSingle();
    Long from = cache.get(name);
    for (int j = 1; j < nodeNames.length; j++) {
      // final Long to = index.get("name", nodeNames[j]).getSingle();
      final Long to = cache.get(nodeNames[j]); // look up the target node, not the source again
      batchInserter.createRelationship(from, to, DynamicRelationshipType.withName(relationshipType), null);
    }
    rels++;
    if ((rels % REPORT_COUNT) == 0) {
      System.out.printf("%d relationships created. Took %d ms%n", rels, (System.currentTimeMillis() - batchTime));
      batchTime = System.currentTimeMillis();
    }
  }
  System.out.println("Creating relationships took " + (System.currentTimeMillis() - time) / 1000 + " s");
  indexProvider.shutdown();
  batchInserter.shutdown();
}
private void startMember(int serverId) throws URISyntaxException {
  Clusters.Member member = spec.getMembers().get(serverId - 1);
  StringBuilder initialHosts = new StringBuilder(spec.getMembers().get(0).getHost());
  for (int i = 1; i < spec.getMembers().size(); i++) {
    initialHosts.append(",").append(spec.getMembers().get(i).getHost());
  }
  if (member.isFullHaMember()) {
    int haPort = new URI("cluster://" + member.getHost()).getPort() + 3000;
    GraphDatabaseBuilder graphDatabaseBuilder = new HighlyAvailableGraphDatabaseFactory()
        .newHighlyAvailableDatabaseBuilder(new File(new File(root, name), "server" + serverId).getAbsolutePath())
        .setConfig(ClusterSettings.cluster_name, name)
        .setConfig(ClusterSettings.initial_hosts, initialHosts.toString())
        .setConfig(HaSettings.server_id, serverId + "")
        .setConfig(ClusterSettings.cluster_server, member.getHost())
        .setConfig(HaSettings.ha_server, ":" + haPort)
        .setConfig(commonConfig);
    if (instanceConfig.containsKey(serverId)) {
      graphDatabaseBuilder.setConfig(instanceConfig.get(serverId));
    }
    config(graphDatabaseBuilder, name, serverId);
    logger.info("Starting cluster node " + serverId + " in cluster " + name);
    final GraphDatabaseService graphDatabase = graphDatabaseBuilder.newGraphDatabase();
    members.put(serverId, (HighlyAvailableGraphDatabase) graphDatabase);
    life.add(new LifecycleAdapter() {
      @Override
      public void stop() throws Throwable {
        graphDatabase.shutdown();
      }
    });
  } else {
    Map<String, String> config = MapUtil.stringMap(
        ClusterSettings.cluster_name.name(), name,
        ClusterSettings.initial_hosts.name(), initialHosts.toString(),
        ClusterSettings.cluster_server.name(), member.getHost());
    Logging clientLogging = new Logging() {
      @Override
      public StringLogger getLogger(Class loggingClass) {
        return new Slf4jStringLogger(logger);
      }
    };
    life.add(new ClusterClient(
        ClusterClient.adapt(new Config(config)),
        clientLogging,
        new CoordinatorIncapableCredentialsProvider()));
  }
  // logger.info( "Started cluster node " + serverId + " in cluster " + name );
}
private static void initializeIndex() {
  indexProvider = new LuceneBatchInserterIndexProvider(inserter);
  nodeIndex = indexProvider.nodeIndex("nodeIndex", MapUtil.stringMap("type", "exact"));
  // TODO: Does this have an effect at all?
  nodeIndex.setCacheCapacity(NodeKeys.TYPE, 100000);
  nodeIndex.setCacheCapacity(NodeKeys.NAME, 100000);
  nodeIndex.setCacheCapacity(NodeKeys.CODE, 100000);
}
static EmbeddedGraphDatabase startTemporaryDb(String targetDirectory, VerificationLevel verification) {
  if (verification != VerificationLevel.NONE) {
    return new EmbeddedGraphDatabase(
        targetDirectory,
        MapUtil.stringMap(
            Config.INTERCEPT_DESERIALIZED_TRANSACTIONS, "true",
            TransactionInterceptorProvider.class.getSimpleName() + "." + verification.interceptorName,
            verification.configValue));
  } else {
    return new EmbeddedGraphDatabase(targetDirectory);
  }
}
private Map<String, String> config(
    Class<? extends PropertyContainer> cls, String indexName, Map<String, String> config) {
  // TODO Doesn't look right
  if (config != null) {
    config = MapUtil.stringMap(
        new HashMap<String, String>(config), "provider", BerkeleyDbIndexImplementation.SERVICE_NAME);
    indexStore.setIfNecessary(cls, indexName, config);
    return config;
  } else {
    return indexStore.get(cls, indexName);
  }
}
private Pair<Map<String, String>, Boolean /*true=needs to be set*/> findIndexConfig(
    Class<? extends PropertyContainer> cls,
    String indexName,
    Map<String, String> suppliedConfig,
    Map<?, ?> dbConfig) {
  // Check stored config (has this index been created previously?)
  Map<String, String> storedConfig = indexStore.get(cls, indexName);
  if (storedConfig != null && suppliedConfig == null) {
    // Fill in "provider" if not already filled in, backwards compatibility issue
    Map<String, String> newConfig = injectDefaultProviderIfMissing(cls, indexName, dbConfig, storedConfig);
    if (newConfig != storedConfig) {
      indexStore.set(cls, indexName, newConfig);
    }
    return Pair.of(newConfig, Boolean.FALSE);
  }

  Map<String, String> configToUse = suppliedConfig;

  // Check db config properties for provider
  String provider = null;
  IndexImplementation indexProvider = null;
  if (configToUse == null) {
    provider = getDefaultProvider(indexName, dbConfig);
    configToUse = MapUtil.stringMap(PROVIDER, provider);
  } else {
    provider = configToUse.get(PROVIDER);
    provider = provider == null ? getDefaultProvider(indexName, dbConfig) : provider;
  }
  indexProvider = getIndexProvider(provider);
  configToUse = indexProvider.fillInDefaults(configToUse);
  configToUse = injectDefaultProviderIfMissing(cls, indexName, dbConfig, configToUse);

  // Do they match (stored vs. supplied)?
  if (storedConfig != null) {
    assertConfigMatches(indexProvider, indexName, storedConfig, suppliedConfig);
    // Fill in "provider" if not already filled in, backwards compatibility issue
    Map<String, String> newConfig = injectDefaultProviderIfMissing(cls, indexName, dbConfig, storedConfig);
    if (newConfig != storedConfig) {
      indexStore.set(cls, indexName, newConfig);
    }
    configToUse = newConfig;
  }

  boolean needsToBeSet = !indexStore.has(cls, indexName);
  return Pair.of(Collections.unmodifiableMap(configToUse), needsToBeSet);
}
@Test(expected = RestResultException.class)
public void testFailingCreateNodeAndAddToIndex() throws Exception {
  // "fulltext_ _" is an invalid index type, so the index operations are expected to fail
  RestIndex<Node> index = restAPI.createIndex(
      Node.class, "index", MapUtil.stringMap(IndexManager.PROVIDER, "lucene", "type", "fulltext_ _"));
  final Transaction tx = restAPI.beginTx();
  Node n1 = restAPI.createNode(map());
  index.add(n1, "key", "value");
  tx.success();
  tx.finish();
  Node node = index.get("key", "value").getSingle();
  assertEquals("created node found in index", n1, node);
}
@Test
public void givenClusterWithCreatedIndexWhenDeleteIndexOnMasterThenIndexIsDeletedOnSlave() throws Throwable {
  ClusterManager clusterManager = new ClusterManager(
      fromXml(getClass().getResource("/threeinstances.xml").toURI()),
      TargetDirectory.forTest(getClass()).cleanDirectory("testCluster"),
      MapUtil.stringMap(
          HaSettings.ha_server.name(), ":6001-6005",
          HaSettings.tx_push_factor.name(), "2"));
  try {
    // Given
    clusterManager.start();
    clusterManager.getDefaultCluster().await(ClusterManager.allSeesAllAsAvailable());
    GraphDatabaseAPI master = clusterManager.getDefaultCluster().getMaster();
    try (Transaction tx = master.beginTx()) {
      master.index().forNodes("Test");
      tx.success();
    }
    HighlyAvailableGraphDatabase aSlave = clusterManager.getDefaultCluster().getAnySlave();
    try (Transaction tx = aSlave.beginTx()) {
      assertThat(aSlave.index().existsForNodes("Test"), equalTo(true));
      tx.success();
    }

    // When
    try (Transaction tx = master.beginTx()) {
      master.index().forNodes("Test").delete();
      tx.success();
    }

    // Then
    HighlyAvailableGraphDatabase anotherSlave = clusterManager.getDefaultCluster().getAnySlave();
    try (Transaction tx = anotherSlave.beginTx()) {
      assertThat(anotherSlave.index().existsForNodes("Test"), equalTo(false));
      tx.success();
    }
  } finally {
    clusterManager.stop();
  }
}
private ClusterClient newClusterClient(InstanceId id) {
  Map<String, String> configMap = MapUtil.stringMap(
      ClusterSettings.initial_hosts.name(), cluster.getInitialHostsConfigString(),
      ClusterSettings.server_id.name(), String.valueOf(id.toIntegerIndex()),
      ClusterSettings.cluster_server.name(), "0.0.0.0:8888");
  Config config = new Config(
      configMap, InternalAbstractGraphDatabase.Configuration.class, GraphDatabaseSettings.class);
  return new ClusterClient(
      new Monitors(),
      ClusterClient.adapt(config),
      new DevNullLoggingService(),
      new NotElectableElectionCredentialsProvider(),
      new ObjectStreamFactory(),
      new ObjectStreamFactory());
}
@Before
public void startCluster() throws Throwable {
  FileUtils.deleteDirectory(PATH);
  FileUtils.deleteDirectory(BACKUP_PATH);
  clusterManager = new ClusterManager(
      fromXml(getClass().getResource("/threeinstances.xml").toURI()),
      PATH,
      MapUtil.stringMap(OnlineBackupSettings.online_backup_enabled.name(), Settings.TRUE)) {
    @Override
    protected void config(GraphDatabaseBuilder builder, String clusterName, int serverId) {
      builder.setConfig(OnlineBackupSettings.online_backup_server, (":" + (4444 + serverId)));
    }
  };
  clusterManager.start();
  cluster = clusterManager.getDefaultCluster();
  // Really doesn't matter which instance
  representation = createSomeData(cluster.getMaster());
}
@Test
public void testSetBlockSize() throws Exception {
  targetDirectory.cleanup();
  Config config = new Config(
      MapUtil.stringMap("string_block_size", "62", "array_block_size", "302"),
      GraphDatabaseSettings.class);
  StoreFactory sf = new StoreFactory(
      config,
      new DefaultIdGeneratorFactory(),
      new DefaultWindowPoolFactory(),
      fs.get(),
      StringLogger.DEV_NULL,
      null);
  sf.createNeoStore(file("neo")).close();
  initializeStores();
  assertEquals(62 + AbstractDynamicStore.BLOCK_HEADER_SIZE, pStore.getStringBlockSize());
  assertEquals(302 + AbstractDynamicStore.BLOCK_HEADER_SIZE, pStore.getArrayBlockSize());
  ds.stop();
}
@SuppressWarnings("serial")
@Ignore
public class TestDoubleRecovery extends AbstractSubProcessTestBase {
  private static final byte[] NEOKERNL = {'N', 'E', 'O', 'K', 'E', 'R', 'N', 'L', '\0'};

  private final CountDownLatch afterWrite = new CountDownLatch(1);
  private final CountDownLatch afterCrash = new CountDownLatch(1);

  /*
   * 1) Do a 2PC transaction, crash when both resources have been prepared and the txlog
   *    says "mark as committing" for that tx.
   * 2) Do recovery and then crash again.
   * 3) Do recovery and verify that all data is in there.
   * Also do an incremental backup just to make sure that the logs have gotten the
   * right records injected.
   */
  @Test
  public void crashAfter2PCMarkAsCommittingThenCrashAgainAndRecover() throws Exception {
    String backupDirectory = "target/var/backup-db";
    FileUtils.deleteRecursively(new File(backupDirectory));
    OnlineBackup.from(InetAddress.getLocalHost().getHostAddress()).full(backupDirectory);
    for (BreakPoint bp : breakpoints(0)) {
      bp.enable();
    }
    runInThread(new WriteTransaction());
    afterWrite.await();
    startSubprocesses();
    runInThread(new Crash());
    afterCrash.await();
    startSubprocesses();
    OnlineBackup.from(InetAddress.getLocalHost().getHostAddress()).incremental(backupDirectory);
    run(new Verification());
    GraphDatabaseAPI db = (GraphDatabaseAPI) new GraphDatabaseFactory().newEmbeddedDatabase(backupDirectory);
    try {
      new Verification().run(db);
    } finally {
      db.shutdown();
    }
  }

  static class WriteTransaction implements Task {
    @Override
    public void run(GraphDatabaseAPI graphdb) {
      Transaction tx = graphdb.beginTx();
      Node node;
      try {
        // hack to get around another bug
        node = graphdb.createNode();
        tx.success();
      } finally {
        tx.finish();
      }
      tx = graphdb.beginTx();
      try {
        node.setProperty("correct", "yes");
        graphdb.index().forNodes("nodes").add(node, "name", "value");
        tx.success();
      } finally {
        tx.finish();
      }
    }
  }

  static class Crash implements Task {
    @Override
    public void run(GraphDatabaseAPI graphdb) {
      throw new AssertionError("Should not reach here - the breakpoint should avoid it");
    }
  }

  static class Verification implements Task {
    @Override
    public void run(GraphDatabaseAPI graphdb) {
      assertNotNull("No graph database", graphdb);
      Index<Node> index = graphdb.index().forNodes("nodes");
      assertNotNull("No index", index);
      Node node = index.get("name", "value").getSingle();
      assertNotNull("could not get the node", node);
      assertEquals("yes", node.getProperty("correct"));
    }
  }

  private final BreakPoint ON_CRASH = new BreakPoint(Crash.class, "run", GraphDatabaseAPI.class) {
    @Override
    protected void callback(DebugInterface debug) throws KillSubProcess {
      afterCrash.countDown();
      throw KillSubProcess.withExitCode(-1);
    }
  };

  private final BreakPoint BEFORE_ANY_DATASOURCE_2PC =
      new BreakPoint(XaResourceHelpImpl.class, "commit", Xid.class, boolean.class) {
        @Override
        protected void callback(DebugInterface debug) throws KillSubProcess {
          if (twoPhaseCommitIn(debug.thread())) {
            debug.thread().suspend(null);
            this.disable();
            afterWrite.countDown();
            throw KillSubProcess.withExitCode(-1);
          }
        }

        private boolean twoPhaseCommitIn(DebuggedThread thread) {
          return !Boolean.parseBoolean(thread.getLocal(1, "onePhase"));
        }
      };

  private final BreakPoint[] breakpointsForBefore2PC = new BreakPoint[] {ON_CRASH, BEFORE_ANY_DATASOURCE_2PC};

  @Override
  protected BreakPoint[] breakpoints(int id) {
    return breakpointsForBefore2PC;
  }

  private final Bootstrapper bootstrap =
      bootstrap(this, MapUtil.stringMap(OnlineBackupSettings.online_backup_enabled.name(), Settings.TRUE));

  @Override
  protected Bootstrapper bootstrap(int id) throws IOException {
    return bootstrap;
  }

  private static Bootstrapper bootstrap(TestDoubleRecovery test, Map<String, String> config) {
    try {
      return new Bootstrapper(test, 0, config) {
        @Override
        protected void shutdown(GraphDatabaseService graphdb, boolean normal) {
          if (normal) {
            super.shutdown(graphdb, normal);
          }
        }
      };
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  /**
   * Create a log file that fixes a store that has been subject to this issue.
   *
   * <p>Parameters: [filename] [globalId.time] [globalId.sequence]
   *
   * <p>Example: TestDoubleRecovery tm_tx_log.1 661819753510181175 3826
   */
  public static void main(String... args) throws Exception {
    GraphDatabaseAPI graphdb =
        (GraphDatabaseAPI) new GraphDatabaseFactory().newEmbeddedDatabase("target/test-data/junk");
    try {
      new WriteTransaction().run(graphdb);
    } finally {
      graphdb.shutdown();
    }
    TxLog log = new TxLog(new File(args[0]), new DefaultFileSystemAbstraction());
    byte[] globalId = new byte[NEOKERNL.length + 16];
    System.arraycopy(NEOKERNL, 0, globalId, 0, NEOKERNL.length);
    ByteBuffer byteBuf = ByteBuffer.wrap(globalId);
    byteBuf.position(NEOKERNL.length);
    byteBuf.putLong(Long.parseLong(args[1])).putLong(Long.parseLong(args[2]));
    log.txStart(globalId);
    log.addBranch(globalId, UTF8.encode("414141"));
    log.addBranch(globalId, LuceneDataSource.DEFAULT_BRANCH_ID);
    log.markAsCommitting(globalId, ForceMode.unforced);
    log.force();
    log.close();
  }
}
private void initializeStores() throws IOException {
  LockManager lockManager = new LockManagerImpl(new RagManager());
  final Config config = new Config(
      MapUtil.stringMap(
          InternalAbstractGraphDatabase.Configuration.store_dir.name(), path.getPath(),
          InternalAbstractGraphDatabase.Configuration.neo_store.name(), "neo",
          InternalAbstractGraphDatabase.Configuration.logical_log.name(), file("nioneo_logical.log").getPath()),
      GraphDatabaseSettings.class);
  StoreFactory sf = new StoreFactory(
      config,
      new DefaultIdGeneratorFactory(),
      new DefaultWindowPoolFactory(),
      fs.get(),
      StringLogger.DEV_NULL,
      null);
  NodeManager nodeManager = mock(NodeManager.class);
  @SuppressWarnings("rawtypes")
  List caches = Arrays.asList((Cache) mock(AutoLoadingCache.class), (Cache) mock(AutoLoadingCache.class));
  when(nodeManager.caches()).thenReturn(caches);
  ds = new NeoStoreXaDataSource(
      config,
      sf,
      StringLogger.DEV_NULL,
      new XaFactory(
          config,
          TxIdGenerator.DEFAULT,
          new PlaceboTm(lockManager, TxIdGenerator.DEFAULT),
          fs.get(),
          new Monitors(),
          new DevNullLoggingService(),
          RecoveryVerifier.ALWAYS_VALID,
          LogPruneStrategies.NO_PRUNING),
      TransactionStateFactory.noStateFactory(new DevNullLoggingService()),
      new TransactionInterceptorProviders(
          Collections.<TransactionInterceptorProvider>emptyList(), dependencyResolverForConfig(config)),
      null,
      new SingleLoggingService(DEV_NULL),
      new KernelSchemaStateStore(),
      mock(TokenNameLookup.class),
      dependencyResolverForNoIndexProvider(nodeManager),
      mock(AbstractTransactionManager.class),
      mock(PropertyKeyTokenHolder.class),
      mock(LabelTokenHolder.class),
      mock(RelationshipTypeTokenHolder.class),
      mock(PersistenceManager.class),
      mock(LockManager.class),
      mock(SchemaWriteGuard.class));
  ds.init();
  ds.start();
  xaCon = ds.getXaConnection();
  pStore = xaCon.getPropertyStore();
  rtStore = xaCon.getRelationshipTypeStore();
}
@Test
@EnabledBreakpoints({
  "makeSureNextTransactionIsFullyFetched",
  "readNextChunk",
  "waitTxCopyToStart",
  "finish"
})
public void testTransactionsPulled() throws Exception {
  final HighlyAvailableGraphDatabase master = new HighlyAvailableGraphDatabase(
      TargetDirectory.forTest(TestClientThreadIsolation.class).directory("master", true).getAbsolutePath(),
      MapUtil.stringMap(
          HaConfig.CONFIG_KEY_COORDINATORS, zoo.getConnectionString(),
          HaConfig.CONFIG_KEY_SERVER_ID, "1"));
  final HighlyAvailableGraphDatabase slave1 = new HighlyAvailableGraphDatabase(
      TargetDirectory.forTest(TestClientThreadIsolation.class).directory("slave1", true).getAbsolutePath(),
      MapUtil.stringMap(
          HaConfig.CONFIG_KEY_COORDINATORS, zoo.getConnectionString(),
          HaConfig.CONFIG_KEY_SERVER_ID, "2",
          HaConfig.CONFIG_KEY_MAX_CONCURRENT_CHANNELS_PER_SLAVE, "2"));

  Transaction masterTx = master.beginTx();
  master.createNode()
      .createRelationshipTo(master.createNode(), DynamicRelationshipType.withName("master"))
      .setProperty("largeArray", new int[20000]);
  masterTx.success();
  masterTx.finish();

  // Simple sanity check
  assertEquals(1, master.getBroker().getMaster().other().getMachineId());
  assertEquals(1, slave1.getBroker().getMaster().other().getMachineId());

  Thread thread1 = new Thread(
      new Runnable() {
        public void run() {
          // TODO Figure out how to do this
          // Master masterClient = slave1.getBroker().getMaster().first();
          // Response<Integer> response = masterClient.createRelationshipType(
          //     slave1.getSlaveContext( 10 ), "name" );
          // slave1.receive( response ); // will be suspended here
          // response.close();
        }
      },
      "thread 1");
  Thread thread2 = new Thread(
      new Runnable() {
        public void run() {
          /*
           * We have two operations since we need to make sure this test passes
           * before and after the proper channel releasing fix. The issue is
           * that we can't have only one channel since it will deadlock because
           * the txCopyingThread is suspended and won't release the channel
           * (after the fix). But the problem is that with two channels going
           * before the fix it won't break because the RR policy in
           * ResourcePool will give the unused channel to the new requesting thread,
           * thus not triggering the bug. The solution is to do two requests so
           * eventually get the released, half consumed channel.
           */
          // TODO Figure out how to do this
          // try {
          //     waitTxCopyToStart();
          //     Master masterClient = slave1.getBroker().getMaster().first();
          //     SlaveContext ctx = slave1.getSlaveContext( 11 );
          //     Response<Integer> response = masterClient.createRelationshipType( ctx, "name2" );
          //     slave1.receive( response );
          //     response.close();
          //
          //     // This will break before the fix
          //     response = masterClient.createRelationshipType( slave1.getSlaveContext( 12 ), "name3" );
          //     slave1.receive( response );
          //     response.close();
          //
          //     /*
          //      * If the above fails, this won't happen. Used to fail the
          //      * test gracefully
          //      */
          //     Transaction masterTx = master.beginTx();
          //     master.getReferenceNode().createRelationshipTo(
          //         master.createNode(), DynamicRelationshipType.withName( "test" ) );
          //     masterTx.success();
          //     masterTx.finish();
          // } finally {
          //     finish();
          // }
        }
      },
      "thread 2");

  thread1.start();
  thread2.start();
  thread1.join();
  thread2.join();

  assertTrue(master.getReferenceNode()
      .getRelationships(DynamicRelationshipType.withName("test"))
      .iterator()
      .hasNext());
}
@Test
public void providerGetsFilledInAutomatically() {
  Map<String, String> correctConfig = MapUtil.stringMap("type", "exact", IndexManager.PROVIDER, "lucene");
  File storeDir = new File("target/var/index");
  Neo4jTestCase.deleteFileOrDirectory(storeDir);
  GraphDatabaseService graphDb = new EmbeddedGraphDatabase(storeDir.getPath());
  assertEquals(correctConfig, graphDb.index().getConfiguration(graphDb.index().forNodes("default")));
  assertEquals(correctConfig, graphDb.index().getConfiguration(
      graphDb.index().forNodes("wo-provider", MapUtil.stringMap("type", "exact"))));
  assertEquals(correctConfig, graphDb.index().getConfiguration(
      graphDb.index().forNodes("w-provider", MapUtil.stringMap("type", "exact", IndexManager.PROVIDER, "lucene"))));
  assertEquals(correctConfig, graphDb.index().getConfiguration(graphDb.index().forRelationships("default")));
  assertEquals(correctConfig, graphDb.index().getConfiguration(
      graphDb.index().forRelationships("wo-provider", MapUtil.stringMap("type", "exact"))));
  assertEquals(correctConfig, graphDb.index().getConfiguration(
      graphDb.index().forRelationships("w-provider", MapUtil.stringMap("type", "exact", IndexManager.PROVIDER, "lucene"))));
  graphDb.shutdown();

  removeProvidersFromIndexDbFile(storeDir);
  graphDb = new EmbeddedGraphDatabase(storeDir.getPath());
  // Getting the index w/o exception means that the provider has been reinstated
  assertEquals(correctConfig, graphDb.index().getConfiguration(graphDb.index().forNodes("default")));
  assertEquals(correctConfig, graphDb.index().getConfiguration(
      graphDb.index().forNodes("wo-provider", MapUtil.stringMap("type", "exact"))));
  assertEquals(correctConfig, graphDb.index().getConfiguration(
      graphDb.index().forNodes("w-provider", MapUtil.stringMap("type", "exact", IndexManager.PROVIDER, "lucene"))));
  assertEquals(correctConfig, graphDb.index().getConfiguration(graphDb.index().forRelationships("default")));
  assertEquals(correctConfig, graphDb.index().getConfiguration(
      graphDb.index().forRelationships("wo-provider", MapUtil.stringMap("type", "exact"))));
  assertEquals(correctConfig, graphDb.index().getConfiguration(
      graphDb.index().forRelationships("w-provider", MapUtil.stringMap("type", "exact", IndexManager.PROVIDER, "lucene"))));
  graphDb.shutdown();

  removeProvidersFromIndexDbFile(storeDir);
  graphDb = new EmbeddedGraphDatabase(storeDir.getPath());
  // Getting the index w/o exception means that the provider has been reinstated
  assertEquals(correctConfig, graphDb.index().getConfiguration(graphDb.index().forNodes("default")));
  assertEquals(correctConfig, graphDb.index().getConfiguration(graphDb.index().forNodes("wo-provider")));
  assertEquals(correctConfig, graphDb.index().getConfiguration(graphDb.index().forNodes("w-provider")));
  assertEquals(correctConfig, graphDb.index().getConfiguration(graphDb.index().forRelationships("default")));
  assertEquals(correctConfig, graphDb.index().getConfiguration(graphDb.index().forRelationships("wo-provider")));
  assertEquals(correctConfig, graphDb.index().getConfiguration(graphDb.index().forRelationships("w-provider")));
  graphDb.shutdown();
}
public Index<Node> createNodeFullTextIndex(String named) {
  return database.getIndexManager()
      .forNodes(named, MapUtil.stringMap(IndexManager.PROVIDER, "lucene", "type", "fulltext"));
}
public Index<Relationship> createRelationshipFullTextIndex(String named) {
  return database.getIndexManager()
      .forRelationships(named, MapUtil.stringMap(IndexManager.PROVIDER, "lucene", "type", "fulltext"));
}
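// A minimal usage sketch (not part of the original source) for the two factory
// methods above; the GraphDatabaseService parameter, index name, and property
// values are assumed placeholders.
public void indexAndQueryPerson(GraphDatabaseService graphDb) {
  Index<Node> people = createNodeFullTextIndex("people");
  Transaction tx = graphDb.beginTx();
  try {
    Node person = graphDb.createNode();
    person.setProperty("name", "Alice Example");
    people.add(person, "name", person.getProperty("name"));
    tx.success();
  } finally {
    tx.finish();
  }
  // Fulltext indexes accept Lucene query syntax, e.g. a wildcard over one token.
  for (Node hit : people.query("name", "alic*")) {
    System.out.println("hit: " + hit.getId());
  }
}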
public class DefaultUdcInformationCollector implements UdcInformationCollector {
  private final Config config;

  @SuppressWarnings("deprecation")
  private final KernelData kernel;

  private final NodeManager nodeManager;

  private String storeId;
  private boolean crashPing;

  public DefaultUdcInformationCollector(
      Config config, XaDataSourceManager xadsm, @SuppressWarnings("deprecation") KernelData kernel) {
    this.config = config;
    this.kernel = kernel;
    nodeManager = kernel.graphDatabase().getDependencyResolver().resolveDependency(NodeManager.class);
    if (xadsm != null) {
      xadsm.addDataSourceRegistrationListener(
          new DataSourceRegistrationListener() {
            @Override
            public void registeredDataSource(XaDataSource ds) {
              if (ds instanceof NeoStoreXaDataSource) {
                crashPing = ds.getXaContainer().getLogicalLog().wasNonClean();
                storeId = Long.toHexString(ds.getRandomIdentifier());
              }
            }

            @Override
            public void unregisteredDataSource(XaDataSource ds) {
              if (ds instanceof NeoStoreXaDataSource) {
                crashPing = false;
                storeId = null;
              }
            }
          });
    }
  }

  public static String filterVersionForUDC(String version) {
    if (!version.contains("+")) {
      return version;
    }
    return version.substring(0, version.indexOf("+"));
  }

  @Override
  public Map<String, String> getUdcParams() {
    String classPath = getClassPath();
    Map<String, String> udcFields = new HashMap<>();
    add(udcFields, ID, storeId);
    add(udcFields, VERSION, filterVersionForUDC(kernel.version().getReleaseVersion()));
    add(udcFields, REVISION, filterVersionForUDC(kernel.version().getRevision()));
    add(udcFields, EDITION, determineEdition(classPath));
    add(udcFields, TAGS, determineTags(jarNamesForTags, classPath));
    add(udcFields, CLUSTER_HASH, determineClusterNameHash());
    add(udcFields, SOURCE, config.get(UdcSettings.udc_source));
    add(udcFields, REGISTRATION, config.get(UdcSettings.udc_registration_key));
    add(udcFields, DISTRIBUTION, determineOsDistribution());
    add(udcFields, USER_AGENTS, determineUserAgents());
    add(udcFields, MAC, determineMacAddress());
    add(udcFields, NUM_PROCESSORS, determineNumberOfProcessors());
    add(udcFields, TOTAL_MEMORY, determineTotalMemory());
    add(udcFields, HEAP_SIZE, determineHeapSize());
    add(udcFields, NODE_IDS_IN_USE, determineNodesIdsInUse());
    add(udcFields, RELATIONSHIP_IDS_IN_USE, determineRelationshipIdsInUse());
    add(udcFields, LABEL_IDS_IN_USE, determineLabelIdsInUse());
    add(udcFields, PROPERTY_IDS_IN_USE, determinePropertyIdsInUse());
    udcFields.putAll(determineSystemProperties());
    return udcFields;
  }

  private String determineOsDistribution() {
    if (System.getProperties().getProperty("os.name", "").equals("Linux")) {
      return searchForPackageSystems();
    } else {
      return UNKNOWN_DIST;
    }
  }

  static String searchForPackageSystems() {
    try {
      if (new File("/bin/rpm").exists()) {
        return "rpm";
      }
      if (new File("/usr/bin/dpkg").exists()) {
        return "dpkg";
      }
    } catch (Exception e) {
      // ignore
    }
    return UNKNOWN_DIST;
  }

  private Integer determineClusterNameHash() {
    try {
      Class<?> haSettings = Class.forName("org.neo4j.kernel.ha.HaSettings");
      @SuppressWarnings("unchecked")
      Setting<String> setting = (Setting<String>) haSettings.getField("cluster_name").get(null);
      String name = config.get(setting);
      return name != null ? Math.abs(name.hashCode() % Integer.MAX_VALUE) : null;
    } catch (Exception e) {
      return null;
    }
  }

  private org.neo4j.ext.udc.Edition determineEdition(String classPath) {
    if (classPath.contains("neo4j-ha")) {
      return org.neo4j.ext.udc.Edition.enterprise;
    }
    if (classPath.contains("neo4j-management")) {
      return org.neo4j.ext.udc.Edition.advanced;
    }
    return Edition.community;
  }

  private final Map<String, String> jarNamesForTags = MapUtil.stringMap(
      "spring-", "spring",
      "(javax.ejb|ejb-jar)", "ejb",
      "(weblogic|glassfish|websphere|jboss)", "appserver",
      "openshift", "openshift",
      "cloudfoundry", "cloudfoundry",
      "(junit|testng)", "test",
      "jruby", "ruby",
      "clojure", "clojure",
      "jython", "python",
      "groovy", "groovy",
      "(tomcat|jetty)", "web",
      "spring-data-neo4j", "sdn");

  private String determineTags(Map<String, String> jarNamesForTags, String classPath) {
    StringBuilder result = new StringBuilder();
    for (Map.Entry<String, String> entry : jarNamesForTags.entrySet()) {
      final Pattern pattern = Pattern.compile(entry.getKey());
      if (pattern.matcher(classPath).find()) {
        result.append(",").append(entry.getValue());
      }
    }
    if (result.length() == 0) {
      return null;
    }
    return result.substring(1);
  }

  private String getClassPath() {
    RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();
    return runtime.getClassPath();
  }

  private String determineMacAddress() {
    String formattedMac = "0";
    try {
      InetAddress address = InetAddress.getLocalHost();
      NetworkInterface ni = NetworkInterface.getByInetAddress(address);
      if (ni != null) {
        byte[] mac = ni.getHardwareAddress();
        if (mac != null) {
          StringBuilder sb = new StringBuilder(mac.length * 2);
          Formatter formatter = new Formatter(sb);
          for (byte b : mac) {
            formatter.format("%02x", b);
          }
          formattedMac = sb.toString();
        }
      }
    } catch (Throwable t) {
      // ignore: fall back to "0"
    }
    return formattedMac;
  }

  private String determineUserAgents() {
    try {
      Class<?> filterClass = Class.forName("org.neo4j.server.rest.web.CollectUserAgentFilter");
      Object filterInstance = filterClass.getMethod("instance").invoke(null);
      Object agents = filterClass.getMethod("getUserAgents").invoke(filterInstance);
      String result = toCommaString(agents);
      filterClass.getMethod("reset").invoke(filterInstance);
      return result;
    } catch (Exception e) {
      return null;
    }
  }

  private int determineNumberOfProcessors() {
    return Runtime.getRuntime().availableProcessors();
  }

  private long determineTotalMemory() {
    return ((OperatingSystemMXBean) ManagementFactory.getOperatingSystemMXBean()).getTotalPhysicalMemorySize();
  }

  private long determineHeapSize() {
    return ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getUsed();
  }

  private long determineNodesIdsInUse() {
    return getNumberOfIdsInUse(Node.class);
  }

  private long determineLabelIdsInUse() {
    return getNumberOfIdsInUse(Label.class);
  }

  private long determinePropertyIdsInUse() {
    return getNumberOfIdsInUse(PropertyStore.class);
  }

  private long determineRelationshipIdsInUse() {
    return getNumberOfIdsInUse(Relationship.class);
  }

  private long getNumberOfIdsInUse(Class<?> clazz) {
    return nodeManager.getNumberOfIdsInUse(clazz);
  }

  private String toCommaString(Object values) {
    StringBuilder result = new StringBuilder();
    if (values instanceof Iterable) {
      for (Object agent : (Iterable) values) {
        if (agent == null) {
          continue;
        }
        if (result.length() > 0) {
          result.append(",");
        }
        result.append(agent);
      }
    } else {
      result.append(values);
    }
    return result.toString();
  }

  private void add(Map<String, String> udcFields, String name, Object value) {
    if (value == null) {
      return;
    }
    String str = value.toString().trim();
    if (str.isEmpty()) {
      return;
    }
    udcFields.put(name, str);
  }

  private String removeUdcPrefix(String propertyName) {
    if (propertyName.startsWith(UDC_PROPERTY_PREFIX)) {
      return propertyName.substring(UDC_PROPERTY_PREFIX.length() + 1);
    }
    return propertyName;
  }

  private String sanitizeUdcProperty(String propertyValue) {
    return propertyValue.replace(' ', '_');
  }

  private Map<String, String> determineSystemProperties() {
    Map<String, String> relevantSysProps = new HashMap<>();
    Properties sysProps = System.getProperties();
    Enumeration sysPropsNames = sysProps.propertyNames();
    while (sysPropsNames.hasMoreElements()) {
      String sysPropName = (String) sysPropsNames.nextElement();
      if (sysPropName.startsWith(UDC_PROPERTY_PREFIX) || sysPropName.startsWith(OS_PROPERTY_PREFIX)) {
        String propertyValue = sysProps.getProperty(sysPropName);
        relevantSysProps.put(removeUdcPrefix(sysPropName), sanitizeUdcProperty(propertyValue));
      }
    }
    return relevantSysProps;
  }

  @Override
  public String getStoreId() {
    return storeId;
  }

  @Override
  public boolean getCrashPing() {
    return crashPing;
  }
}
public static void main(String[] args) {
  if (args.length != 3) {
    System.out.println(
        "This program expects the following parameters:\n"
            + "1. Folder name with all the .gbk files\n"
            + "2. Bio4j DB folder\n"
            + "3. batch inserter .properties file");
  } else {
    File currentFolder = new File(args[0]);
    File[] files = currentFolder.listFiles();
    BatchInserter inserter = null;
    BatchInserterIndexProvider indexProvider = null;

    // ---------------------initializing node type properties----------------------------
    genomeElementProperties.put(GenomeElementNode.NODE_TYPE_PROPERTY, GenomeElementNode.NODE_TYPE);
    geneProperties.put(GeneNode.NODE_TYPE_PROPERTY, GeneNode.NODE_TYPE);
    cdsProperties.put(CDSNode.NODE_TYPE_PROPERTY, CDSNode.NODE_TYPE);
    miscRnaProperties.put(MiscRNANode.NODE_TYPE_PROPERTY, MiscRNANode.NODE_TYPE);
    mRnaProperties.put(MRNANode.NODE_TYPE_PROPERTY, MRNANode.NODE_TYPE);
    ncRnaProperties.put(NcRNANode.NODE_TYPE_PROPERTY, NcRNANode.NODE_TYPE);
    rRnaProperties.put(RRNANode.NODE_TYPE_PROPERTY, RRNANode.NODE_TYPE);
    tmRnaProperties.put(TmRNANode.NODE_TYPE_PROPERTY, TmRNANode.NODE_TYPE);
    tRnaProperties.put(TRNANode.NODE_TYPE_PROPERTY, TRNANode.NODE_TYPE);
    // -----------------------------------------------------------------------------------

    try {
      // This block configures the logger with handler and formatter
      fh = new FileHandler("ImportRefSeq.log", false);
      SimpleFormatter formatter = new SimpleFormatter();
      fh.setFormatter(formatter);
      logger.addHandler(fh);
      logger.setLevel(Level.ALL);

      // create the batch inserter
      inserter = BatchInserters.inserter(args[1], MapUtil.load(new File(args[2])));
      // create the batch index service
      indexProvider = new LuceneBatchInserterIndexProvider(inserter);

      // -----------------create batch indexes----------------------------------
      BatchInserterIndex genomeElementVersionIndex = indexProvider.nodeIndex(
          GenomeElementNode.GENOME_ELEMENT_VERSION_INDEX,
          MapUtil.stringMap(PROVIDER_ST, LUCENE_ST, TYPE_ST, EXACT_ST));
      BatchInserterIndex nodeTypeIndex = indexProvider.nodeIndex(
          Bio4jManager.NODE_TYPE_INDEX_NAME,
          MapUtil.stringMap(PROVIDER_ST, LUCENE_ST, TYPE_ST, EXACT_ST));

      for (File file : files) {
        if (file.getName().endsWith(".gbff")) {
          logger.log(Level.INFO, "file: " + file.getName());
          BufferedReader reader = new BufferedReader(new FileReader(file));
          String line = null;
          while ((line = reader.readLine()) != null) {
            // this is the first line where the locus is
            String accessionSt = "";
            String definitionSt = "";
            String versionSt = "";
            String commentSt = "";
            StringBuilder seqStBuilder = new StringBuilder();
            ArrayList<String> cdsList = new ArrayList<String>();
            ArrayList<String> geneList = new ArrayList<String>();
            ArrayList<String> miscRnaList = new ArrayList<String>();
            ArrayList<String> mRnaList = new ArrayList<String>();
            ArrayList<String> ncRnaList = new ArrayList<String>();
            ArrayList<String> rRnaList = new ArrayList<String>();
            ArrayList<String> tmRnaList = new ArrayList<String>();
            ArrayList<String> tRnaList = new ArrayList<String>();
            boolean originFound = false;

            // Now I get all the lines till I reach the string '//'
            do {
              boolean readLineFlag = true;
              if (line.startsWith(GBCommon.LOCUS_STR)) {
                // do nothing right now
              } else if (line.startsWith(GBCommon.ACCESSION_STR)) {
                accessionSt = line.split(GBCommon.ACCESSION_STR)[1].trim();
              } else if (line.startsWith(GBCommon.VERSION_STR)) {
                versionSt = line.split(GBCommon.VERSION_STR)[1].trim().split(" ")[0];
              } else if (line.startsWith(GBCommon.DEFINITION_STR)) {
                definitionSt += line.split(GBCommon.DEFINITION_STR)[1].trim();
                do {
                  line = reader.readLine();
                  if (line.startsWith(" ")) {
                    definitionSt += line.trim();
                  }
                } while (line.startsWith(" "));
                readLineFlag = false;
              } else if (line.startsWith(GBCommon.COMMENT_STR)) {
                commentSt += line.split(GBCommon.COMMENT_STR)[1].trim();
                do {
                  line = reader.readLine();
                  if (line.startsWith(" ")) {
                    commentSt += "\n" + line.trim();
                  }
                } while (line.startsWith(" "));
                readLineFlag = false;
              } else if (line.startsWith(GBCommon.FEATURES_STR)) {
                do {
                  line = reader.readLine();
                  String lineSubstr5 = line.substring(5);
                  if (lineSubstr5.startsWith(GBCommon.CDS_STR)) {
                    String positionsSt = line.trim().split(GBCommon.CDS_STR)[1].trim();
                    line = reader.readLine();
                    while (!line.trim().startsWith("/")) {
                      positionsSt += line.trim();
                      line = reader.readLine();
                    }
                    cdsList.add(positionsSt);
                  } else if (lineSubstr5.startsWith(GBCommon.GENE_STR)) {
                    String positionsSt = line.trim().split(GBCommon.GENE_STR)[1].trim();
                    line = reader.readLine();
                    while (!line.trim().startsWith("/")) {
                      positionsSt += line.trim();
                      line = reader.readLine();
                    }
                    geneList.add(positionsSt);
                  } else if (lineSubstr5.startsWith(GBCommon.MISC_RNA_STR)) {
                    String positionsSt = line.trim().split(GBCommon.MISC_RNA_STR)[1].trim();
                    line = reader.readLine();
                    while (!line.trim().startsWith("/")) {
                      positionsSt += line.trim();
                      line = reader.readLine();
                    }
                    miscRnaList.add(positionsSt);
                  } else if (lineSubstr5.startsWith(GBCommon.TM_RNA_STR)) {
                    String positionsSt = line.trim().split(GBCommon.TM_RNA_STR)[1].trim();
                    line = reader.readLine();
                    while (!line.trim().startsWith("/")) {
                      positionsSt += line.trim();
                      line = reader.readLine();
                    }
                    tmRnaList.add(positionsSt);
                  } else if (lineSubstr5.startsWith(GBCommon.R_RNA_STR)) {
                    String positionsSt = line.trim().split(GBCommon.R_RNA_STR)[1].trim();
                    line = reader.readLine();
                    while (!line.trim().startsWith("/")) {
                      positionsSt += line.trim();
                      line = reader.readLine();
                    }
                    rRnaList.add(positionsSt);
                  } else if (lineSubstr5.startsWith(GBCommon.M_RNA_STR)) {
                    String positionsSt = line.trim().split(GBCommon.M_RNA_STR)[1].trim();
                    line = reader.readLine();
                    while (!line.trim().startsWith("/")) {
                      positionsSt += line.trim();
                      line = reader.readLine();
                    }
                    mRnaList.add(positionsSt);
                  } else if (lineSubstr5.startsWith(GBCommon.NC_RNA_STR)) {
                    String positionsSt = line.trim().split(GBCommon.NC_RNA_STR)[1].trim();
                    line = reader.readLine();
                    while (!line.trim().startsWith("/")) {
                      positionsSt += line.trim();
                      line = reader.readLine();
                    }
                    ncRnaList.add(positionsSt);
                  } else if (lineSubstr5.startsWith(GBCommon.T_RNA_STR)) {
                    String positionsSt = line.trim().split(GBCommon.T_RNA_STR)[1].trim();
                    line = reader.readLine();
                    while (!line.trim().startsWith("/")) {
                      positionsSt += line.trim();
                      line = reader.readLine();
                    }
                    tRnaList.add(positionsSt);
                  }
                } while (line.startsWith(" "));
                readLineFlag = false;
              } else if (line.startsWith(GBCommon.ORIGIN_STR)) {
                originFound = true;
                do {
                  line = reader.readLine();
                  String[] tempArray = line.trim().split(" ");
                  for (int i = 1; i < tempArray.length; i++) {
                    seqStBuilder.append(tempArray[i]);
                  }
                } while (line.startsWith(" "));
                readLineFlag = false;
              }
              if (readLineFlag) {
                line = reader.readLine();
              }
            } while (line != null && !line.startsWith(GBCommon.LAST_LINE_STR));

            // --------create genome element node--------------
            long genomeElementId = createGenomeElementNode(
                versionSt, commentSt, definitionSt, inserter, genomeElementVersionIndex, nodeTypeIndex);

            // -----------genes-----------------
            for (String genePositionsSt : geneList) {
              geneProperties.put(GeneNode.POSITIONS_PROPERTY, genePositionsSt);
              long geneId = inserter.createNode(geneProperties);
              inserter.createRelationship(genomeElementId, geneId, genomeElementGeneRel, null);
              // indexing gene node by its node_type
              nodeTypeIndex.add(geneId, MapUtil.map(Bio4jManager.NODE_TYPE_INDEX_NAME, GeneNode.NODE_TYPE));
            }
            // -----------CDS-----------------
            for (String cdsPositionsSt : cdsList) {
              cdsProperties.put(CDSNode.POSITIONS_PROPERTY, cdsPositionsSt);
              long cdsID = inserter.createNode(cdsProperties);
              inserter.createRelationship(genomeElementId, cdsID, genomeElementCDSRel, null);
              // indexing CDS node by its node_type
              nodeTypeIndex.add(cdsID, MapUtil.map(Bio4jManager.NODE_TYPE_INDEX_NAME, CDSNode.NODE_TYPE));
            }
            // -----------misc rna-----------------
            for (String miscRnaPositionsSt : miscRnaList) {
              miscRnaProperties.put(MiscRNANode.POSITIONS_PROPERTY, miscRnaPositionsSt);
              long miscRnaID = inserter.createNode(miscRnaProperties);
              inserter.createRelationship(genomeElementId, miscRnaID, genomeElementMiscRnaRel, null);
              // indexing MiscRNA node by its node_type
              nodeTypeIndex.add(miscRnaID, MapUtil.map(Bio4jManager.NODE_TYPE_INDEX_NAME, MiscRNANode.NODE_TYPE));
            }
            // -----------m rna-----------------
            for (String mRnaPositionsSt : mRnaList) {
              mRnaProperties.put(MRNANode.POSITIONS_PROPERTY, mRnaPositionsSt);
              long mRnaID = inserter.createNode(mRnaProperties);
              inserter.createRelationship(genomeElementId, mRnaID, genomeElementMRnaRel, null);
              // indexing MRNA node by its node_type
              nodeTypeIndex.add(mRnaID, MapUtil.map(Bio4jManager.NODE_TYPE_INDEX_NAME, MRNANode.NODE_TYPE));
            }
            // -----------nc rna-----------------
            for (String ncRnaPositionsSt : ncRnaList) {
              ncRnaProperties.put(NcRNANode.POSITIONS_PROPERTY, ncRnaPositionsSt);
              long ncRnaID = inserter.createNode(ncRnaProperties);
              inserter.createRelationship(genomeElementId, ncRnaID, genomeElementNcRnaRel, null);
              // indexing NCRNA node by its node_type
              nodeTypeIndex.add(ncRnaID, MapUtil.map(Bio4jManager.NODE_TYPE_INDEX_NAME, NcRNANode.NODE_TYPE));
            }
            // -----------r rna-----------------
            for (String rRnaPositionsSt : rRnaList) {
              rRnaProperties.put(RRNANode.POSITIONS_PROPERTY, rRnaPositionsSt);
              long rRnaID = inserter.createNode(rRnaProperties);
              inserter.createRelationship(genomeElementId, rRnaID, genomeElementRRnaRel, null);
              // indexing RRNA node by its node_type
              nodeTypeIndex.add(rRnaID, MapUtil.map(Bio4jManager.NODE_TYPE_INDEX_NAME, RRNANode.NODE_TYPE));
            }
            // -----------tm rna-----------------
            for (String tmRnaPositionsSt : tmRnaList) {
              tmRnaProperties.put(TmRNANode.POSITIONS_PROPERTY, tmRnaPositionsSt);
              long tmRnaID = inserter.createNode(tmRnaProperties);
              inserter.createRelationship(genomeElementId, tmRnaID, genomeElementTmRnaRel, null);
              // indexing TmRNA node by its node_type
              nodeTypeIndex.add(tmRnaID, MapUtil.map(Bio4jManager.NODE_TYPE_INDEX_NAME, TmRNANode.NODE_TYPE));
            }
            // -----------t rna-----------------
            for (String tRnaPositionsSt : tRnaList) {
              tRnaProperties.put(TRNANode.POSITIONS_PROPERTY, tRnaPositionsSt);
              long tRnaID = inserter.createNode(tRnaProperties);
              inserter.createRelationship(genomeElementId, tRnaID, genomeElementTRnaRel, null);
              // indexing TRNA node by its node_type
              nodeTypeIndex.add(tRnaID, MapUtil.map(Bio4jManager.NODE_TYPE_INDEX_NAME, TRNANode.NODE_TYPE));
            }

            logger.log(Level.INFO, versionSt + " saved!");
          }
        }
      }
    } catch (Exception e) {
      logger.log(Level.SEVERE, e.getMessage());
      StackTraceElement[] trace = e.getStackTrace();
      for (StackTraceElement stackTraceElement : trace) {
        logger.log(Level.SEVERE, stackTraceElement.toString());
      }
    } finally {
      // shutdown, makes sure all changes are written to disk
      indexProvider.shutdown();
      inserter.shutdown();
      // closing logger file handler
      fh.close();
    }
  }
}
public static void main(String[] args) {
  File currentFolder = new File(".");
  File[] files = currentFolder.listFiles();
  BatchInserter inserter = null;
  BatchInserterIndexProvider indexProvider = null;

  // ---------------------initializing node type properties----------------------------
  genomeElementProperties.put(GenomeElementNode.NODE_TYPE_PROPERTY, GenomeElementNode.NODE_TYPE);
  geneProperties.put(GeneNode.NODE_TYPE_PROPERTY, GeneNode.NODE_TYPE);
  cdsProperties.put(CDSNode.NODE_TYPE_PROPERTY, CDSNode.NODE_TYPE);
  miscRnaProperties.put(MiscRNANode.NODE_TYPE_PROPERTY, MiscRNANode.NODE_TYPE);
  mRnaProperties.put(MRNANode.NODE_TYPE_PROPERTY, MRNANode.NODE_TYPE);
  ncRnaProperties.put(NcRNANode.NODE_TYPE_PROPERTY, NcRNANode.NODE_TYPE);
  rRnaProperties.put(RRNANode.NODE_TYPE_PROPERTY, RRNANode.NODE_TYPE);
  tmRnaProperties.put(TmRNANode.NODE_TYPE_PROPERTY, TmRNANode.NODE_TYPE);
  tRnaProperties.put(TRNANode.NODE_TYPE_PROPERTY, TRNANode.NODE_TYPE);
  // -----------------------------------------------------------------------------------

  try {
    // This block configures the logger with handler and formatter
    fh = new FileHandler("ImportGenbank.log", false);
    SimpleFormatter formatter = new SimpleFormatter();
    fh.setFormatter(formatter);
    logger.addHandler(fh);
    logger.setLevel(Level.ALL);

    // create the batch inserter
    inserter = new BatchInserterImpl(
        CommonData.DATABASE_FOLDER, BatchInserterImpl.loadProperties(CommonData.PROPERTIES_FILE_NAME));
    // create the batch index service
    indexProvider = new LuceneBatchInserterIndexProvider(inserter);

    // -----------------create batch indexes----------------------------------
    BatchInserterIndex genomeElementVersionIndex = indexProvider.nodeIndex(
        GenomeElementNode.GENOME_ELEMENT_VERSION_INDEX,
        MapUtil.stringMap(PROVIDER_ST, LUCENE_ST, TYPE_ST, EXACT_ST));

    for (File file : files) {
      if (file.getName().endsWith(".gbff")) {
        BufferedReader reader = new BufferedReader(new FileReader(file));
        String line = null;
        while ((line = reader.readLine()) != null) {
          // this is the first line where the locus is
          String accessionSt = "";
          String definitionSt = "";
          String versionSt = "";
          String commentSt = "";
          StringBuilder seqStBuilder = new StringBuilder();
          ArrayList<String> cdsList = new ArrayList<String>();
          ArrayList<String> geneList = new ArrayList<String>();
          ArrayList<String> miscRnaList = new ArrayList<String>();
          ArrayList<String> mRnaList = new ArrayList<String>();
          ArrayList<String> ncRnaList = new ArrayList<String>();
          ArrayList<String> rRnaList = new ArrayList<String>();
          ArrayList<String> tmRnaList = new ArrayList<String>();
          ArrayList<String> tRnaList = new ArrayList<String>();
          boolean originFound = false;

          // Now I get all the lines till I reach the string '//'
          do {
            boolean readLineFlag = true;
            if (line.startsWith(GBCommon.LOCUS_STR)) {
              // do nothing right now
            } else if (line.startsWith(GBCommon.ACCESSION_STR)) {
              accessionSt = line.split(GBCommon.ACCESSION_STR)[1].trim();
            } else if (line.startsWith(GBCommon.VERSION_STR)) {
              versionSt = line.split(GBCommon.VERSION_STR)[1].trim().split(" ")[0];
            } else if (line.startsWith(GBCommon.DEFINITION_STR)) {
              definitionSt += line.split(GBCommon.DEFINITION_STR)[1].trim();
              do {
                line = reader.readLine();
                if (line.startsWith(" ")) {
                  definitionSt += line.trim();
                }
              } while (line.startsWith(" "));
              readLineFlag = false;
            } else if (line.startsWith(GBCommon.COMMENT_STR)) {
              commentSt += line.split(GBCommon.COMMENT_STR)[1].trim();
              do {
                line = reader.readLine();
                if (line.startsWith(" ")) {
                  commentSt += "\n" + line.trim();
                }
              } while (line.startsWith(" "));
              readLineFlag = false;
            } else if (line.startsWith(GBCommon.FEATURES_STR)) {
              do {
                line = reader.readLine();
                if (line.trim().startsWith(GBCommon.CDS_STR)) {
                  String positionsSt = line.trim().split(GBCommon.CDS_STR)[1].trim();
                  line = reader.readLine();
                  while (!line.trim().startsWith("/")) {
                    positionsSt += line.trim();
                    line = reader.readLine();
                  }
                  cdsList.add(positionsSt);
                } else if (line.trim().startsWith(GBCommon.GENE_STR)) {
                  String positionsSt = line.trim().split(GBCommon.GENE_STR)[1].trim();
                  line = reader.readLine();
                  while (!line.trim().startsWith("/")) {
                    positionsSt += line.trim();
                    line = reader.readLine();
                  }
                  geneList.add(positionsSt);
                } else if (line.trim().startsWith(GBCommon.MISC_RNA_STR)) {
                  String positionsSt = line.trim().split(GBCommon.MISC_RNA_STR)[1].trim();
                  line = reader.readLine();
                  while (!line.trim().startsWith("/")) {
                    positionsSt += line.trim();
                    line = reader.readLine();
                  }
                  miscRnaList.add(positionsSt);
                } else if (line.trim().startsWith(GBCommon.TM_RNA_STR)) {
                  String positionsSt = line.trim().split(GBCommon.TM_RNA_STR)[1].trim();
                  line = reader.readLine();
                  while (!line.trim().startsWith("/")) {
                    positionsSt += line.trim();
                    line = reader.readLine();
                  }
                  tmRnaList.add(positionsSt);
                } else if (line.trim().startsWith(GBCommon.R_RNA_STR)) {
                  String positionsSt = line.trim().split(GBCommon.R_RNA_STR)[1].trim();
                  line = reader.readLine();
                  while (!line.trim().startsWith("/")) {
                    positionsSt += line.trim();
                    line = reader.readLine();
                  }
                  rRnaList.add(positionsSt);
                } else if (line.trim().startsWith(GBCommon.M_RNA_STR)) {
                  String positionsSt = line.trim().split(GBCommon.M_RNA_STR)[1].trim();
                  line = reader.readLine();
                  while (!line.trim().startsWith("/")) {
                    positionsSt += line.trim();
                    line = reader.readLine();
                  }
                  mRnaList.add(positionsSt);
                } else if (line.trim().startsWith(GBCommon.NC_RNA_STR)) {
                  String positionsSt = line.trim().split(GBCommon.NC_RNA_STR)[1].trim();
                  line = reader.readLine();
                  while (!line.trim().startsWith("/")) {
                    positionsSt += line.trim();
                    line = reader.readLine();
                  }
                  ncRnaList.add(positionsSt);
                } else if (line.trim().startsWith(GBCommon.T_RNA_STR)) {
                  String positionsSt = line.trim().split(GBCommon.T_RNA_STR)[1].trim();
                  line = reader.readLine();
                  while (!line.trim().startsWith("/")) {
                    positionsSt += line.trim();
                    line = reader.readLine();
                  }
                  tRnaList.add(positionsSt);
                }
              } while (line.startsWith(" "));
              readLineFlag = false;
            } else if (line.startsWith(GBCommon.ORIGIN_STR)) {
              // sequence
              originFound = true;
              do {
                line = reader.readLine();
                String[] tempArray = line.trim().split(" ");
                for (int i = 1; i < tempArray.length; i++) {
                  seqStBuilder.append(tempArray[i]);
                }
              } while (line.startsWith(" "));
              readLineFlag = false;
            }
            if (readLineFlag) {
              line = reader.readLine();
            }
          } while (line != null && !line.startsWith(GBCommon.LAST_LINE_STR));

          // -----we only save the data when the sequence is found------------
          if (originFound) {
            System.out.println("accessionSt = " + accessionSt);
            System.out.println("versionSt = " + versionSt);
            System.out.println("definitionSt = " + definitionSt);
            System.out.println("commentSt = " + commentSt);
            System.out.println("sequence.length = " + seqStBuilder.toString().length());
            System.out.println("geneList = " + geneList);
            System.out.println("cdsList = " + cdsList);
            System.out.println("miscRnaList = " + miscRnaList);
            System.out.println("mRnaList = " + mRnaList);
            System.out.println("ncRnaList = " + ncRnaList);
            System.out.println("rRnaList = " + rRnaList);
            System.out.println("tmRnaList = " + tmRnaList);
            System.out.println("tRnaList = " + tRnaList);

            // --------create genome element node--------------
            long genomeElementId = createGenomeElementNode(
                versionSt, commentSt, definitionSt, inserter, genomeElementVersionIndex);

            // -----------genes-----------------
            for (String genePositionsSt : geneList) {
              geneProperties.put(GeneNode.POSITIONS_PROPERTY, genePositionsSt);
              long geneId = inserter.createNode(geneProperties);
              inserter.createRelationship(genomeElementId, geneId, genomeElementGeneRel, null);
            }
            // -----------CDS-----------------
            for (String cdsPositionsSt : cdsList) {
              cdsProperties.put(CDSNode.POSITIONS_PROPERTY, cdsPositionsSt);
              long cdsID = inserter.createNode(cdsProperties);
              inserter.createRelationship(genomeElementId, cdsID, genomeElementCDSRel, null);
            }
            // -----------misc rna-----------------
            for (String miscRnaPositionsSt : miscRnaList) {
              miscRnaProperties.put(MiscRNANode.POSITIONS_PROPERTY, miscRnaPositionsSt);
              long miscRnaID = inserter.createNode(miscRnaProperties);
              inserter.createRelationship(genomeElementId, miscRnaID, genomeElementMiscRnaRel, null);
            }
            // -----------m rna-----------------
            for (String mRnaPositionsSt : mRnaList) {
              mRnaProperties.put(MRNANode.POSITIONS_PROPERTY, mRnaPositionsSt);
              long mRnaID = inserter.createNode(mRnaProperties);
              inserter.createRelationship(genomeElementId, mRnaID, genomeElementMRnaRel, null);
            }
            // -----------nc rna-----------------
            for (String ncRnaPositionsSt : ncRnaList) {
              ncRnaProperties.put(NcRNANode.POSITIONS_PROPERTY, ncRnaPositionsSt);
              long ncRnaID = inserter.createNode(ncRnaProperties);
              inserter.createRelationship(genomeElementId, ncRnaID, genomeElementNcRnaRel, null);
            }
            // -----------r rna-----------------
            for (String rRnaPositionsSt : rRnaList) {
              rRnaProperties.put(RRNANode.POSITIONS_PROPERTY, rRnaPositionsSt);
              long rRnaID = inserter.createNode(rRnaProperties);
              inserter.createRelationship(genomeElementId, rRnaID, genomeElementRRnaRel, null);
            }
            // -----------tm rna-----------------
            for (String tmRnaPositionsSt : tmRnaList) {
              tmRnaProperties.put(TmRNANode.POSITIONS_PROPERTY, tmRnaPositionsSt);
              long tmRnaID = inserter.createNode(tmRnaProperties);
              inserter.createRelationship(genomeElementId, tmRnaID, genomeElementTmRnaRel, null);
            }
            // -----------t rna-----------------
            for (String tRnaPositionsSt : tRnaList) {
              tRnaProperties.put(TRNANode.POSITIONS_PROPERTY, tRnaPositionsSt);
              long tRnaID = inserter.createNode(tRnaProperties);
              inserter.createRelationship(genomeElementId, tRnaID, genomeElementTRnaRel, null);
            }
          }
        }
      }
    }
  } catch (Exception e) {
    logger.log(Level.SEVERE, e.getMessage());
    StackTraceElement[] trace = e.getStackTrace();
    for (StackTraceElement stackTraceElement : trace) {
      logger.log(Level.SEVERE, stackTraceElement.toString());
    }
  } finally {
    // shutdown, makes sure all changes are written to disk
    indexProvider.shutdown();
    inserter.shutdown();
    // closing logger file handler
    fh.close();
  }
}
public void read(InputStream stream) {
    XMLInputFactory inputFactory = XMLInputFactory.newInstance();
    try {
        XMLStreamReader reader = inputFactory.createXMLStreamReader(stream);
        Map<String, String> keyIdMap = new HashMap<String, String>();
        Map<String, String> keyTypesForNodes = new HashMap<String, String>();
        Map<String, String> keyTypesForEdges = new HashMap<String, String>();
        Map<String, String> keyAutoindexesForNodes = new HashMap<String, String>();
        Map<String, String> keyAutoindexesForEdges = new HashMap<String, String>();
        Map<String, Node> nodes = new HashMap<String, Node>();
        List<Edge> orphanEdges = new ArrayList<Edge>();
        Node currentNode = null;
        Edge currentEdge = null;
        String currentVertexId = null;
        String currentEdgeId = null;
        boolean inVertex = false;
        boolean inEdge = false;
        int graphDepth = 0;
        while (reader.hasNext()) {
            int eventType = reader.next();
            if (eventType == XMLEvent.START_ELEMENT) {
                String elementName = reader.getName().getLocalPart();
                if (elementName.equals(GraphMLTokens.KEY)) {
                    String id = reader.getAttributeValue(null, GraphMLTokens.ID);
                    String attributeName = reader.getAttributeValue(null, GraphMLTokens.ATTR_NAME);
                    String attributeType = reader.getAttributeValue(null, GraphMLTokens.ATTR_TYPE);
                    String attributeAutoindexName =
                            reader.getAttributeValue(null, GraphMLTokens.ATTR_AUTOINDEX);
                    String attributeFor = reader.getAttributeValue(null, GraphMLTokens.FOR);
                    keyIdMap.put(id, attributeName);
                    if (GraphMLTokens.NODE.equalsIgnoreCase(attributeFor)) {
                        keyTypesForNodes.put(attributeName, attributeType);
                        if (attributeAutoindexName != null) {
                            keyAutoindexesForNodes.put(attributeName, attributeAutoindexName);
                        }
                    } else if (GraphMLTokens.EDGE.equalsIgnoreCase(attributeFor)) {
                        keyTypesForEdges.put(attributeName, attributeType);
                        if (attributeAutoindexName != null) {
                            keyAutoindexesForEdges.put(attributeName, attributeAutoindexName);
                        }
                    }
                } else if (elementName.equals(GraphMLTokens.NODE) && isRootGraph(graphDepth)) {
                    currentVertexId = reader.getAttributeValue(null, GraphMLTokens.ID);
                    if (currentVertexId != null) {
                        Node node = graphDatabaseService.createNode();
                        currentNode = node;
                        nodes.put(currentVertexId, node);
                    }
                    inVertex = true;
                } else if (elementName.equals(GraphMLTokens.EDGE) && isRootGraph(graphDepth)) {
                    currentEdgeId = reader.getAttributeValue(null, GraphMLTokens.ID);
                    String edgeLabel = reader.getAttributeValue(null, GraphMLTokens.LABEL);
                    edgeLabel = edgeLabel == null ? GraphMLTokens._DEFAULT : edgeLabel;
                    String sourceId = reader.getAttributeValue(null, GraphMLTokens.SOURCE);
                    String targetId = reader.getAttributeValue(null, GraphMLTokens.TARGET);
                    currentEdge = new Edge(currentEdgeId, sourceId, targetId, edgeLabel);
                    inEdge = true;
                } else if (elementName.equals(GraphMLTokens.DATA) && isRootGraph(graphDepth)) {
                    String attributeName = reader.getAttributeValue(null, GraphMLTokens.KEY);
                    if (isInsideNodeTag(inVertex)) {
                        if (keyTypesForNodes.containsKey(attributeName)) {
                            String value = reader.getElementText();
                            Object typeCastValue = typeCastValue(attributeName, value, keyTypesForNodes);
                            if (GraphMLTokens.ID.equals(attributeName)) {
                                throw new IllegalArgumentException(
                                        "id key is reserved for node. Node with erroneous data: "
                                                + currentVertexId);
                            }
                            if (currentNode != null) {
                                // inserted directly to neo4j
                                currentNode.setProperty(attributeName, typeCastValue);
                                if (keyAutoindexesForNodes.containsKey(attributeName)) {
                                    String autoindexName = keyAutoindexesForNodes.get(attributeName);
                                    this.graphDatabaseService
                                            .index()
                                            .forNodes(autoindexName)
                                            .add(currentNode, attributeName, typeCastValue);
                                }
                            }
                        } else {
                            throw new IllegalArgumentException(
                                    "Attribute key: " + attributeName + " is not declared.");
                        }
                    } else if (isInsideEdgeTag(inEdge)) {
                        if (keyTypesForEdges.containsKey(attributeName)) {
                            String value = reader.getElementText();
                            Object typeCastValue = typeCastValue(attributeName, value, keyTypesForEdges);
                            if (GraphMLTokens.LABEL.equals(attributeName)) {
                                throw new IllegalArgumentException(
                                        "label key is reserved for edge. Edge with erroneous data: "
                                                + currentEdgeId);
                            }
                            if (currentEdge != null) {
                                // saved in-memory edge
                                currentEdge.putData(attributeName, typeCastValue);
                            }
                        } else {
                            throw new IllegalArgumentException(
                                    "Attribute key: " + attributeName + " is not declared.");
                        }
                    }
                } else if (elementName.equals(GraphMLTokens.INDEX) && isRootGraph(graphDepth)) {
                    if (isInsideNodeTag(inVertex)) {
                        // add custom index over currentNode
                        String indexName = reader.getAttributeValue(null, GraphMLTokens.ATTR_INDEX_NAME);
                        String indexKey = reader.getAttributeValue(null, GraphMLTokens.ATTR_INDEX_KEY);
                        String indexConfiguration =
                                reader.getAttributeValue(null, GraphMLTokens.ATTR_INDEX_CONFIGURATION);
                        String indexData = reader.getElementText();
                        if (Strings.isNullOrEmpty(indexConfiguration)) {
                            this.graphDatabaseService
                                    .index()
                                    .forNodes(indexName)
                                    .add(currentNode, indexKey, indexData);
                        } else {
                            String[] indexConfigurationTokens =
                                    indexConfiguration.split(
                                            GraphMLTokens.ATTR_INDEX_CONFIGURATION_SEPARATOR);
                            this.graphDatabaseService
                                    .index()
                                    .forNodes(indexName, MapUtil.stringMap(indexConfigurationTokens))
                                    .add(currentNode, indexKey, indexData);
                        }
                    } else if (isInsideEdgeTag(inEdge)) {
                        // add custom index over currentEdge
                        String indexName = reader.getAttributeValue(null, GraphMLTokens.ATTR_INDEX_NAME);
                        String indexKey = reader.getAttributeValue(null, GraphMLTokens.ATTR_INDEX_KEY);
                        String indexConfiguration =
                                reader.getAttributeValue(null, GraphMLTokens.ATTR_INDEX_CONFIGURATION);
                        String indexData = reader.getElementText();
                        if (Strings.isNullOrEmpty(indexConfiguration)) {
                            currentEdge.putManualIndex(indexName, indexKey, indexData);
                        } else {
                            String[] indexConfigurationTokens =
                                    indexConfiguration.split(
                                            GraphMLTokens.ATTR_INDEX_CONFIGURATION_SEPARATOR);
                            currentEdge.putManualIndex(
                                    indexName,
                                    indexKey,
                                    indexData,
                                    MapUtil.stringMap(indexConfigurationTokens));
                        }
                    }
                } else if (elementName.equals(GraphMLTokens.GRAPH)) {
                    nodes.put("0", this.graphDatabaseService.getReferenceNode());
                    graphDepth++;
                }
            } else if (eventType == XMLEvent.END_ELEMENT) {
                String elementName = reader.getName().getLocalPart();
                if (elementName.equals(GraphMLTokens.NODE) && isRootGraph(graphDepth)) {
                    currentNode = null;
                    currentVertexId = null;
                    inVertex = false;
                } else if (elementName.equals(GraphMLTokens.EDGE) && isRootGraph(graphDepth)) {
                    addEdge(nodes, orphanEdges, currentEdge, keyAutoindexesForEdges);
                    currentEdge = null;
                    currentEdgeId = null;
                    inEdge = false;
                } else if (elementName.equals(GraphMLTokens.GRAPHML)) {
                    addOrphanEdgesWithNewParents(nodes, orphanEdges, keyAutoindexesForEdges);
                } else if (elementName.equals(GraphMLTokens.GRAPH)) {
                    graphDepth--;
                }
            }
        }
        reader.close();
    } catch (XMLStreamException e) {
        throw new IOError(e);
    }
}
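A minimal usage sketch for read(InputStream): GraphMLReader is a hypothetical name for the declaring class, assumed to be constructed with the GraphDatabaseService it writes to, and since the method opens no transaction itself the caller wraps it in one; exception handling beyond the throws clause is elided.

public static void main(String[] args) throws Exception {
    GraphDatabaseService db = new EmbeddedGraphDatabase("target/graphml-db");
    Transaction tx = db.beginTx();
    try {
        // hypothetical constructor taking the target database
        GraphMLReader graphMLReader = new GraphMLReader(db);
        graphMLReader.read(new FileInputStream("graph.graphml"));
        tx.success();
    } finally {
        tx.finish();
    }
    db.shutdown();
}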
private void startMember(InstanceId serverId) throws URISyntaxException, IOException {
    Clusters.Member member = spec.getMembers().get(serverId.toIntegerIndex() - 1);
    StringBuilder initialHosts = new StringBuilder(spec.getMembers().get(0).getHost());
    for (int i = 1; i < spec.getMembers().size(); i++) {
        initialHosts.append(",").append(spec.getMembers().get(i).getHost());
    }
    File parent = new File(root, name);
    URI clusterUri = new URI("cluster://" + member.getHost());
    if (member.isFullHaMember()) {
        int clusterPort = clusterUri.getPort();
        int haPort = clusterUri.getPort() + 3000;
        File storeDir = new File(parent, "server" + serverId);
        if (storeDirInitializer != null) {
            storeDirInitializer.initializeStoreDir(serverId.toIntegerIndex(), storeDir);
        }
        GraphDatabaseBuilder builder =
                dbFactory.newHighlyAvailableDatabaseBuilder(storeDir.getAbsolutePath());
        builder.setConfig(ClusterSettings.cluster_name, name);
        builder.setConfig(ClusterSettings.initial_hosts, initialHosts.toString());
        builder.setConfig(ClusterSettings.server_id, serverId + "");
        builder.setConfig(ClusterSettings.cluster_server, "0.0.0.0:" + clusterPort);
        builder.setConfig(HaSettings.ha_server, ":" + haPort);
        builder.setConfig(OnlineBackupSettings.online_backup_enabled, Settings.FALSE);
        builder.setConfig(commonConfig);
        if (instanceConfig.containsKey(serverId.toIntegerIndex())) {
            builder.setConfig(instanceConfig.get(serverId.toIntegerIndex()));
        }
        config(builder, name, serverId);
        final HighlyAvailableGraphDatabaseProxy graphDatabase =
                new HighlyAvailableGraphDatabaseProxy(builder);
        members.put(serverId, graphDatabase);
        life.add(
                new LifecycleAdapter() {
                    @Override
                    public void stop() throws Throwable {
                        graphDatabase.get().shutdown();
                    }
                });
    } else {
        Map<String, String> config =
                MapUtil.stringMap(
                        ClusterSettings.cluster_name.name(), name,
                        ClusterSettings.initial_hosts.name(), initialHosts.toString(),
                        ClusterSettings.server_id.name(), serverId + "",
                        ClusterSettings.cluster_server.name(), "0.0.0.0:" + clusterUri.getPort(),
                        GraphDatabaseSettings.store_dir.name(),
                                new File(parent, "arbiter" + serverId).getAbsolutePath());
        Config config1 =
                new Config(
                        config,
                        InternalAbstractGraphDatabase.Configuration.class,
                        GraphDatabaseSettings.class);
        ObjectStreamFactory objectStreamFactory = new ObjectStreamFactory();
        ClusterClient clusterClient =
                new ClusterClient(
                        new Monitors(),
                        ClusterClient.adapt(config1),
                        NullLogService.getInstance(),
                        new NotElectableElectionCredentialsProvider(),
                        objectStreamFactory,
                        objectStreamFactory);
        arbiters.add(
                new ClusterMembers(
                        clusterClient,
                        clusterClient,
                        new ClusterMemberEvents() {
                            @Override
                            public void addClusterMemberListener(ClusterMemberListener listener) {
                                // noop
                            }

                            @Override
                            public void removeClusterMemberListener(ClusterMemberListener listener) {
                                // noop
                            }
                        },
                        clusterClient.getServerId()));
        life.add(new FutureLifecycleAdapter<>(clusterClient));
    }
}
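The arbiter branch above builds its configuration with MapUtil.stringMap, the same idiom used throughout these examples, which pairs consecutive varargs into map entries. A minimal sketch with placeholder keys; the keys below are illustrative only, not real Neo4j setting names:

// MapUtil.stringMap interprets its varargs as alternating keys and values.
Map<String, String> params = MapUtil.stringMap(
        "some.key", "some value",
        "another.key", "another value");
assert "some value".equals(params.get("some.key"));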