private SensorMeta getSensorMeta(
     StationMeta stationMeta, String sensorName, boolean createIfNotExists) {
   throwNull(stationMeta);
   throwNull(sensorName);
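   // look up the per-station sensor map; when createIfNotExists is set, a new sensor also gets its own chunk and chunk-meta TreeMaps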
   BTreeMap<String, SensorMeta> sensorMap = db.getTreeMap(stationMeta.db_name_sensor_map);
   SensorMeta sensorMeta = sensorMap.get(sensorName);
   if (sensorMeta == null && createIfNotExists) {
     sensorMeta = new SensorMeta(stationMeta.stationName, sensorName);
     db.checkNameNotExists(sensorMeta.db_name_sensor_chunk_map);
     db.createTreeMap(sensorMeta.db_name_sensor_chunk_map)
         .keySerializer(BTreeKeySerializer.ZERO_OR_POSITIVE_INT)
         // .valueSerializer(Chunk.DELTA_TIME_DELTA_DELTA_VALUE_INT_QUANTIZED_SERIALIZER)
         // .valueSerializer(Chunk.SNAPPY_DELTA_TIME_DELTA_DELTA_VALUE_INT_QUANTIZED_SERIALIZER)
         .valueSerializer(ChunkSerializer.DEFAULT)
         // .valuesOutsideNodesEnable() // !!! does not work: growing database
         // .
         .makeOrGet();
     db.checkNameNotExists(sensorMeta.db_name_sensor_chunkmeta_map);
     db.createTreeMap(sensorMeta.db_name_sensor_chunkmeta_map)
         .keySerializer(BTreeKeySerializer.ZERO_OR_POSITIVE_INT)
         .valueSerializer(ChunkMeta.SERIALIZER)
         .makeOrGet();
     sensorMap.put(sensorName, sensorMeta);
   }
   if (sensorMeta == null) {
     // new Throwable().printStackTrace();
     log.warn("no sensor: " + sensorName + "  in station: " + stationMeta.stationName);
   }
   return sensorMeta;
 }
Example #2
  @Test
  public void WriteDBInt_lastKey_set_middle() {
    int numberOfRecords = 1000;

    /* Creates connections to MapDB */
    DB db1 = DBMaker.memoryDB().transactionDisable().make();

    /* Creates maps */
    NavigableSet<Integer> map1 = db1.treeSet("column1");

    /* Inserts initial values in maps */
    for (int i = 0; i < numberOfRecords; i++) {
      map1.add(i);
    }

    assertEquals((Object) (numberOfRecords - 1), map1.last());

    map1.clear();

    /* Inserts some values in maps */
    for (int i = 100; i < 110; i++) {
      map1.add(i);
    }

    assertEquals(10, map1.size());
    assertFalse(map1.isEmpty());
    assertEquals((Object) 109, map1.last());
    assertEquals((Object) 100, map1.first());
  }
Example #3
 @Deactivate
 public void deactivate() {
   timer.cancel();
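   // walk every named collection in the store and drop the ones that are empty before closing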
   for (Map.Entry<String, Object> entry : localDB.getAll().entrySet()) {
     String key = entry.getKey();
     Object value = entry.getValue();
     // This is a map implementation to be handled as such
     if (value instanceof Map) {
       Map asMap = (Map) value;
       if (asMap.isEmpty()) {
         // the map is empty and may be deleted
         localDB.delete(key);
       }
       // This is a set implementation and can be handled as such
     } else if (value instanceof Set) {
       Set asSet = (Set) value;
       if (asSet.isEmpty()) {
         // the set is empty and may be deleted
         localDB.delete(key);
       }
     }
   }
   localDB.commit();
   localDB.close();
   log.info("Stopped");
 }
Example #4
  @Test(timeout = 600000)
  public void expire_maxSize_with_TTL_get() throws InterruptedException {
    if (UtilsTest.scale() == 0) return;

    File f = UtilsTest.tempDbFile();
    for (int o = 0; o < 2; o++) {
      final DB db = DBMaker.fileDB(f).transactionDisable().make();
      final HTreeMap<Object, Object> map =
          db.hashMapCreate("foo")
              .expireMaxSize(1000)
              .expireAfterAccess(3, TimeUnit.SECONDS)
              .makeOrGet();

      map.put("foo", "bar");

      for (int i = 0; i < 10; i++) assertEquals("bar", map.get("foo"));

      Thread.sleep(6000);
      map.get("aa"); // so internal tasks have change to run
      assertEquals(null, map.get("foo"));

      db.commit();
      db.close();
      Thread.sleep(1100);
    }
  }
Example #5
  @SuppressWarnings("unchecked")
  private Map<BigInteger, Order> openMap(DB database) {
    // OPEN MAP
    BTreeMap<BigInteger, Order> map =
        database.createTreeMap("orders").valueSerializer(new OrderSerializer()).makeOrGet();

    // HAVE/WANT KEY
    this.haveWantKeyMap =
        database.createTreeMap("orders_key_have_want").comparator(Fun.COMPARATOR).makeOrGet();

    // BIND HAVE/WANT KEY
    Bind.secondaryKey(
        map,
        this.haveWantKeyMap,
        new Fun.Function2<Tuple4<Long, Long, BigDecimal, BigInteger>, BigInteger, Order>() {
          @Override
          public Tuple4<Long, Long, BigDecimal, BigInteger> run(BigInteger key, Order value) {
            return new Tuple4<Long, Long, BigDecimal, BigInteger>(
                value.getHave(), value.getWant(), value.getPrice(), key);
          }
        });

    // RETURN
    return map;
  }
Example #6
  @Test
  public void test() {
    DB db =
        DBMaker.newTempFileDB()
            .mmapFileEnableIfSupported()
            .compressionEnable()
            .transactionDisable()
            .checksumEnable()
            .commitFileSyncDisable()
            .make();
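    // source iterator for the data pump; MapDB's pump expects entries in descending key order, hence the countdown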
    Iterator<Fun.Pair<Long, String>> newIterator =
        new Iterator<Fun.Pair<Long, String>>() {
          private AtomicLong value = new AtomicLong(10000000);

          @Override
          public boolean hasNext() {
            return value.get() > 0;
          }

          @Override
          public Fun.Pair<Long, String> next() {
            Long v = value.decrementAndGet();
            return new Fun.Pair<Long, String>(v, v.toString());
          }

          @Override
          public void remove() {}
        };
    BTreeMap<Long, String> cubeData = db.createTreeMap("data").pumpSource(newIterator).make();
  }
  private StationMeta getStationMeta(String stationName, boolean createIfNotExists) {
    throwNull(stationName);
    StationMeta stationMeta = stationMetaMap.get(stationName);
    if (stationMeta == null && createIfNotExists) {
      stationMeta = new StationMeta(stationName);

      db.checkNameNotExists(stationMeta.db_name_sensor_map);
      db.createTreeMap(stationMeta.db_name_sensor_map)
          .keySerializer(BTreeKeySerializer.STRING)
          .valueSerializer(SensorMeta.SERIALIZER)
          .makeOrGet();

      db.checkNameNotExists(stationMeta.db_name_sensor_time_series_mask_map);
      db.createTreeMap(stationMeta.db_name_sensor_time_series_mask_map)
          .keySerializer(BTreeKeySerializer.STRING)
          .valueSerializer(TimeSeriesMask.SERIALIZER)
          .makeOrGet();

      stationMetaMap.put(stationName, stationMeta);
    }
    if (stationMeta == null) {
      // new Throwable().printStackTrace();
      log.warn("no station: " + stationName);
    }
    return stationMeta;
  }
Example #8
  @Test
  public void concurrent_first_key() {
    DB db = DBMaker.memoryDB().transactionDisable().make();
    final BTreeMap m = db.treeMap("name");

    // fill
    final int c = 1000000 * TT.scale();
    for (int i = 0; i <= c; i++) {
      m.put(i, i);
    }

    Thread t =
        new Thread() {
          @Override
          public void run() {
            for (int i = 0; i <= c; i++) {
              m.remove(c);
            }
          }
        };
    t.start(); // start the remover so it runs concurrently with the firstKey() checks below
    while (t.isAlive()) {
      assertNotNull(m.firstKey());
    }
  }
Example #9
  @Test
  public void WriteDBInt_lastKey() {
    int numberOfRecords = 1000;

    /* Creates connections to MapDB */
    DB db1 = DBMaker.memoryDB().transactionDisable().make();

    /* Creates maps */
    ConcurrentNavigableMap<Integer, Integer> map1 = db1.treeMap("column1");

    /* Inserts initial values in maps */
    for (int i = 0; i < numberOfRecords; i++) {
      map1.put(i, i);
    }

    assertEquals((Object) (numberOfRecords - 1), map1.lastKey());

    map1.clear();

    /* Inserts some values in maps */
    for (int i = 0; i < 10; i++) {
      map1.put(i, i);
    }

    assertEquals(10, map1.size());
    assertFalse(map1.isEmpty());
    assertEquals((Object) 9, map1.lastKey());
    assertEquals((Object) 9, map1.lastEntry().getValue());
    assertEquals((Object) 0, map1.firstKey());
    assertEquals((Object) 0, map1.firstEntry().getValue());
  }
Example #10
  @Test
  public void mod_listener_lock() {
    DB db = DBMaker.memoryDB().transactionDisable().make();
    final BTreeMap m = db.treeMap("name");

    final long rootRecid = db.getEngine().get(m.rootRecidRef, Serializer.RECID);
    final AtomicInteger counter = new AtomicInteger();

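    // the listener registered below fires while the writing thread still holds the lock on the root node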
    m.modificationListenerAdd(
        new Bind.MapListener() {
          @Override
          public void update(Object key, Object oldVal, Object newVal) {
            assertTrue(m.nodeLocks.get(rootRecid) == Thread.currentThread());
            assertEquals(1, m.nodeLocks.size());
            counter.incrementAndGet();
          }
        });

    m.put("aa", "aa");
    m.put("aa", "bb");
    m.remove("aa");

    m.put("aa", "aa");
    m.remove("aa", "aa");
    m.putIfAbsent("aa", "bb");
    m.replace("aa", "bb", "cc");
    m.replace("aa", "cc");

    assertEquals(8, counter.get());
  }
  @Override
  protected boolean swapAndClearCache(String namespaceKey, String cacheKey) {
    final Lock lock = nsLocks.get(namespaceKey);
    lock.lock();
    try {
      Preconditions.checkArgument(
          mmapDB.exists(cacheKey), "Namespace [%s] does not exist", cacheKey);

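      // swap the freshly built cache in under a random name and drop the namespace's previous cache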
      final String swapCacheKey = UUID.randomUUID().toString();
      mmapDB.rename(cacheKey, swapCacheKey);

      final String priorCache = currentNamespaceCache.put(namespaceKey, swapCacheKey);
      if (priorCache != null) {
        // TODO: resolve what happens here if query is actively going on
        mmapDB.delete(priorCache);
        dataSize.set(tmpFile.length());
        return true;
      } else {
        dataSize.set(tmpFile.length());
        return false;
      }
    } finally {
      lock.unlock();
    }
  }
 /** write all data to disk */
 public void commit() {
   synchronized (db) {
     if (!db.isClosed()) {
       db.commit();
     }
   }
 }
Example #13
  @Test
  public void circular_queue_persisted() {
    // the circular queue is limited to 4 objects on disk
    File f = UtilsTest.tempDbFile();
    DB db = DBMaker.newFileDB(f).transactionDisable().cacheDisable().make();
    Queue queue = db.createCircularQueue("test", null, 4);
    // add 6 objects to the queue
    queue.add(0);
    queue.add(1);
    queue.add(2);
    queue.add(3);
    // adding 4 evicts 0, the oldest element
    queue.add(4);
    // adding 5 evicts 1
    queue.add(5);

    db.close();
    db = DBMaker.newFileDB(f).transactionDisable().cacheDisable().deleteFilesAfterClose().make();
    queue = db.getCircularQueue("test");

    assertEquals(2, queue.poll());
    assertEquals(3, queue.poll());
    assertEquals(4, queue.poll());
    assertEquals(5, queue.poll());
    assertNull(queue.poll());
    db.close();
  }
Example #14
  @Test
  @org.junit.Ignore
  public void large_node_size() {
    for (int i : new int[] {10, 200, 6000}) {

      int max = i * 100;
      File f = TT.tempDbFile();
      DB db = DBMaker.fileDB(f).transactionDisable().make();
      Map m =
          db.treeMapCreate("map")
              .nodeSize(i)
              .keySerializer(BTreeKeySerializer.INTEGER)
              .valueSerializer(Serializer.INTEGER)
              .make();

      for (int j = 0; j < max; j++) {
        m.put(j, j);
      }

      db.close();
      db = DBMaker.fileDB(f).deleteFilesAfterClose().transactionDisable().make();
      m = db.treeMap("map");

      for (Integer j = 0; j < max; j++) {
        assertEquals(j, m.get(j));
      }
      db.close();
    }
  }
Example #15
 @Test
 public void testGetTreeSet() throws Exception {
   Set m1 = db.getTreeSet("test");
   m1.add(1);
   m1.add(2);
   assertTrue(m1 == db.getTreeSet("test"));
   assertEquals(m1, new DB(engine).getTreeSet("test"));
 }
Example #16
 @Test
 public void testGetTreeMap() throws Exception {
   Map m1 = db.getTreeMap("test");
   m1.put(1, 2);
   m1.put(3, 4);
   assertTrue(m1 == db.getTreeMap("test"));
   assertEquals(m1, new DB(engine).getTreeMap("test"));
 }
 public TxMaker makeTxMaker() {
   props.setProperty(Keys.fullTx, TRUE);
   snapshotEnable();
   Engine e = makeEngine();
   // init catalog if needed
   DB db = new DB(e);
   db.commit();
   return new TxMaker(e, propsGetBool(Keys.strictDBGet), propsGetBool(Keys.snapshots));
 }
 /**
  * Properly close the database. Note: The database should be ACID even when not properly closed.
  */
 public void closeDatabase() {
   if (database != null && !database.isClosed()) {
     replaceClassLoader();
     database.close();
     resetOldClassLoader();
   }
   isClosed = true;
   resetOldClassLoader();
 }
 /** Clears the database on the computer and resets it. */
 public void clearAndResetMap() {
   if (database != null && !database.isClosed()) {
     replaceClassLoader();
     database.delete(INTERVALS);
     database.commit();
     resetOldClassLoader();
     set = createSet();
   }
 }
Example #20
 @Test
 public void testMapDb() throws InterruptedException {
   DB database = DBMaker.newMemoryDB().make();
   BlockingQueue<String> queue = database.getQueue("test-queue");
   queue.put("test-value");
   database.commit();
   assertThat(queue.take(), is("test-value"));
   database.commit();
   database.close();
 }
Example #21
  @Test
  public void getAll() {
    db.createAtomicString("aa", "100");
    db.getHashMap("zz").put(11, "12");
    Map all = db.getAll();

    assertEquals(2, all.size());
    assertEquals("100", ((Atomic.String) all.get("aa")).get());
    assertEquals("12", ((HTreeMap) all.get("zz")).get(11));
  }
 /** write all data to disk and close db */
 public void close() {
   synchronized (db) {
     if (!db.isClosed()) {
       log.info("commit...");
       db.commit();
       log.info("close...");
       db.close();
       log.trace("closed");
     }
   }
 }
  @Test
  public void testLong() {
    DB db = DBMaker.memoryDB().make();
    Map m = db.treeMap("test").keySerializer(Serializer.LONG).createOrOpen();

    for (long i = 0; i < 1000; i++) {
      m.put(i * i, i * i + 1);
    }

    for (long i = 0; i < 1000; i++) {
      assertEquals(i * i + 1, m.get(i * i));
    }
  }
 public static void main(String[] args) {
   DB db = DBMaker.memoryDB().make();
   // a
   HTreeMap<String, Long> map =
       db.hashMapCreate("map")
           .valueCreator(
               new Fun.Function1<Long, String>() {
                 @Override
                 public Long run(String o) {
                   return 1111L;
                 }
               })
           .makeOrGet();
   // z
 }
Example #25
  public static void main(String[] args) throws IOException {

    // Open db backed by a temporary file
    File f = File.createTempFile("mapdb", "temp");
    DB db = DBMaker.newFileDB(f).make();

    // Open or create table
    Map<String, Person> dbMap = db.getTreeMap("personAndCity");

    // Add data
    Person bilbo = new Person("Bilbo", "The Shire");
    Person sauron = new Person("Sauron", "Mordor");
    Person radagast = new Person("Radagast", "Crazy Farm");

    dbMap.put("west", bilbo);
    dbMap.put("south", sauron);
    dbMap.put("mid", radagast);

    // Commit and close
    db.commit();
    db.close();

    //
    // The second option for custom values is to use your own serializer.
    // This usually gives better performance, as MapDB does not have to
    // analyze the class structure.
    //

    class CustomSerializer implements Serializer<Person>, Serializable {

      @Override
      public void serialize(DataOutput out, Person value) throws IOException {
        out.writeUTF(value.getName());
        out.writeUTF(value.getCity());
      }

      @Override
      public Person deserialize(DataInput in, int available) throws IOException {
        return new Person(in.readUTF(), in.readUTF());
      }

      @Override
      public int fixedSize() {
        return -1;
      }
    }

    Serializer<Person> serializer = new CustomSerializer();

    DB db2 = DBMaker.newTempFileDB().make();

    Map<String, Person> map2 = db2.createHashMap("map").valueSerializer(serializer).make();

    map2.put("North", new Person("Yet another dwarf", "Somewhere"));

    db2.commit();
    db2.close();
  }
  public StreamDB(String streamdbPathPrefix) {
    throwNull(streamdbPathPrefix);
    String pathName = streamdbPathPrefix;

    try {
      File dir = new File(streamdbPathPrefix);
      dir.getParentFile().mkdirs();
    } catch (Exception e) {
      log.error(e);
    }

    db =
        DBMaker.newFileDB(new File(pathName))
            // .checksumEnable()
            // .compressionEnable() //in new db disabled!
            // .transactionDisable()
            // .mmapFileEnable() //slow commit and close!!!
            .mmapFileEnablePartial()
            .asyncWriteEnable()
            .asyncWriteFlushDelay(500)
            .cacheWeakRefEnable()
            .cacheSize(1000000)
            .closeOnJvmShutdown()
            .make();

    stationMetaMap =
        db.createTreeMap(DB_NAME_STATION_MAP)
            .keySerializer(BTreeKeySerializer.STRING)
            .valueSerializer(StationMeta.SERIALIZER)
            .makeOrGet();
  }
  public void printStatistics() {
    for (StationMeta stationMeta : stationMetaMap.values()) {
      System.out.println(stationMeta.stationName);
      for (SensorMeta sensorMeta : getSensorMap(stationMeta).values()) {
        BTreeMap<Integer, ChunkMeta> sensorChunkMetaMap = getSensorChunkMetaMap(sensorMeta);
        int entryCount = 0;
        for (ChunkMeta chunkMeta : sensorChunkMetaMap.values()) {
          entryCount += chunkMeta.entryCount;
        }
        BTreeMap<Integer, Chunk> sensorChunkMap = getSensorChunkMap(sensorMeta);
        System.out.print(
            sensorMeta.sensorName
                + " "
                + sensorChunkMetaMap.size()
                + ";"
                + sensorChunkMap.size()
                + ":"
                + entryCount
                + "   ");
      }
      System.out.println();
    }

    for (String key : db.getAll().keySet()) {
      System.out.println(key);
    }
  }
  @Test
  public void testString() {

    DB db = DBMaker.memoryDB().make();
    Map m = db.treeMap("test").keySerializer(Serializer.STRING).createOrOpen();

    List<String> list = new ArrayList<String>();
    for (long i = 0; i < 1000; i++) {
      String s = "" + Math.random() + (i * i * i);
      m.put(s, s + "aa");
    }

    for (String s : list) {
      assertEquals(s + "aa", m.get(s));
    }
  }
Example #29
 protected synchronized void removeRecordsOlderThan(long millisInUtc) {
   RW_LOCK.writeLock().lock();
   try {
     if (millisInUtc <= 0 || stopped) {
       return;
     }
     long searchBound = TIME_BASED_KEYS.getCounterEndingAt(millisInUtc);
     LOGGER.debug("Removing records older than " + searchBound);
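      // headMap returns a live view, so clearing it removes every record older than searchBound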
     NavigableMap<Long, JournalRecord> toRemove = this.records.headMap(searchBound);
     toRemove.clear();
     journalDB.commit();
     journalDB.compact();
   } finally {
     RW_LOCK.writeLock().unlock();
   }
 }
 @NotNull
 private String createLocalFile(@NotNull ObjectId id, @NotNull ObjectLoader loader)
     throws IOException {
   // Create LFS stream.
   final File tmpFile = new File(tempPath, UUID.randomUUID().toString());
   final MessageDigest md = createSha256();
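    // stream the blob to a temp file while computing its SHA-256; the hex digest becomes the LFS object name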
   try (InputStream istream = loader.openStream();
       OutputStream ostream = new FileOutputStream(tmpFile)) {
     byte[] buffer = new byte[0x10000];
     while (true) {
       int size = istream.read(buffer);
       if (size <= 0) break;
       ostream.write(buffer, 0, size);
       md.update(buffer, 0, size);
     }
   }
   final String hash = new String(Hex.encodeHex(md.digest(), true));
   cacheSha256.putIfAbsent(id.name(), hash);
   cache.commit();
   // Rename file.
   final File lfsFile =
       new File(
           basePath,
           "lfs/objects/" + hash.substring(0, 2) + "/" + hash.substring(2, 4) + "/" + hash);
   makeParentDirs(lfsFile.getParentFile());
   if (lfsFile.exists()) {
     if (!tmpFile.delete()) {
       log.warn("Can't delete temporary file: {}", lfsFile.getAbsolutePath());
     }
   } else if (!tmpFile.renameTo(lfsFile)) {
     throw new IOException("Can't rename file: " + tmpFile + " -> " + lfsFile);
   }
   return hash;
 }