@Override
public <A> long put(A value, Serializer<A> serializer) {
    assert (value != null);
    // serialize outside of any lock
    DataIO.DataOutputByteArray out = serialize(value, serializer);

    final long ioRecid;
    if (!disableLocks) {
        newRecidLock.readLock().lock();
    }
    try {
        if (!disableLocks) {
            structuralLock.lock();
        }
        // allocate a free index slot (ioRecid) and physical space under the structural lock
        final long[] indexVals;
        try {
            ioRecid = freeIoRecidTake(true);
            indexVals = physAllocate(out.pos, true, false);
        } finally {
            if (!disableLocks) {
                structuralLock.unlock();
            }
        }
        // write the record under the per-segment write lock
        final Lock lock;
        if (disableLocks) {
            lock = null;
        } else {
            lock = locks[Store.lockPos(ioRecid)].writeLock();
            lock.lock();
        }
        try {
            put2(out, ioRecid, indexVals);
        } finally {
            if (!disableLocks) {
                lock.unlock();
            }
        }
    } finally {
        if (!disableLocks) {
            newRecidLock.readLock().unlock();
        }
    }

    // convert the index-file offset back into a user-visible recid
    long recid = (ioRecid - IO_USER_START) / 8;
    assert (recid > 0);
    if (CC.LOG_STORE && LOG.isLoggable(Level.FINEST))
        LOG.finest("Put recid=" + recid + ", size=" + out.pos + ", val=" + value + " ser=" + serializer);
    recycledDataOuts.offer(out);
    return recid;
}
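/*
 * Illustrative sketch (not part of the original class): the recid <-> ioRecid
 * mapping used at the end of put() above. Index slots are 8 bytes wide and user
 * records start at IO_USER_START, so recid = (ioRecid - IO_USER_START) / 8 and,
 * inversely, ioRecid = IO_USER_START + recid * 8. Helper names are hypothetical.
 */
private static long recidToIoRecid(long recid) {
    // inverse of the conversion performed in put()
    return IO_USER_START + recid * 8;
}

private static long ioRecidToRecid(long ioRecid) {
    // same conversion put() performs before returning
    return (ioRecid - IO_USER_START) / 8;
}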
protected void update2(DataIO.DataOutputByteArray out, long ioRecid) {
    final long indexVal = index.getLong(ioRecid);
    final int size = (int) (indexVal >>> 48);
    final boolean linked = (indexVal & MASK_LINKED) != 0;
    assert (disableLocks || locks[Store.lockPos(ioRecid)].writeLock().isHeldByCurrentThread());

    if (!linked && out.pos > 0 && size > 0
            && size2ListIoRecid(size) == size2ListIoRecid(out.pos)) {
        // size did change, but the record still fits into its current location
        final long offset = indexVal & MASK_OFFSET;
        // note: even if the size did not change, the MASK_ARCHIVE bit still has to be written
        index.putLong(ioRecid, (((long) out.pos) << 48) | offset | MASK_ARCHIVE);
        phys.putData(offset, out.buf, 0, out.pos);
    } else {
        long[] indexVals = spaceReclaimTrack ? getLinkedRecordsIndexVals(indexVal) : null;
        if (!disableLocks) {
            structuralLock.lock();
        }
        try {
            if (spaceReclaimTrack) {
                // free the first record pointed to by indexVal
                if (size > 0)
                    freePhysPut(indexVal, false);
                // if there are more linked records, free those as well
                if (indexVals != null) {
                    for (int i = 0; i < indexVals.length && indexVals[i] != 0; i++) {
                        freePhysPut(indexVals[i], false);
                    }
                }
            }
            indexVals = physAllocate(out.pos, true, false);
        } finally {
            if (!disableLocks) {
                structuralLock.unlock();
            }
        }
        put2(out, ioRecid, indexVals);
    }
    assert (disableLocks || locks[Store.lockPos(ioRecid)].writeLock().isHeldByCurrentThread());
}
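/*
 * Illustrative sketch (not part of the original class): decoding an index value
 * the way update2() above does. Assumed layout, taken from the surrounding code:
 * the top 16 bits hold the record size, MASK_OFFSET selects the physical offset,
 * and MASK_LINKED / MASK_ARCHIVE are flag bits. The helper name is hypothetical.
 */
private static String describeIndexVal(long indexVal) {
    int size = (int) (indexVal >>> 48);                 // record size from the high 16 bits
    long offset = indexVal & MASK_OFFSET;               // physical offset of the record
    boolean linked = (indexVal & MASK_LINKED) != 0;     // record continues in linked chunks
    boolean archive = (indexVal & MASK_ARCHIVE) != 0;   // archive bit, rewritten on update
    return "size=" + size + " offset=" + offset + " linked=" + linked + " archive=" + archive;
}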
@Override
public void compact() {
    if (readOnly)
        throw new IllegalAccessError();
    final File indexFile = index.getFile();
    final File physFile = phys.getFile();

    final int rafMode;
    if (index instanceof Volume.FileChannelVol) {
        rafMode = 2;
    } else if (index instanceof Volume.MappedFileVol && phys instanceof Volume.FileChannelVol) {
        rafMode = 1;
    } else {
        rafMode = 0;
    }

    lockAllWrite();
    try {
        final File compactedFile = new File(
                (indexFile != null ? indexFile : File.createTempFile("mapdb", "compact")) + ".compact");
        Volume.Factory fab = Volume.fileFactory(compactedFile, rafMode, false, sizeLimit,
                CC.VOLUME_SLICE_SHIFT, 0);
        StoreDirect store2 = new StoreDirect(fab, false, false, 5, false, 0L,
                checksum, compress, password, false, 0);

        compactPreUnderLock();

        index.putLong(IO_PHYS_SIZE, physSize);
        index.putLong(IO_INDEX_SIZE, indexSize);
        index.putLong(IO_FREE_SIZE, freeSize);

        // create secondary files for compaction
        store2.lockAllWrite();

        // transfer stack of free recids
        // TODO long stack take modifies the original store
        for (long recid = longStackTake(IO_FREE_RECID, false);
             recid != 0;
             recid = longStackTake(IO_FREE_RECID, false)) {
            store2.longStackPut(IO_FREE_RECID, recid, false);
        }

        // iterate over recids and transfer physical records
        store2.index.putLong(IO_INDEX_SIZE, indexSize);
        for (long ioRecid = IO_USER_START; ioRecid < indexSize; ioRecid += 8) {
            byte[] bb = get2(ioRecid, Serializer.BYTE_ARRAY_NOSIZE);
            store2.index.ensureAvailable(ioRecid + 8);
            if (bb == null || bb.length == 0) {
                store2.index.putLong(ioRecid, 0);
            } else {
                DataIO.DataOutputByteArray out = serialize(bb, Serializer.BYTE_ARRAY_NOSIZE);
                long[] indexVals = store2.physAllocate(out.pos, true, false);
                store2.put2(out, ioRecid, indexVals);
            }
        }

        File indexFile2 = store2.index.getFile();
        File physFile2 = store2.phys.getFile();
        store2.unlockAllWrite();

        final boolean useDirectBuffer = index instanceof Volume.MemoryVol
                && ((Volume.MemoryVol) index).useDirectBuffer;
        index.sync(); // TODO is sync needed here?
        index.close();
        index = null;
        phys.sync(); // TODO is sync needed here?
        phys.close();
        phys = null;

        if (indexFile != null) {
            final long time = System.currentTimeMillis();
            final File indexFile_ = new File(indexFile.getPath() + "_" + time + "_orig");
            final File physFile_ = physFile != null
                    ? new File(physFile.getPath() + "_" + time + "_orig") : null;
            store2.close();
            // not in memory, so just rename files
            if (!indexFile.renameTo(indexFile_))
                throw new AssertionError("could not rename file");
            if (!physFile.renameTo(physFile_))
                throw new AssertionError("could not rename file");
            if (!indexFile2.renameTo(indexFile))
                throw new AssertionError("could not rename file");
            // TODO process may fail in middle of rename, analyze sequence and add recovery
            if (!physFile2.renameTo(physFile))
                throw new AssertionError("could not rename file");

            final Volume.Factory fac2 = Volume.fileFactory(indexFile, rafMode, false, sizeLimit,
                    CC.VOLUME_SLICE_SHIFT, 0);
            index = fac2.createIndexVolume();
            phys = fac2.createPhysVolume();

            indexFile_.delete();
            physFile_.delete();
        } else {
            // in memory, so copy data into new memory volumes
            Volume indexVol2 = new Volume.MemoryVol(useDirectBuffer, sizeLimit, CC.VOLUME_SLICE_SHIFT);
            Volume.volumeTransfer(indexSize, store2.index, indexVol2);
            Volume physVol2 = new Volume.MemoryVol(useDirectBuffer, sizeLimit, CC.VOLUME_SLICE_SHIFT);
            Volume.volumeTransfer(store2.physSize, store2.phys, physVol2);
            store2.close();
            index = indexVol2;
            phys = physVol2;
        }

        physSize = store2.physSize;
        freeSize = store2.freeSize;
        index.putLong(IO_PHYS_SIZE, physSize);
        index.putLong(IO_INDEX_SIZE, indexSize);
        index.putLong(IO_FREE_SIZE, freeSize);
        index.putLong(IO_INDEX_SUM, indexHeaderChecksum());

        maxUsedIoList = IO_USER_START - 8;
        while (index.getLong(maxUsedIoList) != 0 && maxUsedIoList > IO_FREE_RECID)
            maxUsedIoList -= 8;

        compactPostUnderLock();
    } catch (IOException e) {
        throw new IOError(e);
    } finally {
        unlockAllWrite();
    }
}
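/*
 * Sketch (hypothetical helper, not in the original class): the rename-with-backup
 * sequence compact() uses when the store is file-backed. The original file is
 * renamed to "<name>_<time>_orig", the compacted file is renamed into its place,
 * and the backup is deleted once the swap succeeds. As the TODO in compact()
 * notes, a crash between the renames can leave the files in a mixed state.
 */
private static void swapWithBackup(File original, File compacted, long time) {
    File backup = new File(original.getPath() + "_" + time + "_orig");
    if (!original.renameTo(backup))
        throw new AssertionError("could not rename file");
    if (!compacted.renameTo(original))
        throw new AssertionError("could not rename file");
    backup.delete();
}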