protected void relocation(Data<V> newValue, long newSizeOfEverythingBeforeValue) {
   s.innerWriteLock.lock();
   s.free(pos, entrySizeInChunks);
   long entrySize = innerEntrySize(newSizeOfEverythingBeforeValue, newValue.size());
   allocatedChunks.initEntryAndKeyCopying(entrySize, valueSizeOffset - keySizeOffset);
   writeValueAndPutPos(newValue);
 }
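
relocation() frees the entry's current chunk run, allocates a fresh run large enough for the grown entry while copying the key bytes, then writes the new value and publishes the new position, all under the segment write lock. Below is a minimal, self-contained sketch of that free-then-reallocate-and-copy pattern, with java.util.BitSet standing in for the segment free list (ChunkAllocator and its methods are illustrative names, not Chronicle Map API):

import java.util.BitSet;

// Toy chunk allocator mirroring the free -> allocate -> copy sequence of
// relocation(...) above; a set bit marks an occupied chunk.
final class ChunkAllocator {
  private final BitSet used = new BitSet();
  private final int chunkSize;

  ChunkAllocator(int chunkSize) { this.chunkSize = chunkSize; }

  // Analogue of m.inChunks(entrySize): round up to whole chunks.
  int chunksFor(long entrySize) {
    return (int) ((entrySize + chunkSize - 1) / chunkSize);
  }

  // Free the old run, then claim the first run of newChunks clear bits.
  // The caller would copy the key bytes into the new run and write the value,
  // as relocation(...) does via initEntryAndKeyCopying / writeValueAndPutPos.
  int relocate(int oldPos, int oldChunks, int newChunks) {
    used.clear(oldPos, oldPos + oldChunks); // s.free(pos, entrySizeInChunks)
    int pos = 0;
    while (true) {
      pos = used.nextClearBit(pos);
      int end = used.nextSetBit(pos);
      if (end < 0 || end - pos >= newChunks) {
        used.set(pos, pos + newChunks); // allocate the new run
        return pos;
      }
      pos = end;
    }
  }
}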
Example #2
  public void removeDuplicatesInSegment() {
    long startHlPos = 0L;
    VanillaChronicleMap<?, ?, ?> m = mh.m();
    CompactOffHeapLinearHashTable hashLookup = m.hashLookup;
    long currentTierBaseAddr = s.tierBaseAddr;
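    // Advance to the first empty slot: starting the traversal there guarantees that a
    // shift-deleting remove() can never move an entry across the traversal start.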
    while (!hashLookup.empty(hashLookup.readEntry(currentTierBaseAddr, startHlPos))) {
      startHlPos = hashLookup.step(startHlPos);
    }
    long hlPos = startHlPos;
    int steps = 0;
    long entries = 0;
    tierIteration:
    do {
      hlPos = hashLookup.step(hlPos);
      steps++;
      long entry = hashLookup.readEntry(currentTierBaseAddr, hlPos);
      if (!hashLookup.empty(entry)) {
        e.readExistingEntry(hashLookup.value(entry));
        Data key = (Data) e.key();
        try (ExternalMapQueryContext<?, ?, ?> c = m.queryContext(key)) {
          MapEntry<?, ?> entry2 = c.entry();
          Data<?> key2 = ((MapEntry) c).key();
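          // queryContext() resolves the key through the hash lookup; if it resolves to a
          // different entry address, the entry under the cursor is a duplicate.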
          if (key2.bytes().address(key2.offset()) != key.bytes().address(key.offset())) {
            lh.LOG.error(
                "entries with duplicate key {} in segment {}: "
                    + "with values {} and {}, removing the latter",
                key,
                c.segmentIndex(),
                entry2 != null ? ((MapEntry) c).value() : "<deleted>",
                !e.entryDeleted() ? e.value() : "<deleted>");
            if (hashLookup.remove(currentTierBaseAddr, hlPos) != hlPos) {
              hlPos = hashLookup.stepBack(hlPos);
              steps--;
            }
            continue tierIteration;
          }
        }
        entries++;
      }
      // The `steps == 0` condition, together with the `steps` updates above, fixes a bug:
      // when a shift deletion occurs on the first entry visited in the tier, hlPos is
      // stepped back to startHlPos after a single iteration, and without this condition
      // the loop would stop after visiting a single entry instead of the whole tier.
    } while (hlPos != startHlPos || steps == 0);

    recoverTierEntriesCounter(entries);
    recoverLowestPossibleFreeChunkTiered();
  }
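
The traversal above starts from an empty slot, so a shift-deleting remove() can never move a not-yet-visited entry behind the cursor, and the steps counter keeps the loop alive in the corner case where a removal on the first visited entry steps hlPos straight back to startHlPos. A self-contained sketch of that wrap-around scan over a toy linear-probe table (TierScan, step, stepBack and forEachSlot are assumed names, not the CompactOffHeapLinearHashTable API):

import java.util.function.LongPredicate;

// Toy circular scan mirroring the loop above: table holds one long per slot,
// 0 meaning empty; the capacity is a power of two so step/stepBack can mask.
final class TierScan {
  final long[] table;
  final int mask;

  TierScan(long[] table) {
    this.table = table;
    this.mask = table.length - 1; // assumes power-of-two length
  }

  long step(long pos)     { return (pos + 1) & mask; }
  long stepBack(long pos) { return (pos - 1) & mask; }

  // Visits every non-empty slot exactly once, even when the visitor asks to
  // remove the slot under the cursor. Assumes at least one empty slot exists,
  // as in any non-full linear-probe table.
  void forEachSlot(LongPredicate removeIfTrue) {
    long startPos = 0;
    while (table[(int) startPos] != 0)
      startPos = step(startPos); // start at an empty slot
    long pos = startPos;
    int steps = 0;
    do {
      pos = step(pos);
      steps++;
      long entry = table[(int) pos];
      if (entry != 0 && removeIfTrue.test(entry)) {
        table[(int) pos] = 0; // stand-in for a shift-deleting remove()
        pos = stepBack(pos);  // re-check the slot an entry may have shifted into
        steps--;
      }
      // steps == 0 keeps the loop alive when a removal on the first visited
      // slot steps the cursor straight back onto startPos
    } while (pos != startPos || steps == 0);
  }
}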
 public void initValueWithoutSize(
     Data<?> value, long oldValueSizeOffset, long oldValueSize, long oldValueOffset) {
   assert oldValueSize == value.size();
   initValSizeEqualToOld(oldValueSizeOffset, oldValueSize, oldValueOffset);
   writeValue(value);
 }
 public void writeValue(Data<?> value) {
   value.writeTo(entryBS, valueOffset);
 }
 public void initValue(Data<?> value) {
   entryBytes.position(valueSizeOffset);
   initValSize(value.size());
   writeValue(value);
 }
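
These three helpers split value writing into two cases: writeValue overwrites only the value bytes when the stored size prefix is still valid, while initValue rewrites the size prefix first. A simplified sketch of the same size-prefixed slot layout on a plain ByteBuffer (the int size prefix and fixed offsets are assumptions for illustration; the real encoding is delegated to the configured size marshaller):

import java.nio.ByteBuffer;

// Illustrative size-prefixed value slot: [ int size | value bytes ].
final class ValueSlot {
  final ByteBuffer entry;
  final int valueSizeOffset; // where the size prefix lives
  final int valueOffset;     // where the value bytes start

  ValueSlot(ByteBuffer entry, int valueSizeOffset) {
    this.entry = entry;
    this.valueSizeOffset = valueSizeOffset;
    this.valueOffset = valueSizeOffset + Integer.BYTES;
  }

  // Analogous to writeValue above: the caller guarantees value.length matches
  // the stored size, as initValueWithoutSize asserts.
  void writeValue(byte[] value) {
    entry.position(valueOffset);
    entry.put(value);
  }

  // Analogous to initValue above: rewrite the size prefix, then the bytes.
  void initValue(byte[] value) {
    entry.putInt(valueSizeOffset, value.length);
    writeValue(value);
  }
}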
  public void innerDefaultReplaceValue(Data<V> newValue) {
    assert s.innerUpdateLock.isHeldByCurrentThread();

    boolean newValueSizeIsDifferent = newValue.size() != this.valueSize;
    if (newValueSizeIsDifferent) {
      long newSizeOfEverythingBeforeValue = newSizeOfEverythingBeforeValue(newValue);
      long entryStartOffset = keySizeOffset;
      VanillaChronicleMap<?, ?, ?, ?, ?, ?, ?> m = mh.m();
      long newValueOffset =
          m.alignment.alignAddr(entryStartOffset + newSizeOfEverythingBeforeValue);
      long newEntrySize = newValueOffset + newValue.size() - entryStartOffset;
      int newSizeInChunks = m.inChunks(newEntrySize);
      newValueDoesNotFit:
      if (newSizeInChunks > entrySizeInChunks) {
        if (newSizeInChunks > m.maxChunksPerEntry) {
          throw new IllegalArgumentException(
              "Value too large: "
                  + "entry takes "
                  + newSizeInChunks
                  + " chunks, "
                  + m.maxChunksPerEntry
                  + " is maximum.");
        }
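        // If the chunks immediately following this entry are all free, claim them
        // and extend the entry in place instead of relocating.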
        if (s.freeList.allClear(pos + entrySizeInChunks, pos + newSizeInChunks)) {
          s.freeList.set(pos + entrySizeInChunks, pos + newSizeInChunks);
          break newValueDoesNotFit;
        }
        relocation(newValue, newSizeOfEverythingBeforeValue);
        return;
      } else if (newSizeInChunks < entrySizeInChunks) {
        // Freeing extra chunks
        s.freeList.clear(pos + newSizeInChunks, pos + entrySizeInChunks);
        // Do NOT reset nextPosToSearchFrom: if the value was once larger, it could
        // easily become larger again. If these chunks are taken by then, this entry
        // will simply need to be relocated.
      }
      // new size != old size => the size is not constant => the size is actually written.
      // To prevent (at least) the following interleaving:
      // 1. a concurrent reader thread reads the size
      // 2. this thread updates the size and the value
      // 3. the concurrent reader reads the value
      // we MUST upgrade to the exclusive lock.
    } else {
      // TODO To turn the following block on, JLANG-46 is required. It is also unclear
      // what happens if the value is a DataValue generated with 2, 4 or 8 distinct bytes:
      // putting an on-heap implementation of such a value is not atomic currently either,
      // but there is a way to make it atomic. We should identify such cases and make a
      // single write:
      // state = UNSAFE.getLong(onHeapValueObject, offsetToTheFirstField);
      // bytes.writeLong(state);
      //            boolean newValueSizeIsPowerOf2 = ((newValueSize - 1L) & newValueSize) == 0;
      //            if (!newValueSizeIsPowerOf2 || newValueSize > 8L) {
      //                // If the new value size is 1, 2, 4, or 8 bytes, the write is
      //                // non-atomic only if the user provided their own marshaller and
      //                // writes the value byte-by-byte, which is very unlikely. In that
      //                // case the user should acquire the write lock before writing
      //                // updates themselves.
      //                upgradeToWriteLock();
      //            }
    }
    s.innerWriteLock.lock();

    if (newValueSizeIsDifferent) {
      initValue(newValue);
    } else {
      writeValue(newValue);
    }
    hashLookup.putValueVolatile(hlp.hashLookupPos, pos);
  }
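
innerDefaultReplaceValue thus chooses between three paths when the value size changes: extend in place when the chunks right after the entry are free, relocate otherwise, and give back tail chunks on shrink. A condensed, self-contained sketch of that decision with java.util.BitSet standing in for s.freeList (ResizePolicy and the -1 "must relocate" convention are illustrative assumptions):

import java.util.BitSet;

// Toy version of the grow/shrink decision above: a set bit = chunk in use.
final class ResizePolicy {
  // Returns the entry's position after resizing, or -1 if it must be relocated.
  static int resize(BitSet freeList, int pos, int oldChunks, int newChunks,
                    int maxChunksPerEntry) {
    if (newChunks > oldChunks) {
      if (newChunks > maxChunksPerEntry)
        throw new IllegalArgumentException("Value too large: entry takes "
            + newChunks + " chunks, " + maxChunksPerEntry + " is maximum.");
      // freeList.allClear(from, to) analogue: no set bit in [from, to)
      int firstUsed = freeList.nextSetBit(pos + oldChunks);
      if (firstUsed < 0 || firstUsed >= pos + newChunks) {
        freeList.set(pos + oldChunks, pos + newChunks); // grow in place
        return pos;
      }
      return -1; // neighbouring chunks taken: relocate (see the sketch after relocation)
    }
    if (newChunks < oldChunks)
      freeList.clear(pos + newChunks, pos + oldChunks); // free the tail chunks
    return pos;
  }
}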
 public long newSizeOfEverythingBeforeValue(Data<V> newValue) {
   return valueSizeOffset
       + mh.m().valueSizeMarshaller.sizeEncodingSize(newValue.size())
       - keySizeOffset;
 }
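
The result is everything stored between the entry start and the value bytes: the key-size prefix and the key (already covered by valueSizeOffset - keySizeOffset) plus the encoding of the new value's size. A worked example with assumed widths (the 1-byte size encodings are an assumption; the real widths depend on the configured size marshallers):

// keySizeOffset = 0; 1-byte key-size prefix + 16-byte key => valueSizeOffset = 17
// newValue.size() = 100; sizeEncodingSize(100) = 1 (assumed)
long keySizeOffset = 0, valueSizeOffset = 17, valueSizeEncodingSize = 1;
long newSizeOfEverythingBeforeValue =
    valueSizeOffset + valueSizeEncodingSize - keySizeOffset; // = 18
// innerDefaultReplaceValue then aligns keySizeOffset + 18 to the value alignment
// and adds newValue.size() to obtain the new entry size in bytes.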