Example #1
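  /**
   * Scans this tier's hash lookup for entries whose key resolves, via a fresh query
   * context, to a different stored entry, removes such duplicates, and finally
   * restores the tier's entry counter and lowest possible free chunk.
   */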
  public void removeDuplicatesInSegment() {
    long startHlPos = 0L;
    VanillaChronicleMap<?, ?, ?> m = mh.m();
    CompactOffHeapLinearHashTable hashLookup = m.hashLookup;
    long currentTierBaseAddr = s.tierBaseAddr;
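    // Advance startHlPos to the first empty slot, so the scan below never starts in
    // the middle of a collision chain of shifted entries.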
    while (!hashLookup.empty(hashLookup.readEntry(currentTierBaseAddr, startHlPos))) {
      startHlPos = hashLookup.step(startHlPos);
    }
    long hlPos = startHlPos;
    int steps = 0;
    long entries = 0;
    tierIteration:
    do {
      hlPos = hashLookup.step(hlPos);
      steps++;
      long entry = hashLookup.readEntry(currentTierBaseAddr, hlPos);
      if (!hashLookup.empty(entry)) {
        e.readExistingEntry(hashLookup.value(entry));
        Data key = (Data) e.key();
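        // Resolve the key through the normal query path; if it lands on a different
        // entry than the one we are positioned on, the current entry is a duplicate.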
        try (ExternalMapQueryContext<?, ?, ?> c = m.queryContext(key)) {
          MapEntry<?, ?> entry2 = c.entry();
          Data<?> key2 = ((MapEntry) c).key();
          if (key2.bytes().address(key2.offset()) != key.bytes().address(key.offset())) {
            lh.LOG.error(
                "entries with duplicate key {} in segment {}: "
                    + "with values {} and {}, removing the latter",
                key,
                c.segmentIndex(),
                entry2 != null ? ((MapEntry) c).value() : "<deleted>",
                !e.entryDeleted() ? e.value() : "<deleted>");
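            // remove() performs shift deletion; if the resulting hole is not at
            // hlPos, another entry was shifted into this slot, so step back to
            // revisit it on the next iteration.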
            if (hashLookup.remove(currentTierBaseAddr, hlPos) != hlPos) {
              hlPos = hashLookup.stepBack(hlPos);
              steps--;
            }
            continue tierIteration;
          }
        }
        entries++;
      }
      // The `steps == 0` condition and the `steps` updates inside the loop fix a bug:
      // when a shift deletion occurs on the first entry of the tier, hlPos can become
      // equal to startHlPos after visiting only a single entry, without the whole
      // loop having been traversed.
    } while (hlPos != startHlPos || steps == 0);

    recoverTierEntriesCounter(entries);
    recoverLowestPossibleFreeChunkTiered();
  }
Example #2
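  /**
   * Rebuilds this tier's hash lookup and free list: every slot is cleared and each
   * entry that passes validation is re-inserted at its natural position; corrupted
   * entries and duplicate keys are dropped. Returns the (possibly corrected) segment
   * index.
   */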
  public int recoverTier(int segmentIndex) {
    s.freeList.clearAll();

    Logger log = lh.LOG;
    VanillaChronicleHash<?, ?, ?, ?> h = mh.h();
    CompactOffHeapLinearHashTable hl = h.hashLookup;
    long hlAddr = s.tierBaseAddr;

    long validEntries = 0;
    long hlPos = 0;
    do {
      long hlEntry = hl.readEntry(hlAddr, hlPos);
      nextHlPos:
      if (!hl.empty(hlEntry)) {
        // (*)
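        // Clear the slot up front: a valid entry is re-inserted below at its natural
        // position, an invalid one simply stays cleared.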
        hl.clearEntry(hlAddr, hlPos);
        if (validEntries >= h.maxEntriesPerHashLookup) {
          log.error(
              "Too many entries in tier with index {}, max is {}",
              s.tierIndex,
              h.maxEntriesPerHashLookup);
          break nextHlPos;
        }

        long searchKey = hl.key(hlEntry);
        long entryPos = hl.value(hlEntry);
        int si = checkEntry(searchKey, entryPos, segmentIndex);
        if (si < 0) {
          break nextHlPos;
        } else {
          s.freeList.setRange(entryPos, entryPos + e.entrySizeInChunks);
          segmentIndex = si;
        }

        // The entry has passed all checks, re-insert:
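        // Linear-probe from the key's natural slot until a free slot is found.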
        long startInsertPos = hl.hlPos(searchKey);
        long insertPos = startInsertPos;
        do {
          long hlInsertEntry = hl.readEntry(hlAddr, insertPos);
          if (hl.empty(hlInsertEntry)) {
            hl.writeEntry(hlAddr, insertPos, hl.entry(searchKey, entryPos));
            validEntries++;
            break nextHlPos;
          }
          if (insertPos == hlPos) {
            // Means we made a whole loop without finding a hole to re-insert the
            // entry. Even if the hashLookup was corrupted and all slots are dirty
            // now, at least the slot cleared at (*) should be clear; if it is dirty,
            // only a concurrently modifying thread could have occupied it.
            throw new ChronicleHashRecoveryFailedException(
                "Concurrent modification of ChronicleMap at "
                    + h.file()
                    + " while recovery procedure is in progress");
          }
          checkDuplicateKeys:
          if (hl.key(hlInsertEntry) == searchKey) {
            long anotherEntryPos = hl.value(hlInsertEntry);
            if (anotherEntryPos == entryPos) {
              validEntries++;
              break nextHlPos;
            }
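            // Remember the current entry's key location and size before
            // readExistingEntry() below may reposition the cursor to the other entry.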
            long currentKeyOffset = e.keyOffset;
            long currentKeySize = e.keySize;
            int currentEntrySizeInChunks = e.entrySizeInChunks;
            if (insertPos >= 0 && insertPos < hlPos) {
              // insertPos precedes hlPos, so the entry there was already checked and
              // re-inserted earlier in this scan; just read it.
              e.readExistingEntry(anotherEntryPos);
            } else if (checkEntry(searchKey, anotherEntryPos, segmentIndex) < 0) {
              break checkDuplicateKeys;
            }
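            // The two positions hold the same key bytes: a true duplicate. Keep the
            // copy already in the lookup and release the chunks of the current one.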
            if (e.keySize == currentKeySize
                && BytesUtil.bytesEqual(
                    s.segmentBS, currentKeyOffset, s.segmentBS, e.keyOffset, currentKeySize)) {
              log.error(
                  "Entries with duplicate keys within a tier: "
                      + "at pos {} and {} with key {}, first value is {}",
                  entryPos,
                  anotherEntryPos,
                  e.key(),
                  e.value());
              s.freeList.clearRange(entryPos, entryPos + currentEntrySizeInChunks);
              break nextHlPos;
            }
          }
          insertPos = hl.step(insertPos);
        } while (insertPos != startInsertPos);
        throw new ChronicleHashRecoveryFailedException(
            "HashLookup overflow should never occur. "
                + "It might also be concurrent access to ChronicleMap at "
                + h.file()
                + " while recovery procedure is in progress");
      }
      hlPos = hl.step(hlPos);
    } while (hlPos != 0);
    shiftHashLookupEntries();
    return segmentIndex;
  }