Code example #1
File: Ddb.java Project: saitxuc/hippo-java
  private int readWriteBatch(int bucket, SliceInput record, int updateSize, Entrys entries)
      throws IOException {
    List<Entry<Slice, Slice>> list = Lists.newArrayListWithCapacity(updateSize);
    int count = 0;
    while (record.isReadable()) {
      count++;
      ValueType valueType = ValueType.getValueTypeByPersistentId(record.readByte());
      if (valueType == VALUE) {
        Slice key = readLengthPrefixedBytes(record);
        Slice value = readLengthPrefixedBytes(record);
        list.add(new BlockEntry(key, value));
      } else if (valueType == DELETION) {
        Slice key = readLengthPrefixedBytes(record);
        list.add(new BlockEntry(key, Slices.EMPTY_SLICE));
      } else {
        throw new IllegalStateException("Unexpected value type " + valueType);
      }
    }

    if (count != updateSize) {
      throw new IOException(
          String.format(
              "Expected %d entries in log record but found %d entries", updateSize, count));
    }

    int available = 0;
    for (Entry<Slice, Slice> e : list) {
      InternalKey iKey =
          new InternalKey(e.getKey(), 0, e.getValue().length() != 0 ? VALUE : DELETION);
      if (bucket == iKey.bucket()) {
        entries.add(e);
        available++;
      }
    }
    return available;
  }
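
The entries that readWriteBatch iterates over are type-tagged, length-prefixed key/value pairs; the 8-byte sequence and 4-byte count header are stripped off by the caller (see code example #3). Below is a minimal, self-contained encoder sketch for that record layout. The fixed 4-byte length prefix, the type-id values, and the class name are illustrative assumptions; the real project may use different encodings.

// A sketch of the write-batch record layout consumed above; assumptions
// are marked in the comments.
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class WriteBatchRecordSketch {
  static final byte VALUE = 1;    // assumed persistent id for a put
  static final byte DELETION = 0; // assumed persistent id for a delete

  // Encodes the header (sequence + entry count) followed by the entries.
  // A null value encodes a deletion.
  public static byte[] encode(long sequence, byte[][] keys, byte[][] values) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    out.writeLong(sequence);   // 8-byte sequence header
    out.writeInt(keys.length); // 4-byte update count (updateSize)
    for (int i = 0; i < keys.length; i++) {
      if (values[i] != null) {
        out.writeByte(VALUE);
        writeLengthPrefixed(out, keys[i]);
        writeLengthPrefixed(out, values[i]);
      } else {
        out.writeByte(DELETION);
        writeLengthPrefixed(out, keys[i]);
      }
    }
    out.flush();
    return buf.toByteArray();
  }

  private static void writeLengthPrefixed(DataOutputStream out, byte[] bytes) throws IOException {
    out.writeInt(bytes.length); // assumed fixed-width prefix, for illustration
    out.write(bytes);
  }
}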
Code example #2
  // Writes a stream of chunks such that no chunk is split across a block boundary
  @Override
  public synchronized void addRecord(Slice record, boolean force) throws IOException {
    Preconditions.checkState(!closed.get(), "Log has been closed");

    SliceInput sliceInput = record.input();

    // used to track first, middle and last chunks
    boolean begin = true;

    // Fragment the record into chunks as necessary and write it.  Note that if the
    // record is empty, we still want to iterate once to write a single
    // zero-length chunk.
    do {
      int bytesRemainingInBlock = BLOCK_SIZE - blockOffset;
      Preconditions.checkState(bytesRemainingInBlock >= 0);

      // Switch to a new block if necessary
      if (bytesRemainingInBlock < HEADER_SIZE) {
        if (bytesRemainingInBlock > 0) {
          // Fill the rest of the block with zeros
          // todo lame... need a better way to write zeros
          fileChannel.write(ByteBuffer.allocate(bytesRemainingInBlock));
        }
        blockOffset = 0;
        bytesRemainingInBlock = BLOCK_SIZE - blockOffset;
      }

      // Invariant: we never leave less than HEADER_SIZE bytes available in a block
      int bytesAvailableInBlock = bytesRemainingInBlock - HEADER_SIZE;
      Preconditions.checkState(bytesAvailableInBlock >= 0);

      // if there are more bytes in the record than are available in the block,
      // fragment the record; otherwise write the rest of the record
      boolean end;
      int fragmentLength;
      if (sliceInput.available() > bytesAvailableInBlock) {
        end = false;
        fragmentLength = bytesAvailableInBlock;
      } else {
        end = true;
        fragmentLength = sliceInput.available();
      }

      // determine chunk type
      LogChunkType type;
      if (begin && end) {
        type = LogChunkType.FULL;
      } else if (begin) {
        type = LogChunkType.FIRST;
      } else if (end) {
        type = LogChunkType.LAST;
      } else {
        type = LogChunkType.MIDDLE;
      }

      // write the chunk
      writeChunk(type, sliceInput.readSlice(fragmentLength));

      // we are no longer on the first chunk
      begin = false;
    } while (sliceInput.isReadable());

    if (force) {
      fileChannel.force(false);
    }
  }
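
To make the fragmentation rule concrete, here is a standalone sketch that computes the chunk sequence for a record of a given length starting at a given block offset. The BLOCK_SIZE (32768) and HEADER_SIZE (7) values follow the usual leveldb-style log format and are assumptions here; the enum and method names are ours.

// A pure-function sketch of the fragmentation arithmetic in addRecord.
import java.util.ArrayList;
import java.util.List;

public class ChunkLayoutSketch {
  static final int BLOCK_SIZE = 32768; // assumed block size
  static final int HEADER_SIZE = 7;    // assumed chunk header size

  enum ChunkType { FULL, FIRST, MIDDLE, LAST }

  // Lists each chunk as "TYPE(payloadLength)". Mirrors the do/while above:
  // even a zero-length record yields one FULL(0) chunk.
  public static List<String> layout(int recordLength, int blockOffset) {
    List<String> chunks = new ArrayList<>();
    int remaining = recordLength;
    boolean begin = true;
    do {
      int bytesRemainingInBlock = BLOCK_SIZE - blockOffset;
      if (bytesRemainingInBlock < HEADER_SIZE) {
        // trailer too small for a header: pad it and start a new block
        blockOffset = 0;
        bytesRemainingInBlock = BLOCK_SIZE;
      }
      int bytesAvailableInBlock = bytesRemainingInBlock - HEADER_SIZE;
      boolean end = remaining <= bytesAvailableInBlock;
      int fragmentLength = end ? remaining : bytesAvailableInBlock;
      ChunkType type =
          begin && end ? ChunkType.FULL
              : begin ? ChunkType.FIRST
              : end ? ChunkType.LAST
              : ChunkType.MIDDLE;
      chunks.add(type + "(" + fragmentLength + ")");
      blockOffset += HEADER_SIZE + fragmentLength;
      remaining -= fragmentLength;
      begin = false;
    } while (remaining > 0);
    return chunks;
  }

  public static void main(String[] args) {
    // A 70000-byte record starting at offset 0 spans three blocks:
    // [FIRST(32761), MIDDLE(32761), LAST(4478)]
    System.out.println(layout(70000, 0));
  }
}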
Code example #3
File: Ddb.java Project: saitxuc/hippo-java
  private void readLog(
      String standby,
      int bucket,
      Position pos,
      int batchSize,
      Entrys entrys,
      boolean availableRollback,
      int deep) {
    if (deep > 50) {
      return;
    }

    long fileNumber = pos.fileNumber();
    if (!logQ.contains(fileNumber)) {
      throw new IfileNotFoundException(
          bucket + "-bucket log file[" + fileNumber + "] does not exist!");
    }

    String key = generateKey(standby, bucket, fileNumber);
    LogReader0 logReader = logCache.get(key);
    if (logReader == null) {
      return;
    }

    if (availableRollback) {
      if (pos.pointer() > 0) {
        logger.warn("{}-bucket log happen rollback, position={}.", bucket, pos);
        logReader.setPosition(pos.pointer());
      } else {
        // record the rollback point
        pos.pointer(logReader.pointer());
      }
    }

    boolean full = false;
    for (Slice record = logReader.readRecord(); record != null; record = logReader.readRecord()) {
      SliceInput sliceInput = record.input();
      // read the 12-byte header: an 8-byte sequence number and a 4-byte update count
      if (sliceInput.available() < 12) {
        logReader.reportCorruption(sliceInput.available(), "log record too small");
        continue;
      }
      long sequenceBegin = sliceInput.readLong();
      int updateSize = sliceInput.readInt();

      // read entries
      try {
        int c = readWriteBatch(bucket, sliceInput, updateSize, entrys);
        if (c < 1) {
          continue;
        }
      } catch (IOException e) {
        Throwables.propagate(e);
      }

      // update the maxSequence
      pos.curMaxSeq(sequenceBegin + updateSize - 1);
      pos.maxSeq(pos.curMaxSeq());

      if (entrys.size() >= batchSize) {
        full = true;
        break;
      }
    }

    if (!full && logReader.eof(logsOffset.get(fileNumber))) {
      boolean next = false;
      for (long n : logQ) {
        if (next) {
          pos.fileNumber(n);
          break;
        }
        if (n == fileNumber) {
          next = true;
        }
      }

      if (fileNumber != pos.fileNumber()) {
        if (entrys.size() < batchSize) {
          readLog(standby, bucket, pos, batchSize, entrys, false, ++deep);
        }
      }
    }
  }
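
The file-advance scan at the end of readLog can be read as a small pure function: given the ordered queue of log file numbers, find the number that follows the current file, staying put when the current file is the last one. A sketch (the class and method names are ours):

// Mirrors the `next` flag loop in readLog above.
public class LogQueueSketch {
  // Returns the file number that follows `current` in the ordered queue,
  // or `current` itself when it is the last entry (or not present).
  public static long nextFileNumber(Iterable<Long> logQ, long current) {
    boolean sawCurrent = false;
    for (long n : logQ) {
      if (sawCurrent) {
        return n; // first file number after the current one
      }
      if (n == current) {
        sawCurrent = true;
      }
    }
    return current; // stay on the current file
  }

  public static void main(String[] args) {
    // With logQ = [3, 5, 8]: next after 5 is 8; next after 8 stays 8.
    System.out.println(nextFileNumber(java.util.List.of(3L, 5L, 8L), 5L)); // 8
    System.out.println(nextFileNumber(java.util.List.of(3L, 5L, 8L), 8L)); // 8
  }
}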