/** * 获取小于指定批次大小的数据, 该方法会修改参数属性<code>Position</code> * * @return */ public Entrys get(String standby, int bucket, Position pos, int batchSize) { Entrys entrys = new Entrys(); if (SST.equals(pos.fileType())) { Version v = null; if (!pos.hasVersion()) { v = versions.getCurrent(); v.retain(); pos.version(v.hashCode()); } else { Set<Version> active = versions.activeVersions(); if (active != null) { for (Version av : active) { if (av.hashCode() == pos.version()) { v = av; } } } } if (v == null) { throw new IfileNotFoundException( "There is no corresponding version, bucket= " + bucket + " Hashcode=" + pos.version()); } FileQueue queue = new FileQueue(bucket, v, pos.maxFileNumber()); readSst(standby, bucket, pos, batchSize, v, queue, entrys, true, 1); } else if (LOG.equals(pos.fileType())) { readLog(standby, bucket, pos, batchSize, entrys, true, 1); if (entrys.size() < batchSize) { pos.hasNext(false); } } else { // TODO } return entrys; }
private void readSst( String standby, int bucket, Position pos, int batchSize, Version v, FileQueue queue, Entrys entrys, boolean availableRollback, int deep) { // 防止调用超时,和栈溢出异常 if (deep > 50) { return; } FileMetaData file = null; if (pos.fileNumber() < 0) { file = queue.poll(); } else { file = queue.poll(pos.fileNumber()); } // 当file为null时,意味着当前的version所有sst文件已经迁移完成, // 如果存在新的version,就切换到新的version,否则切换到log模式,开始去拉去log文件的数据 if (file == null) { v.release(); pos.switchToNew(); Version snapshot = versions.getCurrent(); if (v != snapshot) { v = snapshot; v.retain(); pos.version(v.hashCode()); pos.fileNumber(-1); readSst( standby, bucket, pos, batchSize, v, new FileQueue(bucket, v, pos.maxFileNumber()), entrys, availableRollback, ++deep); } else { // 日志模式不锁定version,只锁定log文件 // log文件的锁定逻辑见DBImpl的logQ属性 pos.fileNumber(log.getFileNumber()); pos.pointer(0); pos.fileType(LOG); } return; } else { pos.fileNumber(file.getNumber()); } // 读取大于maxSeq的记录 String key = generateKey(standby, bucket); SlaveIterator si = v.iterators.get(key); if (si == null) { si = new SlaveIterator(file); } else { if (si.getFileNumber() != file.getNumber()) { si = new SlaveIterator(file); } } v.iterators.put(key, si); if (availableRollback) { if (pos.pointer() > 0) { logger.warn("{}-bucket sst happen rollback, position={}.", bucket, pos); si.seekToFirst(); } else { pos.pointer(file.getNumber()); } } long maxSeq = pos.maxSeq(); while (si.hasNext()) { Entry<InternalKey, Slice> e = si.next(bucket, maxSeq); if (e != null) { InternalKey ikey = e.getKey(); pos.curMaxSeq(ikey.getSequenceNumber()); entrys.add( new BlockEntry( ikey.getUserKey(), ValueType.VALUE.equals(ikey.getValueType()) ? 
e.getValue() : Slices.EMPTY_SLICE)); } if (entrys.size() > batchSize) { break; } } // 判断文件是否已经读取完成 if (!si.hasNext()) { v.iterators.remove(key); FileMetaData fmd = queue.peek(); if (fmd != null) { pos.fileNumber(fmd.getNumber()); } else { pos.fileNumber(pos.curMaxFileNumber + 1); // +1意味着该version不存在该文件 } } // 继续读取下一个文件的数据 if (entrys.size() < batchSize) { readSst(standby, bucket, pos, batchSize, v, queue, entrys, false, ++deep); } }
/**
 * Reads entries out of the write-ahead log into {@code entrys} until the batch
 * is full, advancing the cursor in {@code pos}. Recurses to the next log file
 * in {@code logQ} when the current one is exhausted.
 *
 * @param standby identifier of the standby/slave this cursor belongs to
 * @param bucket bucket being migrated
 * @param pos cursor; mutated (fileNumber, pointer, curMaxSeq, maxSeq)
 * @param batchSize maximum number of entries to collect
 * @param entrys output accumulator
 * @param availableRollback true only on the first call of a batch, allowing a
 *     rewind to the cursor's saved rollback point
 * @param deep recursion depth, capped at 50
 * @throws IfileNotFoundException if the cursor's log file is no longer in logQ
 */
private void readLog(
    String standby,
    int bucket,
    Position pos,
    int batchSize,
    Entrys entrys,
    boolean availableRollback,
    int deep) {
  // Guard against call timeouts and stack overflow from deep recursion.
  if (deep > 50) {
    return;
  }
  long fileNumber = pos.fileNumber();
  if (!logQ.contains(fileNumber)) {
    throw new IfileNotFoundException(
        bucket + "-bucket log file[" + fileNumber + "] does not exist!");
  }
  String key = generateKey(standby, bucket, fileNumber);
  // Per-slave cached reader; a cache miss is treated as "nothing to read yet".
  LogReader0 logReader = logCache.get(key);
  if (logReader == null) {
    return;
  }
  if (availableRollback) {
    if (pos.pointer() > 0) {
      // The cursor carries a saved byte offset: rewind the reader to it.
      logger.warn("{}-bucket log happen rollback, position={}.", bucket, pos);
      logReader.setPosition(pos.pointer());
    } else {
      // Set the rollback point (current byte offset of the reader).
      pos.pointer(logReader.pointer());
    }
  }
  boolean full = false;
  for (Slice record = logReader.readRecord();
      record != null;
      record = logReader.readRecord()) {
    SliceInput sliceInput = record.input();
    // read header: 8-byte sequence number + 4-byte update count.
    if (sliceInput.available() < 12) {
      logReader.reportCorruption(sliceInput.available(), "log record too small");
      continue;
    }
    long sequenceBegin = sliceInput.readLong();
    int updateSize = sliceInput.readInt();
    // read entries; a batch contributing no entries is skipped entirely so the
    // sequence bookkeeping below is not advanced for it.
    try {
      int c = readWriteBatch(bucket, sliceInput, updateSize, entrys);
      if (c < 1) {
        continue;
      }
    } catch (IOException e) {
      // NOTE(review): Throwables.propagate wraps checked exceptions in a
      // RuntimeException; deprecated in recent Guava — confirm before upgrading.
      Throwables.propagate(e);
    }
    // update the maxSequence to the last sequence number of this record.
    pos.curMaxSeq(sequenceBegin + updateSize - 1);
    pos.maxSeq(pos.curMaxSeq());
    if (entrys.size() >= batchSize) {
      full = true;
      break;
    }
  }
  // Batch not full and the current log file is fully consumed: advance to the
  // next file number in logQ (iteration order defines the succession).
  if (!full && logReader.eof(logsOffset.get(fileNumber))) {
    boolean next = false;
    for (long n : logQ) {
      if (next) {
        pos.fileNumber(n);
        break;
      }
      if (n == fileNumber) {
        next = true;
      }
    }
    // Only recurse if there actually was a successor file.
    if (fileNumber != pos.fileNumber()) {
      if (entrys.size() < batchSize) {
        readLog(standby, bucket, pos, batchSize, entrys, false, ++deep);
      }
    }
  }
}