Example #1
 @Test
 public void testFileReadRequest() throws InterruptedException {
   MockFileRequest fileRequest =
       new MockFileRequest(
           "tests/res/test-file.txt",
           new FileResponse.Listener<String>() {
             @Override
             public void onResponse(String response) {
               countDownLatch.countDown();
               assertEquals("mockdatarequest", response);
             }
           },
           errorListener);
   countDownLatch = new CountDownLatch(1);
   fileQueue.start();
   fileQueue.add(fileRequest);
   countDownLatch.await();
 }
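The snippet leaves the countDownLatch, fileQueue, and errorListener fields to the surrounding test class. The pattern it relies on is the standard one for asserting on a callback delivered from a worker thread; here is a self-contained sketch of just that pattern, with a plain Thread standing in for FileQueue's dispatcher (no FileQueue API is assumed):

import static org.junit.Assert.assertEquals;

import java.util.concurrent.CountDownLatch;
import org.junit.Test;

public class LatchPatternTest {

  @Test
  public void callbackResultIsAwaited() throws InterruptedException {
    CountDownLatch latch = new CountDownLatch(1);
    String[] result = new String[1];

    // Stand-in for FileQueue delivering the response on a worker thread.
    new Thread(() -> {
      result[0] = "mockdatarequest";
      latch.countDown();
    }).start();

    latch.await(); // block until the callback has fired
    assertEquals("mockdatarequest", result[0]);
  }
}

Capturing the value and asserting after await() keeps the assertion on the test thread; in the listener above, a failing assertEquals would throw on the dispatcher thread, where JUnit cannot report it.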
Example #2
  @Test
  public void testFileBlocking() {
    MockFileRequest request1 = new MockFileRequest("123", null, null);
    request1.setTag("test_tag");

    MockFileRequest request2 = new MockFileRequest("123", null, null);
    request2.setTag("test_tag_2");

    MockFileRequest request3 = new MockFileRequest("123", null, null);
    request3.setTag("test_tag");

    fileQueue.add(request1);
    fileQueue.add(request2);
    fileQueue.add(request3);

    Collection<FileRequest<?>> requestQueue = fileQueue.getRequestQueue();
    assertTrue(requestQueue.contains(request1));
    assertFalse(requestQueue.contains(request2));
    assertFalse(requestQueue.contains(request3));
  }
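The assertions imply that FileQueue deduplicates by file path rather than by tag: all three requests target "123", and only the first remains visible in the request queue. A minimal sketch of one way such dedup can work, a Volley-style waiting map; the class and field names here are illustrative assumptions, not FileQueue's internals:

import java.util.ArrayDeque;
import java.util.HashMap;
import java.util.Map;
import java.util.Queue;

public class DedupQueue<R> {

  private final Queue<R> liveQueue = new ArrayDeque<>();
  private final Map<String, Queue<R>> waiting = new HashMap<>();

  public synchronized void add(String path, R request) {
    if (waiting.containsKey(path)) {
      waiting.get(path).add(request); // duplicate path: park it until the first finishes
    } else {
      waiting.put(path, new ArrayDeque<>());
      liveQueue.add(request); // first request for this path goes live
    }
  }

  public synchronized boolean contains(R request) {
    return liveQueue.contains(request);
  }
}

Under this scheme, completing the live request for a path would promote the next parked request for it; the test above only exercises the parking half.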
Example #3
  @Test
  public void testFileRequestCancel() throws InterruptedException {
    MockFileRequest request1 = new MockFileRequest("123", null, null);
    request1.setTag("test_tag");

    MockFileRequest request2 = new MockFileRequest("456", null, null);
    request2.setTag("test_tag_2");

    MockFileRequest request3 = new MockFileRequest("789", null, null);
    request3.setTag("test_tag");

    fileQueue.add(request1);
    fileQueue.add(request2);
    fileQueue.add(request3);
    fileQueue.cancelAll("test_tag");

    assertTrue(request1.isCanceled());
    assertFalse(request2.isCanceled());
    assertTrue(request3.isCanceled());
  }
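cancelAll("test_tag") cancels every request whose tag matches, independent of file path (request1 and request3 have different paths but share a tag). A minimal sketch of that tag filter, assuming only the members the test exercises; TaggedRequest is a hypothetical stand-in for FileRequest:

import java.util.Collection;
import java.util.Objects;

public class TagCancellation {

  // Hypothetical request type exposing only the members the test exercises.
  static class TaggedRequest {
    private Object tag;
    private volatile boolean canceled;

    void setTag(Object tag) { this.tag = tag; }
    Object getTag() { return tag; }
    void cancel() { canceled = true; }
    boolean isCanceled() { return canceled; }
  }

  // Cancel every queued request whose tag matches, regardless of file path.
  static void cancelAll(Collection<TaggedRequest> queue, Object tag) {
    for (TaggedRequest r : queue) {
      if (Objects.equals(r.getTag(), tag)) {
        r.cancel();
      }
    }
  }
}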
Example #4
  /**
   * Tests that {@link FileQueue} can identify a compressed file and provide readers that
   * extract the uncompressed data, but only when input-compression emulation is enabled.
   */
  @Test
  public void testFileQueueDecompression() throws IOException {
    JobConf conf = new JobConf();
    FileSystem lfs = FileSystem.getLocal(conf);
    String inputLine = "Hi Hello!";

    CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
    CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf, true);
    org.apache.hadoop.mapred.FileOutputFormat.setCompressOutput(conf, true);
    org.apache.hadoop.mapred.FileOutputFormat.setOutputCompressorClass(conf, GzipCodec.class);

    // define the test's root temp directory
    Path rootTempDir =
        new Path(System.getProperty("test.build.data", "/tmp"))
            .makeQualified(lfs.getUri(), lfs.getWorkingDirectory());

    Path tempDir = new Path(rootTempDir, "TestFileQueueDecompression");
    lfs.delete(tempDir, true);

    // create a compressed file
    Path compressedFile = new Path(tempDir, "test");
    OutputStream out =
        CompressionEmulationUtil.getPossiblyCompressedOutputStream(compressedFile, conf);
    BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(out));
    writer.write(inputLine);
    writer.close();

    // the gzip codec appended ".gz", so point at the file it actually wrote
    compressedFile = compressedFile.suffix(".gz");
    // now read back the data from the compressed stream using FileQueue
    long fileSize = lfs.listStatus(compressedFile)[0].getLen();
    CombineFileSplit split =
        new CombineFileSplit(new Path[] {compressedFile}, new long[] {fileSize});
    FileQueue queue = new FileQueue(split, conf);
    byte[] bytes = new byte[inputLine.getBytes().length];
    queue.read(bytes);
    queue.close();
    String readLine = new String(bytes);
    assertEquals("Compression/Decompression error", inputLine, readLine);
  }
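Stripped of the Hadoop plumbing, the property this test verifies is a plain gzip write/read round trip: bytes written through a compressing stream come back intact through a decompressing one. A self-contained equivalent using only java.util.zip (no Gridmix or FileQueue assumptions):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

public class GzipRoundTrip {
  public static void main(String[] args) throws IOException {
    String inputLine = "Hi Hello!";

    // Compress, as getPossiblyCompressedOutputStream does when gzip
    // output compression is enabled.
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    try (Writer w = new OutputStreamWriter(
        new GZIPOutputStream(compressed), StandardCharsets.UTF_8)) {
      w.write(inputLine);
    }

    // Decompress, as FileQueue does once it recognizes the .gz suffix,
    // sizing the buffer exactly as the test does.
    byte[] buf = inputLine.getBytes(StandardCharsets.UTF_8);
    try (InputStream in = new GZIPInputStream(
        new ByteArrayInputStream(compressed.toByteArray()))) {
      int off = 0;
      while (off < buf.length) {
        int n = in.read(buf, off, buf.length - off);
        if (n < 0) break;
        off += n;
      }
    }
    System.out.println(new String(buf, StandardCharsets.UTF_8)); // Hi Hello!
  }
}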
Example #5
 @After
 public void tearDown() throws Exception {
   fileQueue.stop();
 }
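stop() presumably shuts down FileQueue's worker threads so they do not leak into the next test. A sketch of the assumed fixture around it; the no-arg FileQueue constructor is an assumption (Example #1 shows the tests calling start() themselves):

import org.junit.After;
import org.junit.Before;

public class FileQueueTestFixture {

  private FileQueue fileQueue;

  @Before
  public void setUp() {
    fileQueue = new FileQueue(); // assumption: no-arg constructor
  }

  @After
  public void tearDown() throws Exception {
    fileQueue.stop(); // halt worker threads between tests
  }
}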
Example #6
 @Override
 public void run() {
   while (!done) indexFile(queue.get());
 }
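This run() is a classic single-consumer worker loop: queue.get() presumably blocks until a file is available, and the done flag ends the loop. A self-contained sketch of the same pattern on java.util.concurrent; the poison-pill shutdown and the indexFile body are illustrative assumptions, not the original design:

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class IndexWorker implements Runnable {

  // Sentinel that tells the worker to stop (poison pill).
  private static final Path DONE = Paths.get("");

  private final BlockingQueue<Path> queue;

  public IndexWorker(BlockingQueue<Path> queue) {
    this.queue = queue;
  }

  @Override
  public void run() {
    try {
      Path file;
      while ((file = queue.take()) != DONE) { // take() blocks when empty
        indexFile(file);
      }
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // preserve interrupt status
    }
  }

  private void indexFile(Path file) {
    System.out.println("indexing " + file); // stand-in for real indexing
  }

  public static void main(String[] args) throws InterruptedException {
    BlockingQueue<Path> q = new LinkedBlockingQueue<>();
    Thread worker = new Thread(new IndexWorker(q));
    worker.start();
    q.put(Paths.get("a.txt"));
    q.put(Paths.get("b.txt"));
    q.put(DONE); // request shutdown
    worker.join();
  }
}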
Example #7
  private void readSst(
      String standby,
      int bucket,
      Position pos,
      int batchSize,
      Version v,
      FileQueue queue,
      Entrys entrys,
      boolean availableRollback,
      int deep) {
    // Cap the recursion depth to prevent call timeouts and stack overflow
    if (deep > 50) {
      return;
    }

    FileMetaData file = null;
    if (pos.fileNumber() < 0) {
      file = queue.poll();
    } else {
      file = queue.poll(pos.fileNumber());
    }

    // When file is null, every SST file of the current version has been migrated.
    // If a newer version exists, switch to it; otherwise switch to log mode and
    // start pulling data from the log file.
    if (file == null) {
      v.release();

      pos.switchToNew();
      Version snapshot = versions.getCurrent();
      if (v != snapshot) {
        v = snapshot;
        v.retain();
        pos.version(v.hashCode());
        pos.fileNumber(-1);
        readSst(
            standby,
            bucket,
            pos,
            batchSize,
            v,
            new FileQueue(bucket, v, pos.maxFileNumber()),
            entrys,
            availableRollback,
            ++deep);
      } else {
        // Log mode does not lock the version, only the log file.
        // See the logQ field of DBImpl for the log-file locking logic.
        pos.fileNumber(log.getFileNumber());
        pos.pointer(0);
        pos.fileType(LOG);
      }

      return;
    } else {
      pos.fileNumber(file.getNumber());
    }

    // Read the records whose sequence numbers are greater than maxSeq
    String key = generateKey(standby, bucket);
    SlaveIterator si = v.iterators.get(key);
    if (si == null) {
      si = new SlaveIterator(file);
    } else {
      if (si.getFileNumber() != file.getNumber()) {
        si = new SlaveIterator(file);
      }
    }
    v.iterators.put(key, si);

    if (availableRollback) {
      if (pos.pointer() > 0) {
        logger.warn("{}-bucket sst happen rollback, position={}.", bucket, pos);
        si.seekToFirst();
      } else {
        pos.pointer(file.getNumber());
      }
    }

    long maxSeq = pos.maxSeq();
    while (si.hasNext()) {
      Entry<InternalKey, Slice> e = si.next(bucket, maxSeq);
      if (e != null) {
        InternalKey ikey = e.getKey();
        pos.curMaxSeq(ikey.getSequenceNumber());
        entrys.add(
            new BlockEntry(
                ikey.getUserKey(),
                ValueType.VALUE.equals(ikey.getValueType()) ? e.getValue() : Slices.EMPTY_SLICE));
      }
      if (entrys.size() > batchSize) {
        break;
      }
    }

    // Check whether the current file has been fully read
    if (!si.hasNext()) {
      v.iterators.remove(key);
      FileMetaData fmd = queue.peek();
      if (fmd != null) {
        pos.fileNumber(fmd.getNumber());
      } else {
        pos.fileNumber(pos.curMaxFileNumber + 1); // +1 means this version does not contain that file
      }
    }

    // Continue reading data from the next file
    if (entrys.size() < batchSize) {
      readSst(standby, bucket, pos, batchSize, v, queue, entrys, false, ++deep);
    }
  }
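The deep > 50 cap exists because every exhausted file and every version switch costs one stack frame, yet both recursive calls sit in tail position, so the same control flow fits in a loop with no depth limit at all. A self-contained analogue of that rewrite, with plain iterators standing in for SST files (this illustrates the transformation, not the project's readSst):

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Queue;

public class BatchReader {

  // Recursive shape, as in readSst: one stack frame per exhausted source,
  // hence a depth guard is required.
  static void fillRecursive(Queue<Iterator<Integer>> sources,
      List<Integer> batch, int batchSize, int deep) {
    if (deep > 50) return; // stack-overflow guard
    Iterator<Integer> src = sources.poll();
    if (src == null) return;
    while (src.hasNext() && batch.size() < batchSize) batch.add(src.next());
    if (batch.size() < batchSize) {
      fillRecursive(sources, batch, batchSize, deep + 1); // tail call
    }
  }

  // Loop shape: the tail call becomes another loop iteration, so no depth
  // guard is needed and the stack stays flat.
  static void fillIterative(Queue<Iterator<Integer>> sources,
      List<Integer> batch, int batchSize) {
    while (batch.size() < batchSize) {
      Iterator<Integer> src = sources.poll();
      if (src == null) return;
      while (src.hasNext() && batch.size() < batchSize) batch.add(src.next());
    }
  }

  public static void main(String[] args) {
    Queue<Iterator<Integer>> sources = new ArrayDeque<>();
    sources.add(List.of(1, 2, 3).iterator());
    sources.add(List.of(4, 5).iterator());
    List<Integer> batch = new ArrayList<>();
    fillIterative(sources, batch, 4);
    System.out.println(batch); // [1, 2, 3, 4]
  }
}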