Code example #1
  @Test
  public void testKafkaConsumer09Read() throws IOException, StageException {
    int zkConnectionTimeout = 6000;
    int zkSessionTimeout = 6000;

    EmbeddedZookeeper zookeeper = new EmbeddedZookeeper();
    String zkConnect = String.format("127.0.0.1:%d", zookeeper.port());
    ZkUtils zkUtils =
        ZkUtils.apply(
            zkConnect, zkSessionTimeout, zkConnectionTimeout, JaasUtils.isZkSecurityEnabled());

    int port = TestUtil.getFreePort();
    KafkaServer kafkaServer = TestUtil.createKafkaServer(port, zkConnect);

    final String topic = "TestKafkaConsumer09_1";
    final String message = "Hello StreamSets";

    Source.Context sourceContext =
        ContextInfoCreator.createSourceContext(
            "s", false, OnRecordError.TO_ERROR, ImmutableList.of("a"));

    Map<String, Object> props = new HashMap<>();
    props.put("auto.commit.interval.ms", "1000");
    props.put("auto.offset.reset", "earliest");
    props.put("session.timeout.ms", "30000");
    SdcKafkaConsumer sdcKafkaConsumer =
        createSdcKafkaConsumer("localhost:" + port, topic, 1000, sourceContext, props, "test");
    sdcKafkaConsumer.validate(new ArrayList<Stage.ConfigIssue>(), sourceContext);
    sdcKafkaConsumer.init();

    // produce messages to the topic (the read loop below expects 10 records)
    produce(topic, "localhost:" + port, message);

    // read
    List<MessageAndOffset> read = new ArrayList<>();
    while (read.size() < 10) {
      MessageAndOffset messageAndOffset = sdcKafkaConsumer.read();
      if (messageAndOffset != null) {
        read.add(messageAndOffset);
      }
    }
    // verify
    Assert.assertNotNull(read);
    Assert.assertEquals(10, read.size());
    verify(read, message);

    // delete topic and shutdown
    AdminUtils.deleteTopic(zkUtils, topic);
    kafkaServer.shutdown();
    zookeeper.shutdown();
  }
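
The produce(...) helper called above is not part of this excerpt. Below is a minimal sketch of what such a helper might look like, assuming the standard org.apache.kafka.clients.producer API and java.util.Properties; the method name and parameters come from the call site above, and the record count of 10 is inferred from the read loop and the final assertion. It is an assumption, not the actual StreamSets test utility.

  // Hypothetical sketch only; not the real test utility. Assumes the standard
  // KafkaProducer API (org.apache.kafka.clients.producer) and java.util.Properties.
  private void produce(String topic, String bootstrapServers, String message) {
    Properties props = new Properties();
    props.put("bootstrap.servers", bootstrapServers);
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
      // The test reads until it has 10 records, so the helper presumably writes 10 copies.
      for (int i = 0; i < 10; i++) {
        producer.send(new ProducerRecord<>(topic, message));
      }
      producer.flush();
    }
  }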
Code example #2
  @Test
  public void testGetHdfsConfiguration() throws Exception {
    ClusterHdfsDSource dSource = new ForTestClusterHdfsDSource();
    configure(dSource, dir.toString());
    ClusterHdfsSource clusterHdfsSource = (ClusterHdfsSource) dSource.createSource();
    try {
      clusterHdfsSource.init(
          null,
          ContextInfoCreator.createSourceContext(
              "myInstance", false, OnRecordError.TO_ERROR, ImmutableList.of("lane")));
      Assert.assertNotNull(clusterHdfsSource.getConfiguration());
      assertEquals("X", clusterHdfsSource.getConfiguration().get("x"));
    } finally {
      clusterHdfsSource.destroy();
    }
  }
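
Both HDFS examples rely on a configure(...) helper and a ForTestClusterHdfsDSource test subclass that are not shown in this excerpt. The sketch below is only a guess at the part of configure(...) that the assertions depend on: the hdfsUri, hdfsDirLocations, and hdfsConfigs field names come from their usages in code example #3, miniDFS is the MiniDFSCluster field used there, and the "x" -> "X" entry mirrors the getConfiguration().get("x") assertion in code example #2. The real helper also sets data format, batch size, and other stage options omitted here.

  // Hypothetical sketch only; the real helper configures additional stage options.
  private void configure(ClusterHdfsDSource dSource, String dirLocation) {
    dSource.hdfsUri = miniDFS.getURI().toString();   // several cases in code example #3 override this
    dSource.hdfsDirLocations = Arrays.asList(dirLocation);
    dSource.hdfsConfigs = new HashMap<>();
    dSource.hdfsConfigs.put("x", "X");               // what testGetHdfsConfiguration() asserts on
  }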
Code example #3
  @Test
  public void testWrongHDFSDirLocation() throws Exception {
    ClusterHdfsDSource dSource = new ForTestClusterHdfsDSource();
    configure(dSource, dir.toUri().getPath());
    dSource.hdfsUri = "/pathwithnoschemeorauthority";
    ClusterHdfsSource clusterHdfsSource = (ClusterHdfsSource) dSource.createSource();
    try {
      List<ConfigIssue> issues =
          clusterHdfsSource.init(
              null,
              ContextInfoCreator.createSourceContext(
                  "myInstance", false, OnRecordError.TO_ERROR, ImmutableList.of("lane")));
      assertEquals(String.valueOf(issues), 1, issues.size());
      assertTrue(String.valueOf(issues), issues.get(0).toString().contains("HADOOPFS_02"));

      dSource.hdfsUri = "file://localhost:8020/";
      clusterHdfsSource = (ClusterHdfsSource) dSource.createSource();
      issues =
          clusterHdfsSource.init(
              null,
              ContextInfoCreator.createSourceContext(
                  "myInstance", false, OnRecordError.TO_ERROR, ImmutableList.of("lane")));
      assertEquals(String.valueOf(issues), 1, issues.size());
      assertTrue(String.valueOf(issues), issues.get(0).toString().contains("HADOOPFS_12"));

      dSource.hdfsUri = "hdfs:///noauthority";
      clusterHdfsSource = (ClusterHdfsSource) dSource.createSource();
      issues =
          clusterHdfsSource.init(
              null,
              ContextInfoCreator.createSourceContext(
                  "myInstance", false, OnRecordError.TO_ERROR, ImmutableList.of("lane")));
      assertEquals(String.valueOf(issues), 1, issues.size());
      assertTrue(String.valueOf(issues), issues.get(0).toString().contains("HADOOPFS_13"));

      dSource.hdfsUri = "hdfs://localhost:8020";
      clusterHdfsSource = (ClusterHdfsSource) dSource.createSource();
      issues =
          clusterHdfsSource.init(
              null,
              ContextInfoCreator.createSourceContext(
                  "myInstance", false, OnRecordError.TO_ERROR, ImmutableList.of("lane")));
      assertEquals(String.valueOf(issues), 1, issues.size());
      assertTrue(String.valueOf(issues), issues.get(0).toString().contains("HADOOPFS_11"));

      // valid cluster URI, but the configured input directory does not exist
      dSource.hdfsUri = miniDFS.getURI().toString();
      dSource.hdfsDirLocations = Arrays.asList("/pathdoesnotexist");
      clusterHdfsSource = (ClusterHdfsSource) dSource.createSource();
      issues =
          clusterHdfsSource.init(
              null,
              ContextInfoCreator.createSourceContext(
                  "myInstance", false, OnRecordError.TO_ERROR, ImmutableList.of("lane")));
      assertEquals(String.valueOf(issues), 1, issues.size());
      assertTrue(String.valueOf(issues), issues.get(0).toString().contains("HADOOPFS_10"));

      // valid cluster URI and an existing directory containing a file: no issues expected
      dSource.hdfsUri = miniDFS.getURI().toString();
      dSource.hdfsDirLocations = Arrays.asList(dir.toUri().getPath());
      FileSystem fs = miniDFS.getFileSystem();
      Path someFile = new Path(new Path(dir.toUri()), "/someFile");
      fs.create(someFile).close();
      clusterHdfsSource = (ClusterHdfsSource) dSource.createSource();
      issues =
          clusterHdfsSource.init(
              null,
              ContextInfoCreator.createSourceContext(
                  "myInstance", false, OnRecordError.TO_ERROR, ImmutableList.of("lane")));
      assertEquals(String.valueOf(issues), 0, issues.size());

      // no explicit hdfsUri; the cluster URI is supplied through FS_DEFAULT_NAME_KEY in hdfsConfigs
      dSource.hdfsUri = null;
      dSource.hdfsConfigs.put(
          CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, miniDFS.getURI().toString());
      someFile = new Path(new Path(dir.toUri()), "/someFile2");
      fs.create(someFile).close();
      clusterHdfsSource = (ClusterHdfsSource) dSource.createSource();
      issues =
          clusterHdfsSource.init(
              null,
              ContextInfoCreator.createSourceContext(
                  "myInstance", false, OnRecordError.TO_ERROR, ImmutableList.of("lane")));
      assertEquals(String.valueOf(issues), 0, issues.size());

      // hdfsDirLocations points at a file, not a directory
      Path dummyFile = new Path(new Path(dir.toUri()), "/dummyFile");
      fs.create(dummyFile).close();
      dSource.hdfsUri = miniDFS.getURI().toString();
      dSource.hdfsDirLocations = Arrays.asList(dummyFile.toUri().getPath());
      clusterHdfsSource = (ClusterHdfsSource) dSource.createSource();
      issues =
          clusterHdfsSource.init(
              null,
              ContextInfoCreator.createSourceContext(
                  "myInstance", false, OnRecordError.TO_ERROR, ImmutableList.of("lane")));
      assertEquals(String.valueOf(issues), 1, issues.size());
      assertTrue(String.valueOf(issues), issues.get(0).toString().contains("HADOOPFS_15"));

      // hdfsDirLocations points at an empty directory
      Path emptyDir = new Path(dir.toUri().getPath(), "emptyDir");
      fs.mkdirs(emptyDir);
      dSource.hdfsUri = miniDFS.getURI().toString();
      dSource.hdfsDirLocations = Arrays.asList(emptyDir.toUri().getPath());
      clusterHdfsSource = (ClusterHdfsSource) dSource.createSource();
      issues =
          clusterHdfsSource.init(
              null,
              ContextInfoCreator.createSourceContext(
                  "myInstance", false, OnRecordError.TO_ERROR, ImmutableList.of("lane")));
      assertEquals(String.valueOf(issues), 1, issues.size());
      assertTrue(String.valueOf(issues), issues.get(0).toString().contains("HADOOPFS_16"));

      // the same directory is no longer empty once path1 is created: no issues expected
      Path path1 = new Path(emptyDir, "path1");
      fs.create(path1).close();
      dSource.hdfsUri = miniDFS.getURI().toString();
      dSource.hdfsDirLocations = Arrays.asList(emptyDir.toUri().getPath());
      clusterHdfsSource = (ClusterHdfsSource) dSource.createSource();
      issues =
          clusterHdfsSource.init(
              null,
              ContextInfoCreator.createSourceContext(
                  "myInstance", false, OnRecordError.TO_ERROR, ImmutableList.of("lane")));
      assertEquals(String.valueOf(issues), 0, issues.size());
    } finally {
      clusterHdfsSource.destroy();
    }
  }