Example #1
  @Test
  public void runOffsetManipulationInZooKeeperTest() {
    try {
      final String topicName = "ZookeeperOffsetHandlerTest-Topic";
      final String groupId = "ZookeeperOffsetHandlerTest-Group";

      final Long offset = (long) (Math.random() * Long.MAX_VALUE);

      CuratorFramework curatorFramework =
          ((KafkaTestEnvironmentImpl) kafkaServer).createCuratorClient();
      kafkaServer.createTestTopic(topicName, 3, 2);

      ZookeeperOffsetHandler.setOffsetInZooKeeper(curatorFramework, groupId, topicName, 0, offset);

      Long fetchedOffset =
          ZookeeperOffsetHandler.getOffsetFromZooKeeper(curatorFramework, groupId, topicName, 0);

      curatorFramework.close();

      assertEquals(offset, fetchedOffset);
    } catch (Exception e) {
      e.printStackTrace();
      fail(e.getMessage());
    }
  }
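Assuming a Curator version in which CuratorFramework implements java.io.Closeable, the same round trip can be written with try-with-resources so the client is released even when an assertion fails before the explicit close() call. A minimal sketch under the same test harness as Example #1 (the method name is illustrative):
  @Test
  public void runOffsetRoundTripSketch() throws Exception {
    final String topicName = "ZookeeperOffsetHandlerTest-Topic";
    final String groupId = "ZookeeperOffsetHandlerTest-Group";
    final Long offset = (long) (Math.random() * Long.MAX_VALUE);

    // try-with-resources closes the Curator client on every exit path
    try (CuratorFramework curator =
        ((KafkaTestEnvironmentImpl) kafkaServer).createCuratorClient()) {
      kafkaServer.createTestTopic(topicName, 3, 2);
      ZookeeperOffsetHandler.setOffsetInZooKeeper(curator, groupId, topicName, 0, offset);
      assertEquals(
          offset, ZookeeperOffsetHandler.getOffsetFromZooKeeper(curator, groupId, topicName, 0));
    }
  }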
Example #2
  @Test(timeout = 60000)
  public void testInvalidOffset() throws Exception {
    final String topic = "invalidOffsetTopic";
    final int parallelism = 1;

    // create topic
    createTestTopic(topic, parallelism, 1);

    final StreamExecutionEnvironment env =
        StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);

    // write 20 messages into topic:
    writeSequence(env, topic, 20, parallelism);

    // set invalid offset:
    CuratorFramework curatorClient = ((KafkaTestEnvironmentImpl) kafkaServer).createCuratorClient();
    ZookeeperOffsetHandler.setOffsetInZooKeeper(
        curatorClient, standardProps.getProperty("group.id"), topic, 0, 1234);
    curatorClient.close();

    // read from topic
    final int valuesCount = 20;
    final int startFrom = 0;
    readSequence(env, standardProps, parallelism, topic, valuesCount, startFrom);

    deleteTestTopic(topic);
  }
Example #3
  @Test
  public void testNoBarrier() throws Exception {
    CuratorFramework client =
        CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
    try {
      client.start();

      final DistributedBarrier barrier = new DistributedBarrier(client, "/barrier");
      Assert.assertTrue(barrier.waitOnBarrier(10, TimeUnit.SECONDS));

      // just for grins, test the infinite wait
      ExecutorService service = Executors.newSingleThreadExecutor();
      Future<Object> future =
          service.submit(
              new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                  barrier.waitOnBarrier();
                  return "";
                }
              });
      Assert.assertNotNull(future.get(10, TimeUnit.SECONDS));
    } finally {
      client.close();
    }
  }
Example #4
  @Test
  public void testBasic() throws Exception {
    CuratorFramework client =
        CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
    try {
      client.start();

      final DistributedBarrier barrier = new DistributedBarrier(client, "/barrier");
      barrier.setBarrier();

      ExecutorService service = Executors.newSingleThreadExecutor();
      service.submit(
          new Callable<Object>() {
            @Override
            public Object call() throws Exception {
              Thread.sleep(1000);
              barrier.removeBarrier();
              return null;
            }
          });

      Assert.assertTrue(barrier.waitOnBarrier(10, TimeUnit.SECONDS));
    } finally {
      client.close();
    }
  }
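The one-sided DistributedBarrier used above has a double-sided sibling, DistributedDoubleBarrier, where enter() and leave() block until a fixed number of members arrive. A minimal sketch against the same test server; with a member quantity of 1, both calls return as soon as the single participant shows up:
  @Test
  public void testDoubleBarrierSketch() throws Exception {
    CuratorFramework client =
        CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
    try {
      client.start();

      // memberQty = 1: the lone participant satisfies the barrier immediately
      DistributedDoubleBarrier barrier = new DistributedDoubleBarrier(client, "/doubleBarrier", 1);
      Assert.assertTrue(barrier.enter(10, TimeUnit.SECONDS));
      Assert.assertTrue(barrier.leave(10, TimeUnit.SECONDS));
    } finally {
      client.close();
    }
  }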
Example #5
  @Override
  public void close() {
    leaderSelector.close();
    client.close();

    webServer.close();
  }
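The order in this close() matters: the LeaderSelector recipe must be closed before the CuratorFramework client it runs on. Where one failing close() should not prevent the others from running, Curator's CloseableUtils.closeQuietly can be used instead; a sketch assuming the same three fields:
  @Override
  public void close() {
    CloseableUtils.closeQuietly(leaderSelector); // recipe first...
    CloseableUtils.closeQuietly(client); // ...then the client it depends on
    webServer.close();
  }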
Example #6
  /**
   * Tests that offsets are properly committed to ZooKeeper and initial offsets are read from
   * ZooKeeper.
   *
   * <p>This test is only applicable if the Flink Kafka Consumer uses the ZooKeeperOffsetHandler.
   */
  @Test(timeout = 60000)
  public void testOffsetInZookeeper() throws Exception {
    final int parallelism = 3;

    // write a sequence from 0 to 99 to each of the 3 partitions.
    final String topicName = writeSequence("testOffsetInZK", 100, parallelism, 1);

    StreamExecutionEnvironment env1 =
        StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
    env1.getConfig().disableSysoutLogging();
    env1.enableCheckpointing(50);
    env1.getConfig().setRestartStrategy(RestartStrategies.noRestart());
    env1.setParallelism(parallelism);

    StreamExecutionEnvironment env2 =
        StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
    env2.getConfig().disableSysoutLogging();
    env2.enableCheckpointing(50);
    env2.getConfig().setRestartStrategy(RestartStrategies.noRestart());
    env2.setParallelism(parallelism);

    readSequence(env1, standardProps, parallelism, topicName, 100, 0);

    CuratorFramework curatorClient = ((KafkaTestEnvironmentImpl) kafkaServer).createCuratorClient();

    Long o1 =
        ZookeeperOffsetHandler.getOffsetFromZooKeeper(
            curatorClient, standardProps.getProperty("group.id"), topicName, 0);
    Long o2 =
        ZookeeperOffsetHandler.getOffsetFromZooKeeper(
            curatorClient, standardProps.getProperty("group.id"), topicName, 1);
    Long o3 =
        ZookeeperOffsetHandler.getOffsetFromZooKeeper(
            curatorClient, standardProps.getProperty("group.id"), topicName, 2);

    LOG.info("Got final offsets from zookeeper o1={}, o2={}, o3={}", o1, o2, o3);

    assertTrue(o1 == null || (o1 >= 0 && o1 <= 100));
    assertTrue(o2 == null || (o2 >= 0 && o2 <= 100));
    assertTrue(o3 == null || (o3 >= 0 && o3 <= 100));

    LOG.info("Manipulating offsets");

    // set the committed offset to 49 for the three partitions, so reading resumes at record 50
    ZookeeperOffsetHandler.setOffsetInZooKeeper(
        curatorClient, standardProps.getProperty("group.id"), topicName, 0, 49);
    ZookeeperOffsetHandler.setOffsetInZooKeeper(
        curatorClient, standardProps.getProperty("group.id"), topicName, 1, 49);
    ZookeeperOffsetHandler.setOffsetInZooKeeper(
        curatorClient, standardProps.getProperty("group.id"), topicName, 2, 49);

    curatorClient.close();

    // read the remaining 50 records, starting from offset 50, with the second environment
    readSequence(env2, standardProps, parallelism, topicName, 50, 50);

    deleteTestTopic(topicName);
  }
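For orientation, the offsets that ZookeeperOffsetHandler manipulates live in plain znodes. A hedged sketch of reading one directly with Curator, assuming the classic Kafka layout /consumers/&lt;group&gt;/offsets/&lt;topic&gt;/&lt;partition&gt;; the path convention and the helper are assumptions for illustration, not part of the handler's public API:
  static Long readCommittedOffset(
      CuratorFramework curator, String groupId, String topic, int partition) throws Exception {
    // assumed znode layout: /consumers/<group>/offsets/<topic>/<partition>
    String path = String.format("/consumers/%s/offsets/%s/%d", groupId, topic, partition);
    if (curator.checkExists().forPath(path) == null) {
      return null; // nothing committed yet, matching the nullable Longs above
    }
    return Long.valueOf(new String(curator.getData().forPath(path), StandardCharsets.UTF_8));
  }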
Example #7
  @Test(timeout = 60000)
  public void testOffsetAutocommitTest() throws Exception {
    final int parallelism = 3;

    // write a sequence from 0 to 99 to each of the 3 partitions.
    final String topicName = writeSequence("testOffsetAutocommit", 100, parallelism, 1);

    StreamExecutionEnvironment env =
        StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
    // NOTE: We are not enabling the checkpointing!
    env.getConfig().disableSysoutLogging();
    env.getConfig().setRestartStrategy(RestartStrategies.noRestart());
    env.setParallelism(parallelism);

    // the readSequence operation sleeps for 20 ms between each record.
    // setting the commit interval to 25*20 = 500 ms makes sure that we
    // commit roughly 3-4 times while reading, but at least once.
    Properties readProps = new Properties();
    readProps.putAll(standardProps);
    readProps.setProperty("auto.commit.interval.ms", "500");

    // read so that the offset can be committed to ZK
    readSequence(env, readProps, parallelism, topicName, 100, 0);

    // get the offset
    CuratorFramework curatorFramework =
        ((KafkaTestEnvironmentImpl) kafkaServer).createCuratorClient();

    Long o1 =
        ZookeeperOffsetHandler.getOffsetFromZooKeeper(
            curatorFramework, standardProps.getProperty("group.id"), topicName, 0);
    Long o2 =
        ZookeeperOffsetHandler.getOffsetFromZooKeeper(
            curatorFramework, standardProps.getProperty("group.id"), topicName, 1);
    Long o3 =
        ZookeeperOffsetHandler.getOffsetFromZooKeeper(
            curatorFramework, standardProps.getProperty("group.id"), topicName, 2);
    curatorFramework.close();
    LOG.info("Got final offsets from zookeeper o1={}, o2={}, o3={}", o1, o2, o3);

    // ensure that the offset has been committed
    boolean atLeastOneOffsetSet =
        (o1 != null && o1 > 0 && o1 <= 100)
            || (o2 != null && o2 > 0 && o2 <= 100)
            || (o3 != null && o3 > 0 && o3 <= 100);
    assertTrue(
        "Expecting at least one offset to be set o1=" + o1 + " o2=" + o2 + " o3=" + o3,
        atLeastOneOffsetSet);

    deleteTestTopic(topicName);
  }
Example #8
  @Override
  public void close() {
    if (state.compareAndSet(State.OPEN, State.CLOSED)) {
      for (Map.Entry<String, Entry> mapEntry : things.entrySet()) {
        Entry entry = mapEntry.getValue();
        if (entry.closer != null) {
          log.debug(
              String.format(
                  "Closing left over thing. Type: %s - Id: %s",
                  entry.thing.getClass(), mapEntry.getKey()));
          entry.closer.close();
        }
      }
      things.clear();

      client.close();
      events.clear();
    }
  }
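The compareAndSet guard above makes close() idempotent and race-free: only the thread that wins the OPEN to CLOSED transition runs the cleanup. Reduced to its skeleton (names illustrative):
  private enum State { OPEN, CLOSED }

  private final AtomicReference<State> state = new AtomicReference<>(State.OPEN);

  public void close() {
    // at most one caller wins this transition, so the cleanup runs exactly once
    if (state.compareAndSet(State.OPEN, State.CLOSED)) {
      // release clients, clear maps, etc.
    }
  }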
Example #9
  public DistributedClusterState(Map<Object, Object> _conf) throws Exception {
    conf = _conf;

    // make sure the STORM_ZOOKEEPER_ROOT dir exists
    CuratorFramework _zk = mkZk();
    String path = String.valueOf(conf.get(Config.STORM_ZOOKEEPER_ROOT));
    zkobj.mkdirs(_zk, path);
    _zk.close();

    active = new AtomicBoolean(true);

    watcher =
        new WatcherCallBack() {
          @Override
          public void execute(KeeperState state, EventType type, String path) {
            if (active.get()) {
              if (!(state.equals(KeeperState.SyncConnected))) {
                LOG.warn(
                    "Received event "
                        + state
                        + ":"
                        + type
                        + ":"
                        + path
                        + " with disconnected Zookeeper.");
              } else {
                LOG.info("Received event " + state + ":" + type + ":" + path);
              }

              if (!type.equals(EventType.None)) {
                for (Entry<UUID, ClusterStateCallback> e : callbacks.entrySet()) {
                  ClusterStateCallback fn = e.getValue();
                  fn.execute(type, path);
                }
              }
            }
          }
        };
    zk = mkZk(watcher);
  }
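The watcher above forwards every non-None event to all entries of the callbacks map. A hedged sketch of the registration side, assuming callbacks is a ConcurrentHashMap&lt;UUID, ClusterStateCallback&gt; (the field's declaration is not shown above):
  public UUID register(ClusterStateCallback callback) {
    UUID id = UUID.randomUUID();
    callbacks.put(id, callback); // picked up by the watcher's dispatch loop
    return id;
  }

  public void unregister(UUID id) {
    callbacks.remove(id);
  }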
Example #10
  void doEvaluate(Statement base) throws Throwable {
    try {
      cluster = new TestingCluster(3);
      cluster.start();

      client = newClient(cluster.getConnectString(), new RetryOneTime(200 /* ms */));
      client.start();

      checkState(
          client.blockUntilConnected(5, TimeUnit.SECONDS),
          "failed to connect to zookeeper in 5 seconds");

      base.evaluate();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new IllegalStateException("Interrupted while connecting to ZooKeeper", e);
    } finally {
      client.close();
      cluster.close();
    }
  }
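The same start/connect/teardown sequence can be packaged as a reusable JUnit 4 rule. A sketch using Curator's single-node TestingServer in place of the 3-node TestingCluster; the rule class itself is illustrative:
  public static class ZooKeeperResource extends ExternalResource {
    private TestingServer server;
    private CuratorFramework client;

    @Override
    protected void before() throws Throwable {
      server = new TestingServer();
      client =
          CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(200));
      client.start();
      if (!client.blockUntilConnected(5, TimeUnit.SECONDS)) {
        throw new IllegalStateException("failed to connect to zookeeper in 5 seconds");
      }
    }

    @Override
    protected void after() {
      CloseableUtils.closeQuietly(client); // close the client before its server
      CloseableUtils.closeQuietly(server);
    }
  }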
Example #11
  @Test
  public void testServerCrash() throws Exception {
    final int TIMEOUT = 1000; // milliseconds

    final CuratorFramework client =
        CuratorFrameworkFactory.builder()
            .connectString(server.getConnectString())
            .connectionTimeoutMs(TIMEOUT)
            .retryPolicy(new RetryOneTime(1))
            .build();
    try {
      client.start();

      final DistributedBarrier barrier = new DistributedBarrier(client, "/barrier");
      barrier.setBarrier();

      final ExecutorService service = Executors.newSingleThreadExecutor();
      Future<Object> future =
          service.submit(
              new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                  Thread.sleep(TIMEOUT / 2);
                  server.stop();
                  return null;
                }
              });

      barrier.waitOnBarrier(TIMEOUT * 2, TimeUnit.MILLISECONDS);
      future.get();
      Assert.fail();
    } catch (KeeperException.ConnectionLossException expected) {
      // expected
    } finally {
      client.close();
    }
  }
Example #12
 @Override
 public void close() throws IOException {
   curatorClient.close();
 }
Example #13
 @Override
 public void close() {
   this.active.set(false);
   zk.close();
 }
Example #14
  @Test
  public void testRun() throws Exception {
    TestingCluster localCluster = new TestingCluster(1);
    localCluster.start();

    CuratorFramework localCf =
        CuratorFrameworkFactory.builder()
            .connectString(localCluster.getConnectString())
            .retryPolicy(new ExponentialBackoffRetry(1, 10))
            .compressionProvider(new PotentiallyGzippedCompressionProvider(false))
            .build();
    localCf.start();

    TestingCluster remoteCluster = new TestingCluster(1);
    remoteCluster.start();

    CuratorFramework remoteCf =
        CuratorFrameworkFactory.builder()
            .connectString(remoteCluster.getConnectString())
            .retryPolicy(new ExponentialBackoffRetry(1, 10))
            .compressionProvider(new PotentiallyGzippedCompressionProvider(false))
            .build();
    remoteCf.start();

    ObjectMapper jsonMapper = new DefaultObjectMapper();
    DruidClusterBridgeConfig config =
        new DruidClusterBridgeConfig() {
          @Override
          public String getTier() {
            return DruidServer.DEFAULT_TIER;
          }

          @Override
          public Duration getStartDelay() {
            return new Duration(0);
          }

          @Override
          public Duration getPeriod() {
            return new Duration(Long.MAX_VALUE);
          }

          @Override
          public String getBrokerServiceName() {
            return "testz0rz";
          }

          @Override
          public int getPriority() {
            return 0;
          }
        };

    ScheduledExecutorFactory factory = ScheduledExecutors.createFactory(new Lifecycle());

    DruidNode me = new DruidNode("me", "localhost", 8080);

    AtomicReference<LeaderLatch> leaderLatch =
        new AtomicReference<>(new LeaderLatch(localCf, "test"));

    ZkPathsConfig zkPathsConfig =
        new ZkPathsConfig() {
          @Override
          public String getZkBasePath() {
            return "/druid";
          }
        };
    DruidServerMetadata metadata =
        new DruidServerMetadata("test", "localhost", 1000, "bridge", DruidServer.DEFAULT_TIER, 0);
    DbSegmentPublisher dbSegmentPublisher = EasyMock.createMock(DbSegmentPublisher.class);
    EasyMock.replay(dbSegmentPublisher);
    DatabaseSegmentManager databaseSegmentManager =
        EasyMock.createMock(DatabaseSegmentManager.class);
    EasyMock.replay(databaseSegmentManager);
    ServerView serverView = EasyMock.createMock(ServerView.class);
    EasyMock.replay(serverView);

    BridgeZkCoordinator bridgeZkCoordinator =
        new BridgeZkCoordinator(
            jsonMapper,
            zkPathsConfig,
            new SegmentLoaderConfig(),
            metadata,
            remoteCf,
            dbSegmentPublisher,
            databaseSegmentManager,
            serverView);

    Announcer announcer = new Announcer(remoteCf, Executors.newSingleThreadExecutor());
    announcer.start();
    announcer.announce(
        zkPathsConfig.getAnnouncementsPath() + "/" + me.getHost(),
        jsonMapper.writeValueAsBytes(me));

    BatchDataSegmentAnnouncer batchDataSegmentAnnouncer =
        EasyMock.createMock(BatchDataSegmentAnnouncer.class);
    BatchServerInventoryView batchServerInventoryView =
        EasyMock.createMock(BatchServerInventoryView.class);
    EasyMock.expect(batchServerInventoryView.getInventory())
        .andReturn(
            Arrays.asList(
                new DruidServer("1", "localhost", 117, "historical", DruidServer.DEFAULT_TIER, 0),
                new DruidServer("2", "localhost", 1, "historical", DruidServer.DEFAULT_TIER, 0)));
    batchServerInventoryView.registerSegmentCallback(
        EasyMock.<Executor>anyObject(), EasyMock.<ServerView.SegmentCallback>anyObject());
    batchServerInventoryView.registerServerCallback(
        EasyMock.<Executor>anyObject(), EasyMock.<ServerView.ServerCallback>anyObject());
    EasyMock.expectLastCall();
    batchServerInventoryView.start();
    EasyMock.expectLastCall();
    batchServerInventoryView.stop();
    EasyMock.expectLastCall();
    EasyMock.replay(batchServerInventoryView);

    DruidClusterBridge bridge =
        new DruidClusterBridge(
            jsonMapper,
            config,
            factory,
            me,
            localCf,
            leaderLatch,
            bridgeZkCoordinator,
            announcer,
            batchDataSegmentAnnouncer,
            batchServerInventoryView);

    bridge.start();

    int retry = 0;
    while (!bridge.isLeader()) {
      if (retry > 5) {
        throw new ISE("Unable to become leader");
      }

      Thread.sleep(100);
      retry++;
    }

    String path = "/druid/announcements/localhost:8080";
    retry = 0;
    while (remoteCf.checkExists().forPath(path) == null) {
      if (retry > 5) {
        throw new ISE("Unable to announce");
      }

      Thread.sleep(100);
      retry++;
    }

    boolean verified = verifyUpdate(jsonMapper, path, remoteCf);
    retry = 0;
    while (!verified) {
      if (retry > 5) {
        throw new ISE("No updates to bridge node occurred");
      }

      Thread.sleep(100);
      retry++;

      verified = verifyUpdate(jsonMapper, path, remoteCf);
    }

    announcer.stop();
    bridge.stop();

    remoteCf.close();
    remoteCluster.close();
    localCf.close();
    localCluster.close();

    EasyMock.verify(batchServerInventoryView);
    EasyMock.verify(dbSegmentPublisher);
    EasyMock.verify(databaseSegmentManager);
    EasyMock.verify(serverView);
  }
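The three retry loops in this test share one shape: poll a condition up to six times, sleeping 100 ms between attempts, then give up with an ISE. A hypothetical helper that factors this out (name and signature are illustrative, not part of the original test); each loop above would then collapse to a single call, e.g. awaiting bridge.isLeader() with the message "Unable to become leader":
  private static void awaitOrThrow(Callable<Boolean> condition, String message) throws Exception {
    for (int retry = 0; retry <= 5; retry++) {
      if (condition.call()) {
        return;
      }
      Thread.sleep(100);
    }
    throw new ISE(message);
  }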
Example #15
  /**
   * This test ensures that when the consumers retrieve a start offset from Kafka (earliest,
   * latest), this offset is committed to ZooKeeper, even if some partitions are not read.
   *
   * <p>Test: - Create a topic with 3 partitions and write 50 messages into each partition. - Start
   * three consumers with auto.offset.reset='latest' and wait until they have committed into ZK. -
   * Check that the offsets in ZK are set to 49 (the last record read) for the three partitions.
   *
   * <p>See FLINK-3440 as well.
   */
  @Test(timeout = 60000)
  public void testKafkaOffsetRetrievalToZookeeper() throws Exception {
    final int parallelism = 3;

    // write a sequence from 0 to 49 to each of the 3 partitions.
    final String topicName = writeSequence("testKafkaOffsetToZk", 50, parallelism, 1);

    final StreamExecutionEnvironment env2 =
        StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
    env2.getConfig().disableSysoutLogging();
    env2.getConfig().setRestartStrategy(RestartStrategies.noRestart());
    env2.setParallelism(parallelism);
    env2.enableCheckpointing(200);

    Properties readProps = new Properties();
    readProps.putAll(standardProps);
    readProps.setProperty("auto.offset.reset", "latest");

    DataStream<String> stream =
        env2.addSource(kafkaServer.getConsumer(topicName, new SimpleStringSchema(), readProps));
    stream.addSink(new DiscardingSink<String>());

    final AtomicReference<Throwable> errorRef = new AtomicReference<>();
    final Thread runner =
        new Thread("runner") {
          @Override
          public void run() {
            try {
              env2.execute();
            } catch (Throwable t) {
              if (!(t.getCause() instanceof JobCancellationException)) {
                errorRef.set(t);
              }
            }
          }
        };
    runner.start();

    final CuratorFramework curatorFramework =
        ((KafkaTestEnvironmentImpl) kafkaServer).createCuratorClient();
    final Long l49 = 49L;

    final long deadline = 30000 + System.currentTimeMillis();
    do {
      Long o1 =
          ZookeeperOffsetHandler.getOffsetFromZooKeeper(
              curatorFramework, standardProps.getProperty("group.id"), topicName, 0);
      Long o2 =
          ZookeeperOffsetHandler.getOffsetFromZooKeeper(
              curatorFramework, standardProps.getProperty("group.id"), topicName, 1);
      Long o3 =
          ZookeeperOffsetHandler.getOffsetFromZooKeeper(
              curatorFramework, standardProps.getProperty("group.id"), topicName, 2);

      if (l49.equals(o1) && l49.equals(o2) && l49.equals(o3)) {
        break;
      }

      Thread.sleep(100);
    } while (System.currentTimeMillis() < deadline);

    // cancel the job
    JobManagerCommunicationUtils.cancelCurrentJob(flink.getLeaderGateway(timeout));

    final Throwable t = errorRef.get();
    if (t != null) {
      throw new RuntimeException("Job failed with an exception", t);
    }

    // check if offsets are correctly in ZK
    Long o1 =
        ZookeeperOffsetHandler.getOffsetFromZooKeeper(
            curatorFramework, standardProps.getProperty("group.id"), topicName, 0);
    Long o2 =
        ZookeeperOffsetHandler.getOffsetFromZooKeeper(
            curatorFramework, standardProps.getProperty("group.id"), topicName, 1);
    Long o3 =
        ZookeeperOffsetHandler.getOffsetFromZooKeeper(
            curatorFramework, standardProps.getProperty("group.id"), topicName, 2);
    Assert.assertEquals(Long.valueOf(49L), o1);
    Assert.assertEquals(Long.valueOf(49L), o2);
    Assert.assertEquals(Long.valueOf(49L), o3);

    curatorFramework.close();
  }
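Note that both the polling loop (l49.equals(o1)) and the final assertions (Long.valueOf(49L)) deliberately avoid == on the boxed offsets: reference equality on Longs is only guaranteed inside the [-128, 127] autobox cache. A short illustration:
  Long b = 1234L;
  Long c = 1234L;
  Assert.assertTrue(b.equals(c)); // value equality: always true
  // (b == c) compares references and is not guaranteed for values outside the
  // [-128, 127] cache, hence equals()/assertEquals(Long.valueOf(...)) above.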
Example #16
 @After
 public void tearDown() throws Exception {
   workerTaskMonitor.stop();
   cf.close();
   testingCluster.stop();
 }
Example #17
 @Override
 public void close() {
   curatorFramework.close();
 }
Example #18
 @Override
 public void destroy() throws Exception {
   zkClient.close();
 }
Example #19
 public void close() {
   client.close();
 }
Example #20
 public void close() {
   _curator.close();
   _curator = null;
 }
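Because this close() nulls out _curator, calling it a second time would throw a NullPointerException. A null-guarded variant that is safe to call more than once (single-threaded callers assumed):
 public void close() {
   if (_curator != null) {
     _curator.close();
     _curator = null; // subsequent calls become no-ops
   }
 }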