@Test
  public void testZkClientWhenZKIsDownAndRestarts() throws Exception {

    // Iterate updating the timestamp and check the final value.
    // NOTE(review): the assertions below assume INITIAL_MAX_TS_VALUE == 0; otherwise
    // getMaxTimestamp() would be INITIAL_MAX_TS_VALUE + 1_000_000 * ITERATION_COUNT — confirm.
    long previousMaxTimestamp = INITIAL_MAX_TS_VALUE;
    for (int i = 0; i < ITERATION_COUNT; i++) {
      long newMaxTimestamp = previousMaxTimestamp + 1_000_000;
      storage.updateMaxTimestamp(previousMaxTimestamp, newMaxTimestamp);
      previousMaxTimestamp = newMaxTimestamp;
    }
    assertEquals(storage.getMaxTimestamp(), 1_000_000 * ITERATION_COUNT);

    // Stop ZK Server, expect the IO exception, reconnect and get the right value
    LOG.info("Stopping ZK Server");
    zkServer.stop();
    LOG.info("ZK Server Stopped");

    // While ZK is down, reads from the storage must fail with an IOException.
    try {
      storage.getMaxTimestamp();
      fail();
    } catch (IOException ioe) {
      LOG.info("Expected exception", ioe);
    }

    // After restart the client reconnects and the previously-persisted value survives.
    LOG.info("Restarting ZK again");
    zkServer.restart();
    assertEquals(storage.getMaxTimestamp(), 1_000_000 * ITERATION_COUNT);
  }
  /**
   * Demo entry point: starts an in-process ZooKeeper TestingServer, writes a node with a
   * simple Curator client, then reads it back with a client built with explicit
   * timeouts and retry policy. Any failure is logged with its full stack trace.
   */
  public static void main(String[] args) {
    TestingServer server = null;
    CuratorFramework client = null;
    CuratorFramework optionsClient = null;

    try {
      server = new TestingServer();
      client = createSimple(server.getConnectString());
      client.start();
      client.create().creatingParentsIfNeeded().forPath(PATH, "test".getBytes());

      optionsClient =
          createWithOptions(
              server.getConnectString(), new ExponentialBackoffRetry(1000, 3), 1000, 1000);
      optionsClient.start();
      log.info("[{}]", new String(optionsClient.getData().forPath(PATH)));
    } catch (Exception e) {
      log.info("exception throws cause {}", Throwables.getStackTraceAsString(e));
    } finally {
      // Close in reverse creation order.
      // BUG FIX: the TestingServer was previously never closed (resource leak).
      if (optionsClient != null) {
        CloseableUtils.closeQuietly(optionsClient);
      }
      if (client != null) {
        CloseableUtils.closeQuietly(client);
      }
      if (server != null) {
        CloseableUtils.closeQuietly(server);
      }
    }
  }
  /**
   * Demo entry point: starts a ZK TestingServer, attaches a Curator event listener that
   * prints every event type, pushes ten items through a DistributedQueue, waits for
   * delivery, then drains and prints whatever arrived on the consumer side.
   */
  public static void main(String[] args) throws Exception {
    TestingServer server = new TestingServer();
    CuratorFramework client = null;
    DistributedQueue<String> queue = null;
    try {
      client =
          CuratorFrameworkFactory.newClient(
              server.getConnectString(), new ExponentialBackoffRetry(1000, 3));
      client
          .getCuratorListenable()
          .addListener(
              new CuratorListener() {
                @Override
                public void eventReceived(CuratorFramework client, CuratorEvent event)
                    throws Exception {
                  System.out.println("CuratorEvent: " + event.getType().name());
                }
              });

      client.start();
      AkinDistributedBlockingQueue<String> consumerQueue =
          new AkinDistributedBlockingQueue<String>(
              null,
              new ConnectionStateListener() {

                @Override
                public void stateChanged(CuratorFramework client, ConnectionState newState) {}
              });
      QueueBuilder<String> builder =
          QueueBuilder.builder(client, consumerQueue, createQueueSerializer(), PATH);
      queue = builder.buildQueue();
      consumerQueue.setDistributedQueue(queue);
      queue.start();

      for (int i = 0; i < 10; i++) {
        queue.put(" test-" + i);
        Thread.sleep((long) (3 * Math.random()));
      }

      // Give the distributed queue time to deliver the items to the consumer side.
      Thread.sleep(20000);

      // BUG FIX: drain via poll() until empty instead of mutating the queue while
      // iterating over it (the old loop polled once per iterated element).
      String item;
      while ((item = consumerQueue.poll()) != null) {
        System.out.println(item);
      }

    } catch (Exception ex) {
      // BUG FIX: the exception was previously swallowed silently.
      ex.printStackTrace();
    } finally {
      CloseableUtils.closeQuietly(queue);
      CloseableUtils.closeQuietly(client);
      CloseableUtils.closeQuietly(server);
    }
  }
Exemple #4
0
 /**
  * Stops the parent server first, then shuts down the embedded ZooKeeper instance if one
  * was started (m_zkServer is null when running against a real external ZK).
  */
 @Override
 protected void stopServer() throws Exception {
   super.stopServer();
   if (m_zkServer != null) {
     m_zkServer.close();
   }
 }
Exemple #5
0
  /**
   * Starts an in-process ZooKeeper test server on {@code zkPort}, a ZK client connected
   * to it, and an embedded Kafka broker configured via {@code getServerProperties()}.
   */
  public void start() throws Exception {
    zkServer = new TestingServer(zkPort, true);
    zkClient = KafkaUtils.createZkClient(zkServer.getConnectString());

    kafkaServer = new KafkaServerStartable(new KafkaConfig(getServerProperties()));
    kafkaServer.startup();
    LOGGER.debug("Started Kafka server at port {}", kafkaPort);
  }
Exemple #6
0
 /** Sends {@code message} to {@code topicName}, keyed by the message's host name. */
 public void produce(SimplifiedLog message, String topicName) {
   // Lazily create the async producer on first use.
   if (producer == null) {
     producer =
         KafkaUtils.createProducer(
             KafkaUtils.createZkClient(zkServer.getConnectString()), ASYNC, false);
   }
   producer.send(new KeyedMessage<>(topicName, message.getHostName(), message));
   LOGGER.debug("Sent message: {}", message);
 }
Exemple #7
0
 /** Closes the embedded test ZooKeeper (best effort), then runs the parent teardown. */
 @Override
 public void contextDestroyed(ServletContextEvent sce) {
   try {
     testZk.close();
   } catch (Exception ignored) {
     // OK — the test ZK may already be closed; teardown failures are deliberately ignored.
   }
   super.contextDestroyed(sce);
 }
  /** Stops the cluster: client pool first, then the worker, each master, and finally ZK. */
  public void stopTFS() throws Exception {
    mClientPool.close();

    mWorker.stop();
    for (int i = 0; i < mNumOfMasters; i++) {
      mMasters.get(i).stop();
    }
    mCuratorServer.stop();
  }
Exemple #9
0
 /** Builds the broker configuration for the embedded single-node Kafka server. */
 private Properties getServerProperties() {
   Properties props = new Properties();
   props.put("zookeeper.connect", zkServer.getConnectString());
   props.put("broker.id", "1");
   props.put("host.name", "localhost");
   props.put("port", String.valueOf(kafkaPort));
   props.put("log.dir", FileUtils.createTmpDir("embedded-kafka"));
   // Flush after every message so tests observe writes immediately.
   props.put("log.flush.interval.messages", "1");
   return props;
 }
  /**
   * Boots an embedded ZooKeeper server plus a single-broker Kafka instance writing logs to
   * a fresh temp directory, then pre-creates the test topic with the configured partitions.
   */
  public void start() throws Exception {
    zkServer = new TestingServer(zkPort, true);
    zkClient = new ZkClient(zkServer.getConnectString(), 10000, 10000, ZKStringSerializer$.MODULE$);

    // NOTE(review): deleteOnExit() does not remove non-empty directories, so the Kafka log
    // dir will likely survive the JVM unless cleaned up elsewhere — verify teardown.
    File logs = Files.createTempDirectory("kafka_tmp").toFile();
    logs.deleteOnExit();
    LOGGER.debug("Created temp log dir: {}", logs.getAbsolutePath());

    Properties serverProperties = new Properties();
    serverProperties.put("zookeeper.connect", zkServer.getConnectString());
    serverProperties.put("broker.id", "1");
    serverProperties.put("host.name", "localhost");
    serverProperties.put("port", String.valueOf(kafkaPort));
    serverProperties.put("log.dir", logs.getAbsolutePath());
    // Flush after every message so tests observe writes immediately.
    serverProperties.put("log.flush.interval.messages", "1");

    kafkaServer = new KafkaServerStartable(new KafkaConfig(serverProperties));
    kafkaServer.startup();
    AdminUtils.createTopic(zkClient, topicName, topicPartitions, 1, new Properties());
  }
 /**
  * Stops the embedded services: ZK client, then the Kafka broker, then the ZK server.
  * Each handle is null-checked, so this is safe to call even if start() never ran.
  */
 public void stop() throws IOException {
   if (zkClient != null) {
     zkClient.close();
   }
   if (kafkaServer != null) {
     kafkaServer.shutdown();
   }
   if (zkServer != null) {
     zkServer.stop();
   }
 }
  /**
   * Stops the cluster: client pool, worker (plus lineage worker when lineage is enabled),
   * each master, and finally the ZooKeeper test server.
   */
  public void stopTFS() throws Exception {
    mClientPool.close();

    mWorker.stop();
    if (LineageUtils.isLineageEnabled(WorkerContext.getConf())) {
      mLineageWorker.stop();
    }
    for (int i = 0; i < mNumOfMasters; i++) {
      mMasters.get(i).stop();
    }
    mCuratorServer.stop();
  }
 /**
  * Creates an in-process ZooKeeper TestingServer and a Curator client pointed at it, using
  * test Timing-derived session/connection timeouts and transparent GZIP payload compression.
  * The client is only built here — callers must invoke curator.start() themselves.
  */
 private void setupServerAndCurator() throws Exception {
   server = new TestingServer();
   timing = new Timing();
   curator =
       CuratorFrameworkFactory.builder()
           .connectString(server.getConnectString())
           .sessionTimeoutMs(timing.session())
           .connectionTimeoutMs(timing.connection())
           .retryPolicy(new RetryOneTime(1))
           .compressionProvider(new PotentiallyGzippedCompressionProvider(true))
           .build();
 }
  /**
   * Shuts down the embedded Kafka/ZooKeeper environment: brokers first (so they can
   * deregister while ZK is still alive), then the ZK server, then best-effort removal of
   * the temp directories.
   */
  @Override
  public void shutdown() {
    if (brokers != null) {
      for (KafkaServer broker : brokers) {
        if (broker != null) {
          broker.shutdown();
        }
      }
      brokers.clear();
    }

    if (zookeeper != null) {
      try {
        zookeeper.stop();
        zookeeper.close();
      } catch (Exception e) {
        LOG.warn("ZK.stop() failed", e);
      }
      zookeeper = null;
    }

    // Clean up the temp spaces (best effort; duplicated cleanup factored into a helper).
    deleteDirectoryQuietly(tmpKafkaParent);
    deleteDirectoryQuietly(tmpZkDir);
  }

  /** Recursively deletes {@code dir} if it exists, swallowing any failure. */
  private static void deleteDirectoryQuietly(File dir) {
    if (dir != null && dir.exists()) {
      try {
        FileUtils.deleteDirectory(dir);
      } catch (Exception e) {
        // ignore — temp dir cleanup is best effort
      }
    }
  }
Exemple #15
0
 /**
  * Starts an embedded ZooKeeper TestingServer on port 2181 unless {@code -DzkMode=real}
  * requests an external ZK, then delegates to the parent startServer(). A bind failure is
  * tolerated because a previous run may have left an instance listening already.
  */
 @Override
 protected void startServer() throws Exception {
   String zkMode = System.getProperty("zkMode");
   if (!"real".equalsIgnoreCase(zkMode)) {
     try {
       m_zkServer = new TestingServer(2181);
       System.out.println(
           "Starting zk with fake mode, connection string is " + m_zkServer.getConnectString());
     } catch (Throwable e) {
       // BUG FIX: corrected typo "serer" -> "server" in the diagnostic message.
       System.out.println("Zookeeper server start failed, maybe started already.");
     }
   }
   super.startServer();
 }
Exemple #16
0
  /**
   * Consumes up to {@code expectedMsg} messages per stream from {@code topicName}, running
   * one consumer task per partition stream with a timeout of 5 seconds per expected message.
   *
   * @return the messages collected across all streams before the timeout
   */
  public List<SimplifiedLog> consume(String topicName, int topicPartitions, int expectedMsg)
      throws InterruptedException {
    ConsumerConnector consumer =
        KafkaUtils.createConsumer(zkServer.getConnectString(), "test_group", "1");
    ExecutorService executor = null;
    try {
      List<KafkaStream<String, SimplifiedLog>> streams =
          KafkaUtils.getConsumerStreams(consumer, topicName, topicPartitions);

      List<Callable<List<SimplifiedLog>>> tasks = new ArrayList<>();
      streams.forEach(stream -> tasks.add(createConsumerThread(stream.iterator(), expectedMsg)));

      executor = Executors.newFixedThreadPool(streams.size());
      // invokeAll blocks until completion or timeout, cancelling unfinished tasks.
      List<Future<List<SimplifiedLog>>> futures =
          executor.invokeAll(tasks, 5 * expectedMsg, TimeUnit.SECONDS);

      return getResultsFromFutures(futures);
    } finally {
      // BUG FIX: the executor was never shut down (thread leak), and the consumer was not
      // closed when an exception escaped before the old shutdown() call.
      if (executor != null) {
        executor.shutdown();
      }
      consumer.shutdown();
    }
  }
  /**
   * Returns a Callable that reports whether every message in {@code expected} has been
   * received on {@code topicName}. Each invocation consumes at most one message and adds it
   * to an accumulating list.
   *
   * NOTE(review): consumer.shutdown() runs at the end of every call(), so the returned
   * Callable only works for a single invocation unless the polling framework stops at the
   * first {@code true} — verify against the caller.
   */
  public Callable<Boolean> messagesArrived(final Collection<SimplifiedLog> expected) {
    final ConsumerConnector consumer =
        KafkaUtils.createConsumer(zkServer.getConnectString(), "test_group", "1");
    final ConsumerIterator<String, SimplifiedLog> consumerIterator =
        KafkaUtils.getConsumerIterator(consumer, topicName);
    final List<SimplifiedLog> received = new ArrayList<>();

    return new Callable<Boolean>() {
      @Override
      public Boolean call() throws Exception {
        if (consumerIterator.hasNext()) {
          MessageAndMetadata data = consumerIterator.next();
          received.add((SimplifiedLog) data.message());
          LOGGER.debug(
              "Received message: {} | From partition: {}", data.message(), data.partition());
        }
        consumer.shutdown();
        return received.containsAll(expected);
      }
    };
  }
Exemple #18
0
    /**
     * Builds the Guice injector for the REST API tests: wires a Curator client against the
     * embedded test ZK, enables the new storage stack via a parsed ClusterConfig, installs
     * a mock auth service, and starts the MidoNet backend before creating the injector.
     */
    @Override
    protected Injector getInjector() {
      CuratorFramework curator = newClient(testZk.getConnectString(), new RetryNTimes(10, 500));

      ClusterConfig cfg =
          new ClusterConfig(
              ConfigFactory.parseString(
                  "zookeeper.use_new_stack = true \n"
                      + "zookeeper.curator_enabled = true \n"
                      + "zookeeper.root_key = "
                      + ZK_ROOT_MIDOLMAN
                      + "\n"
                      + "cluster.rest_api.root_uri = "
                      + CONTEXT_PATH
                      + "\n"
                      + "cluster.auth.provider_class = \"org.midonet.cluster.auth.MockAuthService\" "));

      AuthService authService = new MockAuthService(cfg.conf());

      // Start the backend and block until running so storage is ready before injection.
      MidonetBackendService backend =
          new MidonetBackendService(cfg.backend(), curator, null /* metricRegistry */);
      backend.startAsync().awaitRunning();

      FuncTest._injector =
          Guice.createInjector(
              Vladimir.servletModule(
                  backend, curator, cfg, authService, Logger.apply(getLogger(getClass()))),
              new AbstractModule() {
                @Override
                protected void configure() {
                  // Replace real OVSDB/topology integrations with test doubles.
                  bind(OvsdbVtepConnectionProvider.class).to(MockOvsdbVtepConnectionProvider.class);
                  bind(TopologyBackdoor.class).to(ZoomTopologyBackdoor.class);
                }
              });

      return _injector;
    }
  /**
   * Per-test setup: boots a ZK TestingServer on the fixed ZK_PORT (retrying on
   * BindException), creates a Curator client plus a Mockito-spied internal client (spied so
   * tests can inject ZK failures), and builds the ZKTimestampStorage under test.
   */
  @BeforeMethod
  public void initStuff() throws Exception {
    LOG.info("Creating ZK server instance listening in port {}...", ZK_PORT);
    // NOTE(review): busy retry with no sleep/backoff between bind attempts.
    while (zkServer == null) {
      try {
        zkServer = new TestingServer(ZK_PORT);
      } catch (BindException e) {
        System.err.println("Getting bind exception - retrying to allocate server");
        zkServer = null;
      }
    }
    LOG.info("ZK Server Started @ {}", zkServer.getConnectString());

    RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3);

    LOG.info("Creating Zookeeper Client connected to {}", ZK_CLUSTER);
    zkClient =
        CuratorFrameworkFactory.builder()
            .namespace("omid")
            .connectString(ZK_CLUSTER)
            .retryPolicy(retryPolicy)
            .build();
    zkClient.start();
    zkClient.blockUntilConnected();

    LOG.info("Creating Internal Zookeeper Client connected to {}", ZK_CLUSTER);
    // NOTE(review): the spied client is built but not start()ed here — presumably
    // ZKTimestampStorage or the tests start it; verify.
    storageInternalZKClient =
        Mockito.spy(
            CuratorFrameworkFactory.builder()
                .namespace("omid")
                .connectString(ZK_CLUSTER)
                .retryPolicy(retryPolicy)
                .build());

    storage = new ZKTimestampStorage(storageInternalZKClient);
  }
  /**
   * Starts a multi-master Tachyon cluster for fault-tolerance tests: configures and boots
   * {@code mNumOfMasters} masters (leader-elected through the ZooKeeper server held in
   * {@code mCuratorServer}), waits for a leader to start serving, then configures and runs
   * a single block worker on its own thread and re-initializes the client context.
   */
  public void start() throws IOException {
    int maxLevel = 1;
    // Unique scratch home directory for this cluster instance.
    mTachyonHome =
        File.createTempFile("Tachyon", "U" + System.currentTimeMillis()).getAbsolutePath();
    mWorkerDataFolder = "/datastore";

    mHostname = NetworkAddressUtils.getLocalHostName(100);

    // ---- Master configuration: test mode, ZK leader election, ephemeral ("0") ports ----
    mMasterConf = MasterContext.getConf();
    mMasterConf.set(Constants.IN_TEST_MODE, "true");
    mMasterConf.set(Constants.TACHYON_HOME, mTachyonHome);
    mMasterConf.set(Constants.USE_ZOOKEEPER, "true");
    mMasterConf.set(Constants.MASTER_HOSTNAME, mHostname);
    mMasterConf.set(Constants.MASTER_BIND_HOST, mHostname);
    mMasterConf.set(Constants.MASTER_PORT, "0");
    mMasterConf.set(Constants.MASTER_WEB_BIND_HOST, mHostname);
    mMasterConf.set(Constants.MASTER_WEB_PORT, "0");
    mMasterConf.set(Constants.ZOOKEEPER_ADDRESS, mCuratorServer.getConnectString());
    mMasterConf.set(Constants.ZOOKEEPER_ELECTION_PATH, "/election");
    mMasterConf.set(Constants.ZOOKEEPER_LEADER_PATH, "/leader");
    mMasterConf.set(Constants.USER_QUOTA_UNIT_BYTES, "10000");
    mMasterConf.set(Constants.USER_DEFAULT_BLOCK_SIZE_BYTE, Integer.toString(mUserBlockSize));

    // Since tests are always running on a single host keep the resolution timeout low as otherwise
    // people running with strange network configurations will see very slow tests
    mMasterConf.set(Constants.HOST_RESOLUTION_TIMEOUT_MS, "250");

    // Disable hdfs client caching to avoid file system close() affecting other clients
    System.setProperty("fs.hdfs.impl.disable.cache", "true");

    // re-build the dir to set permission to 777
    deleteDir(mTachyonHome);
    mkdir(mTachyonHome);

    for (int k = 0; k < mNumOfMasters; k++) {
      final LocalTachyonMaster master = LocalTachyonMaster.create(mTachyonHome);
      master.start();
      LOG.info(
          "master NO."
              + k
              + " started, isServing: "
              + master.isServing()
              + ", address: "
              + master.getAddress());
      mMasters.add(master);
      // Each master should generate a new port for binding
      mMasterConf.set(Constants.MASTER_PORT, "0");
    }

    // Create the directories for the data and workers after LocalTachyonMaster construction,
    // because LocalTachyonMaster sets the UNDERFS_DATA_FOLDER and UNDERFS_WORKERS_FOLDER.
    mkdir(mMasterConf.get(Constants.UNDERFS_DATA_FOLDER));
    mkdir(mMasterConf.get(Constants.UNDERFS_WORKERS_FOLDER));

    LOG.info("all " + mNumOfMasters + " masters started.");
    LOG.info("waiting for a leader.");
    // Busy-wait until one master wins the ZK leader election and starts serving.
    boolean hasLeader = false;
    while (!hasLeader) {
      for (int i = 0; i < mMasters.size(); i++) {
        if (mMasters.get(i).isServing()) {
          LOG.info(
              "master NO."
                  + i
                  + " is selected as leader. address: "
                  + mMasters.get(i).getAddress());
          hasLeader = true;
          break;
        }
      }
    }
    // Use first master port
    mMasterConf.set(Constants.MASTER_PORT, getMasterPort() + "");

    CommonUtils.sleepMs(10);

    // ---- Worker configuration: inherits master conf, single MEM tier, ephemeral ports ----
    mWorkerConf = WorkerContext.getConf();
    mWorkerConf.merge(mMasterConf);
    mWorkerConf.set(Constants.WORKER_DATA_FOLDER, mWorkerDataFolder);
    mWorkerConf.set(Constants.WORKER_MEMORY_SIZE, mWorkerCapacityBytes + "");
    mWorkerConf.set(Constants.WORKER_TO_MASTER_HEARTBEAT_INTERVAL_MS, 15 + "");

    // Setup conf for worker
    mWorkerConf.set(Constants.WORKER_MAX_TIERED_STORAGE_LEVEL, Integer.toString(maxLevel));
    mWorkerConf.set(String.format(Constants.WORKER_TIERED_STORAGE_LEVEL_ALIAS_FORMAT, 0), "MEM");
    mWorkerConf.set(
        String.format(Constants.WORKER_TIERED_STORAGE_LEVEL_DIRS_PATH_FORMAT, 0),
        mTachyonHome + "/ramdisk");
    mWorkerConf.set(
        String.format(Constants.WORKER_TIERED_STORAGE_LEVEL_DIRS_QUOTA_FORMAT, 0),
        mWorkerCapacityBytes + "");

    // Since tests are always running on a single host keep the resolution timeout low as otherwise
    // people running with strange network configurations will see very slow tests
    mWorkerConf.set(Constants.HOST_RESOLUTION_TIMEOUT_MS, "250");

    // Rebase any extra tier paths under mTachyonHome (no-op while maxLevel == 1).
    for (int level = 1; level < maxLevel; level++) {
      String tierLevelDirPath =
          String.format(Constants.WORKER_TIERED_STORAGE_LEVEL_DIRS_PATH_FORMAT, level);
      String[] dirPaths = mWorkerConf.get(tierLevelDirPath).split(",");
      String newPath = "";
      for (String dirPath : dirPaths) {
        newPath += mTachyonHome + dirPath + ",";
      }
      mWorkerConf.set(
          String.format(Constants.WORKER_TIERED_STORAGE_LEVEL_DIRS_PATH_FORMAT, level),
          newPath.substring(0, newPath.length() - 1));
    }

    mWorkerConf.set(Constants.WORKER_BIND_HOST, mHostname);
    mWorkerConf.set(Constants.WORKER_PORT, "0");
    mWorkerConf.set(Constants.WORKER_DATA_BIND_HOST, mHostname);
    mWorkerConf.set(Constants.WORKER_DATA_PORT, "0");
    mWorkerConf.set(Constants.WORKER_WEB_BIND_HOST, mHostname);
    mWorkerConf.set(Constants.WORKER_WEB_PORT, "0");
    mWorkerConf.set(Constants.WORKER_MIN_WORKER_THREADS, "1");
    mWorkerConf.set(Constants.WORKER_MAX_WORKER_THREADS, "100");

    // Perform immediate shutdown of data server. Graceful shutdown is unnecessary and slow
    mWorkerConf.set(Constants.WORKER_NETWORK_NETTY_SHUTDOWN_QUIET_PERIOD, Integer.toString(0));
    mWorkerConf.set(Constants.WORKER_NETWORK_NETTY_SHUTDOWN_TIMEOUT, Integer.toString(0));

    // Run the worker on its own thread; process() blocks for the worker's lifetime.
    mWorker = new BlockWorker();
    Runnable runWorker =
        new Runnable() {
          @Override
          public void run() {
            try {
              mWorker.process();
            } catch (Exception e) {
              throw new RuntimeException(e + " \n Start Master Error \n" + e.getMessage(), e);
            }
          }
        };
    mWorkerThread = new Thread(runWorker);
    mWorkerThread.start();
    // The client context should reflect the updates to the conf.
    if (sReinitializer == null) {
      ClientContext.accessReinitializer(sReinitializerAccesser);
    }
    sReinitializer.reinitializeWithConf(mWorkerConf);
  }
  /**
   * Prepares the embedded test environment: creates temp dirs for ZK and each Kafka broker,
   * starts a ZooKeeper TestingServer plus {@code numKafkaServers} brokers, and builds the
   * standard consumer properties used by the tests.
   */
  @Override
  public void prepare(
      int numKafkaServers, Properties additionalServerProperties, boolean secureMode) {
    this.additionalServerProperties = additionalServerProperties;
    File tempDir = new File(System.getProperty("java.io.tmpdir"));

    tmpZkDir = new File(tempDir, "kafkaITcase-zk-dir-" + (UUID.randomUUID().toString()));
    try {
      Files.createDirectories(tmpZkDir.toPath());
    } catch (IOException e) {
      fail("cannot create zookeeper temp dir: " + e.getMessage());
    }

    tmpKafkaParent = new File(tempDir, "kafkaITcase-kafka-dir" + (UUID.randomUUID().toString()));
    try {
      Files.createDirectories(tmpKafkaParent.toPath());
    } catch (IOException e) {
      fail("cannot create kafka temp dir: " + e.getMessage());
    }

    tmpKafkaDirs = new ArrayList<>(numKafkaServers);
    for (int i = 0; i < numKafkaServers; i++) {
      File tmpDir = new File(tmpKafkaParent, "server-" + i);
      assertTrue("cannot create kafka temp dir", tmpDir.mkdir());
      tmpKafkaDirs.add(tmpDir);
    }

    zookeeper = null;
    brokers = null;

    try {
      LOG.info("Starting Zookeeper");
      // Port -1 lets the TestingServer pick a free ephemeral port.
      zookeeper = new TestingServer(-1, tmpZkDir);
      zookeeperConnectionString = zookeeper.getConnectString();

      LOG.info("Starting KafkaServer");
      brokers = new ArrayList<>(numKafkaServers);

      for (int i = 0; i < numKafkaServers; i++) {
        brokers.add(getKafkaServer(i, tmpKafkaDirs.get(i)));
        SocketServer socketServer = brokers.get(i).socketServer();

        String host = socketServer.host() == null ? "localhost" : socketServer.host();
        // NOTE(review): appends to a field and leaves a trailing comma; repeated prepare()
        // calls would keep accumulating broker URLs — verify callers reset it.
        brokerConnectionString += hostAndPortToUrlString(host, socketServer.port()) + ",";
      }

      LOG.info("ZK and KafkaServer started.");
    } catch (Throwable t) {
      t.printStackTrace();
      fail("Test setup failed: " + t.getMessage());
    }

    standardProps = new Properties();
    standardProps.setProperty("zookeeper.connect", zookeeperConnectionString);
    standardProps.setProperty("bootstrap.servers", brokerConnectionString);
    standardProps.setProperty("group.id", "flink-tests");
    standardProps.setProperty("auto.commit.enable", "false");
    standardProps.setProperty(
        "zookeeper.session.timeout.ms",
        "30000"); // 6 seconds is default. Seems to be too small for travis.
    standardProps.setProperty("zookeeper.connection.timeout.ms", "30000");
    standardProps.setProperty(
        "auto.offset.reset", "smallest"); // read from the beginning. (smallest is kafka 0.8)
    standardProps.setProperty(
        "fetch.message.max.bytes", "256"); // make a lot of fetches (MESSAGES MUST BE SMALLER!)
  }
Exemple #22
0
 /** Tears down the embedded services in reverse start order: client, broker, ZK server. */
 public void stop() throws IOException {
   if (zkClient != null) {
     zkClient.close();
   }
   if (kafkaServer != null) {
     kafkaServer.shutdown();
   }
   if (zkServer != null) {
     zkServer.stop();
   }
   LOGGER.debug("Zookeeper / Kafka services stopped!");
 }
Exemple #23
0
 /**
  * Returns the connection string of the embedded ZooKeeper server.
  *
  * @throws IllegalStateException if the server has not been started yet
  */
 protected String getZkConnectString() {
   if (zkServer == null) {
     throw new IllegalStateException("Zookeeper server is not initialized");
   }
   return zkServer.getConnectString();
 }