@Override
public void setup() {
  LOGGER.setLevel(Level.DEBUG);
  LOGGER.debug("HBASE TEST SETUP!");
  if (!TestUtils.isSet(zookeeper)) {
    zookeeper = System.getProperty("zookeeperUrl");
    if (!TestUtils.isSet(zookeeper)) {
      // No external quorum configured, so spin up local mini clusters.
      PropertyParser propertyParser = null;
      try {
        propertyParser = new PropertyParser(HBASE_PROPS_FILE);
        propertyParser.parsePropsFile();
      } catch (final IOException e) {
        LOGGER.error("Unable to load property file: " + HBASE_PROPS_FILE, e);
        // Fail fast: every builder call below needs the parsed properties.
        Assert.fail();
      }
      // The mini clusters need HADOOP_HOME as a system property on Windows.
      if (System.getProperty("os.name").startsWith("Windows")) {
        System.setProperty("HADOOP_HOME", System.getenv("HADOOP_HOME"));
      }
      try {
        zookeeperLocalCluster =
            new ZookeeperLocalCluster.Builder()
                .setPort(
                    Integer.parseInt(propertyParser.getProperty(ConfigVars.ZOOKEEPER_PORT_KEY)))
                .setTempDir(propertyParser.getProperty(ConfigVars.ZOOKEEPER_TEMP_DIR_KEY))
                .setZookeeperConnectionString(
                    propertyParser.getProperty(ConfigVars.ZOOKEEPER_CONNECTION_STRING_KEY))
                .build();
        zookeeperLocalCluster.start();
      } catch (final Exception e) {
        LOGGER.error("Exception starting zookeeperLocalCluster", e);
        Assert.fail();
      }
      zookeeper = zookeeperLocalCluster.getZookeeperConnectionString();
      LOGGER.debug("Using local zookeeper URL: " + zookeeper);
      try {
        // Allow schema changes on enabled tables during tests.
        final Configuration conf = new Configuration();
        conf.set("hbase.online.schema.update.enable", "true");
        hbaseLocalCluster =
            new HbaseLocalCluster.Builder()
                .setHbaseMasterPort(
                    Integer.parseInt(propertyParser.getProperty(ConfigVars.HBASE_MASTER_PORT_KEY)))
                .setHbaseMasterInfoPort(
                    Integer.parseInt(
                        propertyParser.getProperty(ConfigVars.HBASE_MASTER_INFO_PORT_KEY)))
                .setNumRegionServers(
                    Integer.parseInt(
                        propertyParser.getProperty(ConfigVars.HBASE_NUM_REGION_SERVERS_KEY)))
                .setHbaseRootDir(propertyParser.getProperty(ConfigVars.HBASE_ROOT_DIR_KEY))
                .setZookeeperPort(
                    Integer.parseInt(propertyParser.getProperty(ConfigVars.ZOOKEEPER_PORT_KEY)))
                .setZookeeperConnectionString(
                    propertyParser.getProperty(ConfigVars.ZOOKEEPER_CONNECTION_STRING_KEY))
                .setZookeeperZnodeParent(
                    propertyParser.getProperty(ConfigVars.HBASE_ZNODE_PARENT_KEY))
                .setHbaseWalReplicationEnabled(
                    Boolean.parseBoolean(
                        propertyParser.getProperty(ConfigVars.HBASE_WAL_REPLICATION_ENABLED_KEY)))
                .setHbaseConfiguration(conf)
                .build();
        hbaseLocalCluster.start();
      } catch (final Exception e) {
        LOGGER.error("Exception starting hbaseLocalCluster", e);
        Assert.fail();
      }
    } else {
      LOGGER.debug("Using system zookeeper URL: " + zookeeper);
    }
  } else {
    LOGGER.debug("Using system zookeeper URL: " + zookeeper);
  }
}
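// The setup() above starts mini clusters but nothing here stops them. A
// minimal teardown sketch, assuming the same hbaseLocalCluster and
// zookeeperLocalCluster fields; stop(boolean cleanUp) is the same
// hadoop-mini-clusters call the launcher below uses via stop(DO_CLEAN_UP).
// The tearDown() name is a placeholder for whatever hook the test
// framework in use provides.
@Override
public void tearDown() {
  // Stop in reverse start order: HBase first, then its ZooKeeper quorum.
  // stop(true) also removes the clusters' temp directories.
  try {
    if (hbaseLocalCluster != null) {
      hbaseLocalCluster.stop(true);
    }
    if (zookeeperLocalCluster != null) {
      zookeeperLocalCluster.stop(true);
    }
  } catch (final Exception e) {
    LOGGER.error("Exception stopping mini clusters", e);
  }
}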
public static void main(final String[] args) throws Exception {
  // The original snippet used propertyParser without initializing it; load
  // the defaults here (ConfigVars.DEFAULT_PROPS_FILE is an assumption --
  // substitute the project's actual properties file).
  final PropertyParser propertyParser = new PropertyParser(ConfigVars.DEFAULT_PROPS_FILE);
  propertyParser.parsePropsFile();

  final ZookeeperLocalCluster zookeeperLocalCluster =
      new ZookeeperLocalCluster.Builder()
          .setPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.ZOOKEEPER_PORT_KEY)))
          .setTempDir(propertyParser.getProperty(ConfigVars.ZOOKEEPER_TEMP_DIR_KEY))
          .setZookeeperConnectionString(
              propertyParser.getProperty(ConfigVars.ZOOKEEPER_CONNECTION_STRING_KEY))
          .build();

  final KafkaLocalBroker kafkaLocalBroker =
      new KafkaLocalBroker.Builder()
          .setKafkaHostname(propertyParser.getProperty(ConfigVars.KAFKA_HOSTNAME_KEY))
          .setKafkaPort(Integer.parseInt(propertyParser.getProperty(ConfigVars.KAFKA_PORT_KEY)))
          .setKafkaBrokerId(
              Integer.parseInt(propertyParser.getProperty(ConfigVars.KAFKA_TEST_BROKER_ID_KEY)))
          .setKafkaProperties(new Properties())
          .setKafkaTempDir(propertyParser.getProperty(ConfigVars.KAFKA_TEST_TEMP_DIR_KEY))
          .setZookeeperConnectionString(
              propertyParser.getProperty(ConfigVars.ZOOKEEPER_CONNECTION_STRING_KEY))
          .build();

  final StormLocalCluster stormLocalCluster =
      new StormLocalCluster.Builder()
          .setZookeeperHost(propertyParser.getProperty(ConfigVars.ZOOKEEPER_HOST_KEY))
          .setZookeeperPort(
              Long.parseLong(propertyParser.getProperty(ConfigVars.ZOOKEEPER_PORT_KEY)))
          // The original called setEnableDebug twice (once from the property,
          // then hard-coded true); keep the property-driven value.
          .setEnableDebug(
              Boolean.parseBoolean(propertyParser.getProperty(ConfigVars.STORM_ENABLE_DEBUG_KEY)))
          .setNumWorkers(
              Integer.parseInt(propertyParser.getProperty(ConfigVars.STORM_NUM_WORKERS_KEY)))
          .setStormConfig(new Config())
          .build();

  try {
    zookeeperLocalCluster.start();
    kafkaLocalBroker.start();
    stormLocalCluster.start();
  } catch (final Exception e) {
    LOG.error("Couldn't start the services", e);
    return; // nothing to run against if the local services failed to start
  }

  // Emit simulated truck sensor events into Kafka.
  final SensorEventsParam sensorEventsParam = new SensorEventsParam();
  sensorEventsParam.setEventEmitterClassName(
      "com.hortonworks.simulator.impl.domain.transport.Truck");
  sensorEventsParam.setEventCollectorClassName(
      "com.hortonworks.solution.KafkaSensorEventCollector");
  sensorEventsParam.setNumberOfEvents(200);
  sensorEventsParam.setDelayBetweenEvents(1000);
  sensorEventsParam.setRouteDirectory(Launcher.class.getResource("/routes/midwest").getPath());
  sensorEventsParam.setTruckSymbolSize(10000);
  final SensorEventsGenerator sensorEventsGenerator = new SensorEventsGenerator();
  sensorEventsGenerator.generateTruckEventsStream(sensorEventsParam);

  // Read the events back out of Kafka with a Storm spout.
  final BrokerHosts hosts = new ZkHosts(zookeeperLocalCluster.getZookeeperConnectionString());
  final String topic = "truck_events";
  final String zkRoot = "/trucks";
  final String consumerGroupId = "group1";
  final SpoutConfig spoutConfig = new SpoutConfig(hosts, topic, zkRoot, consumerGroupId);
  spoutConfig.scheme = new SchemeAsMultiScheme(new TruckScheme2());
  final KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

  final int spoutCount = 10;
  final int boltCount = 1; // no bolts are wired yet; see the sketch below

  final TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout("kafkaSpout", kafkaSpout, spoutCount);
  final StormTopology topology = builder.createTopology();
  stormLocalCluster.submitTopology(
      "part2: Truck events to Kafka", stormLocalCluster.getStormConf(), topology);

  // Shut the services down in reverse start order on ctrl-c.
  Runtime.getRuntime()
      .addShutdownHook(
          new Thread() {
            @Override
            public void run() {
              try {
                stormLocalCluster.stop(DO_CLEAN_UP);
                kafkaLocalBroker.stop(DO_CLEAN_UP);
                zookeeperLocalCluster.stop(DO_CLEAN_UP);
              } catch (final Exception e) {
                LOG.error("Couldn't shut down the services", e);
              }
            }
          });

  // Run until ctrl-c'd or stopped from the IDE; sleep so the loop doesn't
  // peg a core.
  while (true) {
    Thread.sleep(1000);
  }
}
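// boltCount above is never used: the topology stops at the spout, so the
// truck events are decoded but go nowhere. A minimal sketch of a terminal
// logging bolt behind the spout; LogTruckEventsBolt is a hypothetical name,
// and the Storm types come from backtype.storm.* on Storm 0.x or
// org.apache.storm.* on 1.x+, matching whichever release the project uses.
public class LogTruckEventsBolt extends BaseBasicBolt {
  private static final Logger LOG = LoggerFactory.getLogger(LogTruckEventsBolt.class);

  @Override
  public void execute(final Tuple tuple, final BasicOutputCollector collector) {
    // Terminal bolt: log the decoded truck event, emit nothing downstream.
    LOG.info("truck event: {}", tuple.getValues());
  }

  @Override
  public void declareOutputFields(final OutputFieldsDeclarer declarer) {
    // No output fields: this bolt ends the stream.
  }
}

// Wiring, placed before builder.createTopology() in main():
//   builder.setBolt("truckEventLogger", new LogTruckEventsBolt(), boltCount)
//       .shuffleGrouping("kafkaSpout");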