Code example #1
  @Test
  public void testGetBrokerInfo() throws Exception {
    String host = "localhost";
    int port = 9092;
    int partition = 0;
    addPartition(partition, host, port);
    GlobalPartitionInformation brokerInfo = dynamicBrokersReader.getBrokerInfo();
    assertEquals(1, brokerInfo.getOrderedPartitions().size());
    assertEquals(port, brokerInfo.getBrokerFor(partition).port);
    assertEquals(host, brokerInfo.getBrokerFor(partition).host);
  }
Code example #2
  @Test
  public void testSwitchHostForPartition() throws Exception {
    String host = "localhost";
    int port = 9092;
    int partition = 0;
    addPartition(partition, host, port);
    GlobalPartitionInformation brokerInfo = dynamicBrokersReader.getBrokerInfo();
    assertEquals(port, brokerInfo.getBrokerFor(partition).port);
    assertEquals(host, brokerInfo.getBrokerFor(partition).host);

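    // Re-register the same partition on a different host/port and verify the reader picks up the switch.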
    String newHost = host + "switch";
    int newPort = port + 1;
    addPartition(partition, newHost, newPort);
    brokerInfo = dynamicBrokersReader.getBrokerInfo();
    assertEquals(newPort, brokerInfo.getBrokerFor(partition).port);
    assertEquals(newHost, brokerInfo.getBrokerFor(partition).host);
  }
Code example #3
  @Test
  public void testMultiplePartitionsOnSameHost() throws Exception {
    String host = "localhost";
    int port = 9092;
    int partition = 0;
    int secondPartition = partition + 1;
    addPartition(partition, 0, host, port);
    addPartition(secondPartition, 0, host, port);

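    // Two partitions registered against one broker: both lookups should return the same host/port.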
    GlobalPartitionInformation brokerInfo = dynamicBrokersReader.getBrokerInfo();
    assertEquals(2, brokerInfo.getOrderedPartitions().size());

    assertEquals(port, brokerInfo.getBrokerFor(partition).port);
    assertEquals(host, brokerInfo.getBrokerFor(partition).host);

    assertEquals(port, brokerInfo.getBrokerFor(secondPartition).port);
    assertEquals(host, brokerInfo.getBrokerFor(secondPartition).host);
  }
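
Code examples #1–#3 depend on a test fixture that is not shown here: the dynamicBrokersReader field and the addPartition(...) helpers. Below is a minimal sketch of what that fixture could look like, assuming storm-kafka's DynamicBrokersReader, Curator's in-process TestingServer, and JUnit 4; the topic name "testTopic", the ZooKeeper paths, and the JSON fields are illustrative, not taken from the original.

  private TestingServer server;                 // in-process ZooKeeper (Curator test module)
  private CuratorFramework zookeeper;
  private DynamicBrokersReader dynamicBrokersReader;

  @Before
  public void setUp() throws Exception {
    server = new TestingServer();
    Map<String, Object> conf = new HashMap<>();
    conf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 1000);
    conf.put(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT, 1000);
    conf.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, 4);
    conf.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, 5);
    zookeeper = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1000));
    zookeeper.start();
    dynamicBrokersReader = new DynamicBrokersReader(conf, server.getConnectString(), "/brokers", "testTopic");
  }

  private void addPartition(int partition, String host, int port) throws Exception {
    addPartition(partition, 0, host, port);   // default to broker id 0
  }

  // Registers broker metadata under the given id and marks that broker as the leader for
  // the partition, mirroring the ZooKeeper layout that DynamicBrokersReader reads.
  private void addPartition(int partition, int brokerId, String host, int port) throws Exception {
    writeJson("/brokers/ids/" + brokerId,
        "{\"host\":\"" + host + "\",\"jmx_port\":9999,\"port\":" + port + ",\"version\":1}");
    writeJson("/brokers/topics/testTopic/partitions/" + partition + "/state",
        "{\"leader\":" + brokerId + ",\"leader_epoch\":1,\"version\":1}");
  }

  private void writeJson(String path, String json) throws Exception {
    ZKPaths.mkdirs(zookeeper.getZookeeperClient().getZooKeeper(), path);
    zookeeper.setData().forPath(path, json.getBytes("UTF-8"));
  }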
Code example #4
  public static void main(String[] args)
      throws InterruptedException, AlreadyAliveException, InvalidTopologyException,
          AuthorizationException {

    TopologyBuilder builder = new TopologyBuilder();
    Properties config = PropertiesUtil.getPropertiesFromClassPath(CONFIG_NAME);

    if (args.length >= 1) {
      final String topic = args[0];
      logger.info("Reading from topic = " + topic);

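      // Static broker list: partition 0 of the input topic lives on the broker named in the properties file.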
      Broker brokerForPartition0 = new Broker(config.getProperty(BROKER));
      GlobalPartitionInformation partitionInfo = new GlobalPartitionInformation();
      partitionInfo.addPartition(0, brokerForPartition0);
      StaticHosts hosts = new StaticHosts(partitionInfo);

      SpoutConfig spoutConfig =
          new SpoutConfig(hosts, topic, "/" + topic, UUID.randomUUID().toString());

      KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

      builder.setSpout(KAFKA_SPOUT_READER, kafkaSpout, 1);

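      // Spread the incoming records across four KafkaGrouperBolt executors.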
      builder
          .setBolt(KAFKA_AVRO_GROUPPER, new KafkaGrouperBolt(), 4)
          .shuffleGrouping(KAFKA_SPOUT_READER);

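      // Write each record back to Kafka, choosing the destination topic from the tuple's RANDOM field.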
      builder
          .setBolt(
              KAFKA_AVRO_SENDER,
              new KafkaOutBolt()
                  .withTopicSelector(
                      new KafkaTopicSelector() {
                        private static final long serialVersionUID = -9189810246209479112L;

                        @Override
                        public String getTopic(Tuple tuple) {
                          Integer rand = (Integer) tuple.getValueByField(KafkaSenderFields.RANDOM);
                          return KafkaSenderFields.RANDOM + rand;
                        }
                      }),
              1)
          .fieldsGrouping(KAFKA_AVRO_GROUPPER, new Fields(KafkaSenderFields.RANDOM));

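      // Read the fan-out topics random1–random3 back, one spout per topic.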
      builder.setSpout(
          KAFKA_SPOUT_READER_RANDOM1,
          new KafkaSpout(
              new SpoutConfig(hosts, "random1", "/random1", UUID.randomUUID().toString())),
          1);
      builder.setSpout(
          KAFKA_SPOUT_READER_RANDOM2,
          new KafkaSpout(
              new SpoutConfig(hosts, "random2", "/random2", UUID.randomUUID().toString())),
          1);
      builder.setSpout(
          KAFKA_SPOUT_READER_RANDOM3,
          new KafkaSpout(
              new SpoutConfig(hosts, "random3", "/random3", UUID.randomUUID().toString())),
          1);

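      // Validate each fan-out stream with a dedicated checker bolt.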
      builder
          .setBolt(KAFKA_AVRO_CHECKER1, new KafkaCheckerBolt(1), 4)
          .shuffleGrouping(KAFKA_SPOUT_READER_RANDOM1);
      builder
          .setBolt(KAFKA_AVRO_CHECKER2, new KafkaCheckerBolt(2), 4)
          .shuffleGrouping(KAFKA_SPOUT_READER_RANDOM2);
      builder
          .setBolt(KAFKA_AVRO_CHECKER3, new KafkaCheckerBolt(3), 4)
          .shuffleGrouping(KAFKA_SPOUT_READER_RANDOM3);

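      // Assemble the topology and submit it to the cluster.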
      StormTopology kafkaTopology = builder.createTopology();
      StormSubmitter.submitTopology(TOPOLOGY_NAME, getConfig(config), kafkaTopology);
    } else {
      throw new IllegalArgumentException("topic name is required");
    }
  }
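
Code example #4 also refers to constants and helpers defined elsewhere in its class (CONFIG_NAME, BROKER, the KAFKA_* component names, TOPOLOGY_NAME, and getConfig). As one hedged possibility, getConfig could simply copy the loaded Properties into a Storm Config; the worker count below is illustrative, not from the original.

  private static Config getConfig(Properties props) {
    Config conf = new Config();
    // Make every property from the classpath config file visible to the workers.
    for (String name : props.stringPropertyNames()) {
      conf.put(name, props.getProperty(name));
    }
    conf.setNumWorkers(2); // illustrative worker count
    return conf;
  }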