/** @param args the command line arguments: 1) Kafka broker list (host:port) */
  public static void main(String[] args) {
    Properties props = new Properties();
    if (args.length >= 1) {
      props.put("metadata.broker.list", args[0]);
    } else {
      props.put("metadata.broker.list", "192.168.47.129:9093");
    }
    props.put("serializer.class", "eu.europeana.cloud.service.dps.storm.kafka.JsonEncoder");
    props.put("request.required.acks", "1");

    ProducerConfig config = new ProducerConfig(props);
    Producer<String, DpsTask> producer = new Producer<>(config);

    DpsTask msg = new DpsTask();

    msg.setTaskName(PluginParameterKeys.NEW_ANNOTATION_MESSAGE);

    msg.addParameter(PluginParameterKeys.INDEX_DATA, "True");
    // `indexers` is assumed to be a String[] of indexer names defined elsewhere in the class
    IndexerInformations ii =
        new IndexerInformations(indexers[0], "index_mlt_4", "mlt4", "192.168.47.129:9300");
    msg.addParameter(PluginParameterKeys.INDEXER, ii.toTaskString());
    msg.addParameter(PluginParameterKeys.FILE_URL, "url to annotation");

    KeyedMessage<String, DpsTask> data =
        new KeyedMessage<>(IndexerConstants.KAFKA_INPUT_TOPIC, msg);
    producer.send(data);
    producer.close();
  }
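The serializer.class above points at a custom JSON encoder whose source is not part of this example. As a rough sketch under that caveat, an encoder for the legacy kafka.javaapi.producer API implements kafka.serializer.Encoder; the Jackson usage below is an assumption, and the real eu.europeana.cloud JsonEncoder may differ:

  // Sketch only: the real eu.europeana.cloud JsonEncoder may differ.
  public class JsonEncoder implements kafka.serializer.Encoder<DpsTask> {
    private final com.fasterxml.jackson.databind.ObjectMapper mapper =
        new com.fasterxml.jackson.databind.ObjectMapper();

    // The legacy producer instantiates encoders reflectively via this constructor.
    public JsonEncoder(kafka.utils.VerifiableProperties props) {}

    @Override
    public byte[] toBytes(DpsTask task) {
      try {
        return mapper.writeValueAsBytes(task);
      } catch (java.io.IOException e) {
        throw new RuntimeException("Failed to serialize DpsTask as JSON", e);
      }
    }
  }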
Example #2
  public static void main(String[] args) throws IOException, ConnectionException {

    Logger.getRootLogger().setLevel(Level.WARN);

    // *** start the storm cluster
    LocalCluster cluster = new LocalCluster();

    // *** start kafka
    LocalKafkaBroker broker = new LocalKafkaBroker(0, 9090, 4, "localhost:2000");
    ReplayConfig replay =
        new ReplayConfig().staticHosts(broker.getHostPortStrings(), broker.getNumPartitions());

    // *** build a topology
    // KarmaConfig karmaConfig = new KarmaConfigImpl("a", replay, new InMemoryReducerState());
    KarmaConfig karmaConfig = new KarmaConfigImpl("a", replay, new CassandraReducerState("demo"));
    StormTopology topology = buildTopology(karmaConfig);

    // *** submit the topology to storm
    Config config = new Config();
    config.setMaxSpoutPending(50);
    cluster.submitTopology("bankdemo", config, topology);

    // *** send some events
    Producer<Long, Message> kafkaProducer = broker.buildSyncProducer();
    JsonProducer mb = new JsonProducer(kafkaProducer).printSendsToConsole(true);

    sendBankingDemoMessages(mb);

    Utils.sleep(100000);
    kafkaProducer.close();
  }
Example #3
  public static void main(String[] args) {
    // int arithmetic runs left to right: 1 + 1 + '1' (char 49) + 1 + 1 + 1 + 1 = 55,
    // then concatenating "1" produces the string "551"
    String s = 1 + 1 + '1' + 1 + 1 + 1 + 1 + "1";
    System.out.println(s);
    Random rnd = new Random();
    Properties props = new Properties();

    props.put("metadata.broker.list", "120.25.204.152:9092");
    props.put("serializer.class", "kafka.serializer.StringEncoder");
    props.put("partitioner.class", "com.mt.kafka.SimplePartitioner");
    props.put("request.required.acks", "1");
    ProducerConfig config = new ProducerConfig(props);

    Producer<String, String> producer = new Producer<String, String>(config);

    for (long nEvents = 0; nEvents < 4; nEvents++) {
      long runtime = new Date().getTime();
      String ip = "192.168.2." + rnd.nextInt(255);
      String msg = runtime + ",www.example.com," + ip;
      KeyedMessage<String, String> data = new KeyedMessage<String, String>("page_visits", ip, msg);
      producer.send(data);
    }
    producer.close();
  }
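The partitioner.class property references com.mt.kafka.SimplePartitioner, which is not shown. A typical implementation for this legacy API, modeled on the well-known Kafka 0.8 producer example (so treat it as a sketch), partitions on the last octet of the IP key:

  public class SimplePartitioner implements kafka.producer.Partitioner {
    // The legacy producer instantiates partitioners reflectively via this constructor.
    public SimplePartitioner(kafka.utils.VerifiableProperties props) {}

    @Override
    public int partition(Object key, int numPartitions) {
      // e.g. "192.168.2.42" -> 42 % numPartitions
      String ip = (String) key;
      return Integer.parseInt(ip.substring(ip.lastIndexOf('.') + 1)) % numPartitions;
    }
  }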
Example #4
 /*
  * (non-Javadoc)
  * @see org.apache.camel.impl.DefaultProducer#doStop()
  */
 @Override
 protected void doStop() throws Exception {
   super.doStop();
   producer.close();
   if (LOG.isInfoEnabled()) {
     LOG.info("Kafka Producer Component stoped");
   }
 }
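For symmetry, a plausible doStart() counterpart would create the producer that doStop() closes. This is a sketch: the endpoint accessor name below is an assumption, not this component's confirmed API.

  @Override
  protected void doStart() throws Exception {
    super.doStart();
    // `endpoint.getProducerProperties()` is a hypothetical accessor for the broker config
    producer = new Producer<String, String>(new ProducerConfig(endpoint.getProducerProperties()));
    if (LOG.isInfoEnabled()) {
      LOG.info("Kafka Producer Component started");
    }
  }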
Example #5
  public void sendKafka(String message) {

    setPropsKafkaConfigHolder();
    setProducer();
    setKafkaMessage(message);
    producer.send(kafkaMessage);
    producer.close();
  }
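The helper methods and fields this snippet relies on are not shown. One plausible shape, assuming String messages, with the broker address and topic name as placeholders:

  private Properties props;
  private Producer<String, String> producer;
  private KeyedMessage<String, String> kafkaMessage;

  private void setPropsKafkaConfigHolder() {
    props = new Properties();
    props.put("metadata.broker.list", "localhost:9092"); // assumed broker
    props.put("serializer.class", "kafka.serializer.StringEncoder");
  }

  private void setProducer() {
    producer = new Producer<String, String>(new ProducerConfig(props));
  }

  private void setKafkaMessage(String message) {
    kafkaMessage = new KeyedMessage<String, String>("topic", message); // assumed topic name
  }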
Example #6
  public static void main(String[] args) {
    String topic;
    String url;

    if (args.length < 2) { // fall back to defaults unless both broker URL and topic are given
      url = "localhost:9092";
      topic = "sensorStream";
    } else {
      url = args[0];
      topic = args[1];
    }

    String[] sensorEvents =
        new String[] {
          "<events>\n"
              + "    <event>\n"
              + "        <payloadData>\n"
              + "            <sensorId>ID1</sensorId>\n"
              + "            <sensorVersion>version1</sensorVersion>\n"
              + "            <sensorValue>45</sensorValue>\n"
              + "        </payloadData>\n"
              + "    </event>\n"
              + "</events>",
          "<events>\n"
              + "    <event>\n"
              + "        <payloadData>\n"
              + "            <sensorId>ID2</sensorId>\n"
              + "            <sensorVersion>version2</sensorVersion>\n"
              + "            <sensorValue>43</sensorValue>\n"
              + "        </payloadData>\n"
              + "    </event>\n"
              + "</events>",
          "<events>\n"
              + "    <event>\n"
              + "        <payloadData>\n"
              + "            <sensorId>ID1</sensorId>\n"
              + "            <sensorVersion>version3</sensorVersion>\n"
              + "            <sensorValue>23</sensorValue>\n"
              + "        </payloadData>\n"
              + "    </event>\n"
              + "</events>"
        };

    Properties props = new Properties();
    props.put("metadata.broker.list", url);
    props.put("serializer.class", "kafka.serializer.StringEncoder");

    ProducerConfig config = new ProducerConfig(props);
    Producer<String, Object> producer = new Producer<String, Object>(config);

    for (String sensorEvent : sensorEvents) {
      KeyedMessage<String, Object> data = new KeyedMessage<String, Object>(topic, sensorEvent);
      producer.send(data);
    }

    producer.close();
  }
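To check what this publishes, the matching legacy high-level consumer can read the topic back. A minimal sketch; the ZooKeeper address and group id are assumptions:

    Properties cprops = new Properties();
    cprops.put("zookeeper.connect", "localhost:2181"); // assumed ZooKeeper address
    cprops.put("group.id", "sensor-reader");
    cprops.put("auto.offset.reset", "smallest");
    ConsumerConnector consumer =
        kafka.consumer.Consumer.createJavaConsumerConnector(new ConsumerConfig(cprops));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, 1);
    KafkaStream<byte[], byte[]> stream =
        consumer.createMessageStreams(topicCountMap).get(topic).get(0);
    ConsumerIterator<byte[], byte[]> it = stream.iterator();
    while (it.hasNext()) {
      System.out.println(new String(it.next().message()));
    }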
Example #7
 public void awaitShutdown() {
   try {
     shutdownComplete.await();
     producer.close();
     logger.info("Producer thread " + threadName + " shutdown complete");
   } catch (InterruptedException ie) {
     logger.warn("Interrupt during shutdown of ProducerThread", ie);
   }
 }
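awaitShutdown() depends on a shutdownComplete latch that the snippet does not define. A plausible wiring, where every name beyond those used above is an assumption:

  private final CountDownLatch shutdownComplete = new CountDownLatch(1);
  private volatile boolean running = true;

  public void shutdown() {
    running = false; // the send loop observes this flag
  }

  public void run() {
    while (running) {
      // dequeue and producer.send(...) pending messages here
    }
    shutdownComplete.countDown(); // releases awaitShutdown()
  }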
Example #8
  public void sendMultiMessageKafka(String message) {

    setPropsKafkaConfigHolder();
    setProducer();
    for (int i = 0; i < 20; i++) {
      setKafkaMultiMessage(message);
    }
    producer.send(kafkaMultiMessage);
    producer.close();
  }
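kafkaMultiMessage is presumably a batch: the legacy javaapi producer also accepts a List of KeyedMessage in a single send. A hypothetical helper consistent with the loop above (topic name assumed):

  private List<KeyedMessage<String, String>> kafkaMultiMessage =
      new ArrayList<KeyedMessage<String, String>>();

  private void setKafkaMultiMessage(String message) {
    kafkaMultiMessage.add(new KeyedMessage<String, String>("topic", message));
  }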
Example #9
  public void send(String topic, String message) throws Exception {

    Producer<Integer, String> producer =
        new kafka.javaapi.producer.Producer<Integer, String>(
            new ProducerConfig(Config.getInstance().getProperties(Config.CONTEXT.PRODUCER)));

    Config.getInstance()
        .getProperties(Config.CONTEXT.PRODUCER)
        .forEach((x, y) -> LOG.debug(x + "=" + y));
    producer.send(new KeyedMessage<Integer, String>(topic, message));
    producer.close();
  }
Example #10
 public void start() throws IOException {
   KafkaProducer kafkaProducer = new KafkaProducer();
   Producer<Integer, byte[]> producer = kafkaProducer.GetProducer();
   Map<String, byte[]> hashMap = new HashMap<String, byte[]>();
   byte[] staticPayload = new byte[100];
   Arrays.fill(staticPayload, (byte) 0);
   hashMap.put("word", staticPayload);
   for (long i = 0; i < amountOfNumbers; i++) {
     KeyedMessage<Integer, byte[]> keyedMessage =
         new KeyedMessage<Integer, byte[]>(topicName, convert.toByteFrom(hashMap));
     producer.send(keyedMessage);
   }
   producer.close();
 }
Example #11
  private void testProduceAMessage() throws TimeoutException {

    // Produce a message so we can check new offsets.
    ProducerConfig conf = kafkaRule.producerConfigWithStringEncoder();
    Producer<String, String> producer = new Producer<>(conf);
    producer.send(new KeyedMessage<>(testTopicName, "key", "value"));
    producer.close();

    // Verify publish
    List<String> messages = kafkaRule.readStringMessages(testTopicName, 1);
    assertThat(messages, is(notNullValue()));
    assertThat(messages.size(), is(1));
    assertThat(messages.get(0), is("value"));
  }
Example #12
  @Override
  public void send(int seerCount, int subPubCount)
      throws JsonGenerationException, JsonMappingException, IOException, SeerReportingException {

    String messagePayloadAsJson = new ObjectMapper().writeValueAsString(messageSource);

    if (directlyToKafkaV1) {

      Producer<String, String> subpubProducer;
      Properties props = new Properties();
      // example brokers: 10.252.5.240, 10.252.1.99, 10.252.3.239
      props.put("metadata.broker.list", kafkaBroker);
      props.put("serializer.class", "kafka.serializer.StringEncoder");
      props.put("request.required.acks", "1");
      subpubProducer = new Producer<String, String>(new ProducerConfig(props));
      KeyedMessage<String, String> data =
          new KeyedMessage<String, String>(seerTopic, messagePayloadAsJson);

      while (seerCount > 0) {
        subpubProducer.send(data);
        seerCount--;
      }

      subpubProducer.close();

      System.out.println(
          "Publishing message to "
              + seerTopic
              + " topic; The kafka broker is "
              + kafkaBroker
              + ".");
      System.out.println("Message Has Body: \n" + messagePayloadAsJson + "\n");

    } else {
      ClientConfiguration config = new ClientConfiguration(seerClient, "hRxQN4tBfy4S", seerServer);
      SeerClient client = new SeerClient(config);

      client.reportRawTincan(messagePayloadAsJson);

      System.out.println("Publishing Seer message");
      System.out.println("Publishing message of type: " + "Tincan");
      System.out.println("Message Has Body: \n" + messagePayloadAsJson + "\n");
    }
  }
Example #13
  public static void main(String[] args) throws Exception {
    String topic = "aichuche-topic";
    Random random = new Random(128);

    Properties props = new Properties();
    // props.put("zk.connect", "10.91.228.28:2181,10.91.228.29:2181,10.91.228.30:2181");
    props.put("serializer.class", "kafka.serializer.StringEncoder");
    props.put("metadata.broker.list", "210.51.31.68:9092,210.51.31.67:9092");
    props.put("request.required.acks", "1");
    ProducerConfig config = new ProducerConfig(props);
    Producer<String, String> producer = new Producer<String, String>(config);

    for (int i = 0; i <= 10000000; i++) {
      String deviceId = i % 2 == 0 ? "+86test_1" : "+86test_2";
      String currentDateUnixTimestamp =
          String.valueOf(DateUtils.getUnixTimestampFromCurrentDate()); // yyyyMMddHHmmss
      String currentDate =
          new java.text.SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
              .format(DateUtils.getLocalTimeDateFromUnixTimestamp(currentDateUnixTimestamp));
      String data =
          deviceId
              + ";1185;2;101,"
              + currentDateUnixTimestamp
              + ",-0.4884,-0.6512,9.3278,-0.0097,-0.0024,-0.0061,-17.1875,-1.8750,30.5625,31.253138,121.354008,3.4328;"
              + currentDate;
      String partitionKey = null;
      String mesg = data;
      KeyedMessage<String, String> data2 =
          new KeyedMessage<String, String>(topic, partitionKey, mesg);
      producer.send(data2);
      System.out.println("send to topic :" + mesg);
      Thread.sleep(2 * 1000);
    }
    producer.close();

    System.out.println("=====================OVER================");
  }
Example #14
  public static void main(String[] args) {
    String topic = args[0];
    long events = Long.parseLong(args[1]);
    Random rnd = new Random();

    Properties props = new Properties();
    props.put("metadata.broker.list", "localhost:9092,localhost:9093");
    props.put("serializer.class", "kafka.serializer.StringEncoder");
    props.put("partitioner.class", "com.test.groups.SimplePartitioner");
    props.put("request.required.acks", "1");

    ProducerConfig config = new ProducerConfig(props);

    Producer<String, String> producer = new Producer<String, String>(config);

    for (long nEvents = 0; nEvents < events; nEvents++) {
      long runtime = new Date().getTime();
      String ip = "192.168.2." + rnd.nextInt(255);
      String msg = runtime + ",www.example.com," + ip;
      KeyedMessage<String, String> data = new KeyedMessage<String, String>(topic, ip, msg);
      producer.send(data);
    }
    producer.close();
  }
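For comparison, the same loop on the modern org.apache.kafka.clients.producer API, as a sketch; it drops the custom partitioner and lets the default key hash place messages:

    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092,localhost:9093");
    props.put("acks", "1");
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    try (KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props)) {
      for (long nEvents = 0; nEvents < events; nEvents++) {
        String ip = "192.168.2." + rnd.nextInt(255);
        producer.send(new ProducerRecord<String, String>(
            topic, ip, System.currentTimeMillis() + ",www.example.com," + ip));
      }
    } // close() is implicit and flushes pending sends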
Example #15
 /** {@inheritDoc} */
 @Override
 public void destroy() throws StreamingException {
   if (producer != null) {
     producer.close();
   }
 }
Example #16
  public static void main(String[] args) throws IOException {

    Logger.getRootLogger().setLevel(Level.WARN);

    // *** start the storm cluster
    LocalCluster cluster = new LocalCluster();

    // *** start the embedded kafka service
    LocalKafkaBroker broker = new LocalKafkaBroker(0, 9090, 1, "localhost:2000");

    // *** configure replay and karma to use the local kafka instance
    ReplayConfig replay =
        new ReplayConfig().staticHosts(broker.getHostPortStrings(), broker.getNumPartitions());
    KarmaConfig karmaConfig =
        new KarmaConfig("a").replay(replay).reducerState(new InMemoryReducerState());

    KarmaTopologyBuilder karma = new KarmaTopologyBuilder(karmaConfig, "testA");

    karma.setSpout(
        "orgSpout",
        new KafkaSpout(replay.buildReplaySpoutConfig("org", ORG_SCHEME, "orgSpoutId")),
        4);
    karma.setSpout(
        "userSpout",
        new KafkaSpout(replay.buildReplaySpoutConfig("user", USER_SCHEME, "userSpoutId")),
        4);

    karma
        .map("{ orgId }", "userSpout(id)", new Count("orgId"))
        .red("{ userCount }", "orgUserCounts(orgId)", new Sum());
    karma
        .map("{ userCount }", "orgUserCounts(orgId)", new Count("userCount"))
        .red("{ total samples }", "allOrgs()", new Sum())
        .fmt("{ totalUsers averagePerOrg }", "{ d -> [d.total, d.total / d.samples] }");
    buildSniffer("allOrgs", karma);

    karma
        .map("{ orgId }", "userSpout(id)", "{ u -> emit(u.orgId, 1) }")
        .red("{ userCount }", "orgUserCounts2(orgId)", "{ a, b -> [a.userCount + b.userCount] }");
    karma
        .map("{ userCount }", "orgUserCounts2(orgId)", "{ d -> emit(d.userCount, 1) }")
        .red(
            "{ total samples }",
            "allOrgs2()",
            "{ a, b -> [a.total + b.total, a.samples + b.samples]}")
        .fmt("{ totalUsers averagePerOrg }", "{ d -> [d.total, d.total / d.samples] }");
    buildSniffer("allOrgs2", karma);

    // *** build a name count using the scripting support
    karma
        .map("{ name }", "userSpout(id)", "{ u -> emit(u.name, 1L) }")
        .red("{ count }", "nameCounts(name)", "{ a, b -> [a.count + b.count] }");
    buildSniffer("nameCounts", karma);

    karma
        .map("{ name }", "userSpout(id)", "{ u -> emit(u.name, 1L) }")
        .red("{ count }", "nameCounts2(name)", "{ a, b -> [a.count + b.count] }");
    buildSniffer("nameCounts2", karma);

    karma
        .map("{ orgId }", "userSpout(id)", "{ u -> emit(u.orgId, 1L) }")
        .red("{ count }", "empCounts(orgId)", "{ a, b -> [a.count + b.count] }");
    buildSniffer("empCounts", karma);

    karma
        .map("{ name }", "userSpout(id)", "{ u -> emit(1L) }")
        .red("{ count }", "userCount()", "{ a, b -> [a.count + b.count] }");
    buildSniffer("userCount", karma);

    karma
        .map("{ name }", "userSpout(id)", "{ u -> emit(1L) }")
        .red("{ count }", "userCount2()", "{ a, b -> [a.count + b.count] }");
    buildSniffer("userCount2", karma);

    karma
        .map(
            "{ id name }",
            "orgSpout(id)",
            new Mapper() {
              @Override
              public void map(Tuple t, Emitter e) {
                e.emit(t.getValueByField("id"), t.getStringByField("name"), L());
              }
            })
        .map(
            "{ orgId name }",
            "userSpout(id)",
            new Mapper() {
              @Override
              public void map(Tuple t, Emitter e) {
                e.emit(t.getValueByField("orgId"), null, L(t.getStringByField("name")));
              }
            })
        .red(
            "{ orgName userNames }",
            "orgToUsernames(orgId)",
            new Reducer() {
              @Override
              public List reduce(Tuple key, Tuple a, Tuple b) {
                Set<String> names = new TreeSet<String>();
                names.addAll((List) (a.getValueByField("userNames")));
                names.addAll((List) (b.getValueByField("userNames")));
                return L(
                    a.getString(0) != null ? a.getString(0) : b.getString(0), new ArrayList(names));
              }
            });

    karma
        .map(
            "orgSpout",
            L("id"),
            L("id", "name"),
            new Mapper() {
              @Override
              public void map(Tuple t, Emitter e) {
                e.emit(t.getValueByField("id"), t.getStringByField("name"), L());
              }
            })
        .map(
            "userSpout",
            L("id"),
            L("orgId", "name"),
            new Mapper() {
              @Override
              public void map(Tuple t, Emitter e) {
                e.emit(t.getValueByField("orgId"), null, L(t.getStringByField("name")));
              }
            })
        .red(
            "{ orgName userNames }",
            "org2Usernames(orgId)",
            new Reducer() {
              @Override
              public List reduce(Tuple key, Tuple a, Tuple b) {
                Set<String> names = new TreeSet<String>();
                names.addAll((List) (a.getValueByField("userNames")));
                names.addAll((List) (b.getValueByField("userNames")));
                return L(
                    a.getString(0) != null ? a.getString(0) : b.getString(0), new ArrayList(names));
              }
            });
    buildSniffer("org2Usernames", karma);

    karma
        .map(
            "{ orgName userNames }",
            "org2Usernames(orgId)",
            new Mapper() {
              @Override
              public void map(Tuple t, Emitter e) {
                String orgName = t.getStringByField("orgName");
                if (orgName != null)
                  for (String userName : (List<String>) t.getValueByField("userNames")) {
                    e.emit(userName, L(orgName));
                  }
              }
            })
        .red(
            "{ orgNames }",
            "userNames2OrgNames(userName)",
            new Reducer() {
              @Override
              public List reduce(Tuple key, Tuple a, Tuple b) {
                System.out.println("userNames2OrgNames reducing: a: " + a + ", b: " + b);
                Set<String> orgNames = new TreeSet<String>();
                orgNames.addAll((List) a.getValue(0));
                orgNames.addAll((List) b.getValue(0));
                return L(new ArrayList(orgNames));
              }
            });

    buildSniffer("userNames2OrgNames", karma);

    cluster.submitTopology("karma", new Config(), karma.createTopology());

    Producer<Long, Message> producer = broker.buildSyncProducer();

    //        Demo.countdown("Adding orgs and users in ", 5);

    //        sendOrgs(producer);
    //        send100Users(producer);

    //        Demo.readEnter("*** Adding acme", 3);
    sendOrg(producer, 1000, "Acme");

    //        Demo.readEnter("*** Adding 10 greggs", 3);
    for (int i = 0; i < 10; i++) {
      //            Demo.readEnter("** Adding gregg " + (i + 1), 1);
      sendUser(producer, 2000 + i, "Gregg", 1000);
    }

    //        Demo.readEnter("*** Changing greggs to seth and assigning to org 1 in", 3);
    Utils.sleep(2000);
    sendOrg(producer, 1, "Kfc");
    for (int i = 0; i < 10; i++) {
      //            Demo.readEnter("** Changing gregg " + (i + 1) + " to seth and Kfc in", 1);
      sendUser(producer, 2000 + i, "Seth", 1);
    }

    //        Demo.readEnter("*** Deleting acme", 3);
    for (int i = 0; i < 10; i++) deleteUser(producer, 2000 + i);
    deleteOrg(producer, 1000);
    deleteOrg(producer, 1);

    Utils.sleep(100000);

    producer.close();
  }
Example #17
 public void stop() {
   producer.close();
 }
Example #18
 /* (non-Javadoc)
  * @see com.mozilla.bagheera.producer.Producer#close()
  */
 public void close() {
   if (producer != null) {
     producer.close();
   }
 }
Example #19
 public void close() {
   producer.close();
 }
Example #20
 @Override
 public synchronized void stop() {
   producer.close();
   super.stop();
 }