KafkaProducerApp.java

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.time.Instant;
import java.util.Properties;

public class KafkaProducerApp {

    public static final String STRING_SERIALIZER = "org.apache.kafka.common.serialization.StringSerializer";
    public static final String TOPIC = "my-topic";

    public static void main(String[] args) {
        Properties props = new Properties();
        // These are the three required producer properties.
        // 1. Cluster membership: bootstrap brokers, used to discover partition leaders, etc.
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092,BROKER-1:9092,BROKER-2:9093");
        // 2. How keys and values are serialized.
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, STRING_SERIALIZER);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, STRING_SERIALIZER);
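
        // Optional tuning properties (illustrative sketch, not part of the original example).
        // The keys are real ProducerConfig constants; the values are assumptions chosen only
        // to demonstrate the API, not tuning advice.
        props.put(ProducerConfig.ACKS_CONFIG, "all");        // wait for all in-sync replicas to acknowledge
        props.put(ProducerConfig.RETRIES_CONFIG, 3);         // retry transient send failures
        props.put(ProducerConfig.LINGER_MS_CONFIG, 5);       // wait up to 5 ms to batch records together
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);  // maximum batch size in bytes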
        KafkaProducer<String, String> myProducer = new KafkaProducer<>(props);
        try {
            for (int i = 0; i < 100; i++) {
                Instant ts = Instant.now();
                // Epoch time in seconds, including the fractional (nanosecond) part.
                double ss = ts.getEpochSecond() + ts.getNano() / 1E9;
                ProducerRecord<String, String> myMessage =
                        new ProducerRecord<>(TOPIC, String.format("%3d : %09.3f", i, ss));
                myProducer.send(myMessage); // Best practice: wrap sends in try..catch.
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            myProducer.close();
        }
    }
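
    // Illustrative sketch (not in the original file): send() is asynchronous and returns a
    // Future<RecordMetadata>; a Callback reports per-record success or failure without
    // blocking. The helper name and the record contents here are hypothetical.
    public static void sendWithCallbackExample(KafkaProducer<String, String> producer) {
        ProducerRecord<String, String> record = new ProducerRecord<>(TOPIC, "callback demo");
        producer.send(record, (metadata, exception) -> {
            if (exception != null) {
                exception.printStackTrace(); // delivery failed after any retries
            } else {
                System.out.printf("Delivered to %s-%d at offset %d%n",
                        metadata.topic(), metadata.partition(), metadata.offset());
            }
        });
    }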

    public static void createMessagesExample() {
        // Message with only the required properties.
        ProducerRecord<String, String> myMessage = new ProducerRecord<>(
                TOPIC,           // Topic to which the record will be sent.
                "My Message 1"   // Message content; its type must match the configured value serializer.
        );
        // A non-matching value type on a raw (non-generic) record compiles, but fails at send
        // time when the StringSerializer receives a Double:
        // ProducerRecord myBadMessage = new ProducerRecord(TOPIC, 3.14159);

        // To target a specific partition, use the four-argument constructor
        // (topic, partition, key, value); the key may be null.
        ProducerRecord<String, String> myPartitionedMessage = new ProducerRecord<>(
                TOPIC,           // String topic
                1,               // Integer partition
                null,            // K key (none)
                "My Message 1"   // String message
        );
        ProducerRecord<String, String> myKeyedMessage = new ProducerRecord<>(
                TOPIC,           // String topic
                "Course-001",    // K key
                "My Message 1"   // String message
        );
        // Adding the optional properties.
        ProducerRecord<String, String> msg3 = new ProducerRecord<>(
                TOPIC,           // String topic
                1,               // Integer partition
                124535353325L,   // Long timestamp, added in Kafka 0.10.
                "Course-001",    // K key: the basis of the partitioning strategy. Don't use blank or NULL.
                                 // The key may carry additional message information, but that adds
                                 // overhead and depends on the serializer.
                "My Message 1"   // V value
        );
        // The timestamp actually stored with the message is controlled in server.properties:
        // log.message.timestamp.type = [CreateTime, LogAppendTime]
        // - CreateTime: the producer-set timestamp is used.
        // - LogAppendTime: the broker sets the timestamp when the message is appended to the
        //   commit log, overriding the producer-set one.
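
        // Illustrative sketch (an assumption, not in the original): clients since Kafka 0.11
        // can also attach per-record headers; the header name and value below are made up
        // for demonstration only.
        msg3.headers().add("source-system",
                "course-app".getBytes(java.nio.charset.StandardCharsets.UTF_8));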
    }
}