Pluralsight version.

Frederic G. MARAND, 3 years ago
Parent
Commit
e3dfd2c7cf

+ 0 - 15
.idea/artifacts/assign_jar.xml

@@ -1,15 +0,0 @@
-<component name="ArtifactManager">
-  <artifact type="jar" build-on-make="true" name="assign:jar">
-    <output-path>$PROJECT_DIR$/out/artifacts</output-path>
-    <root id="archive" name="assign.jar">
-      <element id="directory" name="META-INF">
-        <element id="file-copy" path="$PROJECT_DIR$/src/main/java/assign/META-INF/MANIFEST.MF" />
-      </element>
-      <element id="module-output" name="GettingStartedWithKafka" />
-      <element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/xerial/snappy/snappy-java/1.1.2.6/snappy-java-1.1.2.6.jar" path-in-jar="/" />
-      <element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/slf4j/slf4j-api/1.7.21/slf4j-api-1.7.21.jar" path-in-jar="/" />
-      <element id="extracted-dir" path="$MAVEN_REPOSITORY$/net/jpountz/lz4/lz4/1.3.0/lz4-1.3.0.jar" path-in-jar="/" />
-      <element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/apache/kafka/kafka-clients/0.10.0.1/kafka-clients-0.10.0.1.jar" path-in-jar="/" />
-    </root>
-  </artifact>
-</component>

+ 0 - 15
.idea/artifacts/consumer_jar.xml

@@ -1,15 +0,0 @@
-<component name="ArtifactManager">
-  <artifact type="jar" build-on-make="true" name="consumer:jar">
-    <output-path>$PROJECT_DIR$/out/artifacts</output-path>
-    <root id="archive" name="consumer.jar">
-      <element id="directory" name="META-INF">
-        <element id="file-copy" path="$PROJECT_DIR$/src/main/java/consumer/META-INF/MANIFEST.MF" />
-      </element>
-      <element id="module-output" name="GettingStartedWithKafka" />
-      <element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/xerial/snappy/snappy-java/1.1.2.6/snappy-java-1.1.2.6.jar" path-in-jar="/" />
-      <element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/slf4j/slf4j-api/1.7.21/slf4j-api-1.7.21.jar" path-in-jar="/" />
-      <element id="extracted-dir" path="$MAVEN_REPOSITORY$/net/jpountz/lz4/lz4/1.3.0/lz4-1.3.0.jar" path-in-jar="/" />
-      <element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/apache/kafka/kafka-clients/0.10.0.1/kafka-clients-0.10.0.1.jar" path-in-jar="/" />
-    </root>
-  </artifact>
-</component>

+ 0 - 15
.idea/artifacts/producer_jar.xml

@@ -1,15 +0,0 @@
-<component name="ArtifactManager">
-  <artifact type="jar" build-on-make="true" name="producer:jar">
-    <output-path>$PROJECT_DIR$/out/artifacts</output-path>
-    <root id="archive" name="producer.jar">
-      <element id="directory" name="META-INF">
-        <element id="file-copy" path="$PROJECT_DIR$/src/main/java/producer/META-INF/MANIFEST.MF" />
-      </element>
-      <element id="module-output" name="GettingStartedWithKafka" />
-      <element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/xerial/snappy/snappy-java/1.1.2.6/snappy-java-1.1.2.6.jar" path-in-jar="/" />
-      <element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/slf4j/slf4j-api/1.7.21/slf4j-api-1.7.21.jar" path-in-jar="/" />
-      <element id="extracted-dir" path="$MAVEN_REPOSITORY$/net/jpountz/lz4/lz4/1.3.0/lz4-1.3.0.jar" path-in-jar="/" />
-      <element id="extracted-dir" path="$MAVEN_REPOSITORY$/org/apache/kafka/kafka-clients/0.10.0.1/kafka-clients-0.10.0.1.jar" path-in-jar="/" />
-    </root>
-  </artifact>
-</component>

+ 22 - 3
.idea/compiler.xml

@@ -1,13 +1,32 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project version="4">
   <component name="CompilerConfiguration">
+    <resourceExtensions />
+    <wildcardResourcePatterns>
+      <entry name="!?*.java" />
+      <entry name="!?*.form" />
+      <entry name="!?*.class" />
+      <entry name="!?*.groovy" />
+      <entry name="!?*.scala" />
+      <entry name="!?*.flex" />
+      <entry name="!?*.kt" />
+      <entry name="!?*.clj" />
+      <entry name="!?*.aj" />
+    </wildcardResourcePatterns>
     <annotationProcessing>
-      <profile name="Maven default annotation processors profile" enabled="true">
+      <profile default="true" name="Default" enabled="false">
+        <processorPath useClasspath="true" />
+      </profile>
+      <profile default="false" name="Maven default annotation processors profile" enabled="true">
         <sourceOutputDir name="target/generated-sources/annotations" />
         <sourceTestOutputDir name="target/generated-test-sources/test-annotations" />
         <outputRelativeToContentRoot value="true" />
-        <module name="GettingStartedWithKafka" />
+        <processorPath useClasspath="true" />
+        <module name="kafka" />
       </profile>
     </annotationProcessing>
+    <bytecodeTargetLevel>
+      <module name="kafka" target="1.5" />
+    </bytecodeTargetLevel>
   </component>
-</project>
+</project>

+ 4 - 1
.idea/misc.xml

@@ -1,5 +1,8 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project version="4">
+  <component name="EntryPointsManager">
+    <entry_points version="2.0" />
+  </component>
   <component name="ExternalStorageConfigurationManager" enabled="true" />
   <component name="MavenProjectsManager">
     <option name="originalFiles">
@@ -8,7 +11,7 @@
       </list>
     </option>
   </component>
-  <component name="ProjectRootManager" version="2" languageLevel="JDK_1_8" project-jdk-name="1.8" project-jdk-type="JavaSDK">
+  <component name="ProjectRootManager" version="2" languageLevel="JDK_1_8" default="true" project-jdk-name="1.8" project-jdk-type="JavaSDK">
     <output url="file://$PROJECT_DIR$/out" />
   </component>
 </project>

BIN
docs/slides/1-apache-kafka-getting-started-m1-slides.pdf


BIN
docs/slides/2-apache-kafka-getting-started-m2-slides.pdf


BIN
docs/slides/3-apache-kafka-getting-started-m3-slides.pdf


BIN
docs/slides/4-apache-kafka-getting-started-m4-slides.pdf


BIN
docs/slides/5-apache-kafka-getting-started-m5-slides.pdf


BIN
docs/slides/6-apache-kafka-getting-started-m6-slides.pdf


+ 3 - 8
pom.xml

@@ -4,15 +4,10 @@
          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
 
-    <groupId>fr.osinet.ps.kafka</groupId>
-    <artifactId>samples</artifactId>
+    <groupId>com.ryanplant.kafka.getting-started</groupId>
+    <artifactId>basic-kafka-producer</artifactId>
     <version>0.1-SNAPSHOT</version>
 
-    <properties>
-        <maven.compiler.source>8</maven.compiler.source>
-        <maven.compiler.target>8</maven.compiler.target>
-    </properties>
-
     <dependencies>
         <dependency>
             <groupId>org.apache.kafka</groupId>
@@ -20,4 +15,4 @@
             <version>0.10.0.1</version>
         </dependency>
     </dependencies>
-</project>
+</project>

+ 59 - 0
src/main/java/BatchProducerApp.java

@@ -0,0 +1,59 @@
+import org.apache.kafka.clients.producer.*;
+
+import java.text.*;
+import java.util.*;
+
+public class BatchProducerApp {
+
+    public static void main(String[] args){
+
+        // Create the Properties class to instantiate the Producer with the desired settings:
+        Properties props = new Properties();
+        props.put("bootstrap.servers", "localhost:9092, localhost:9093");
+        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
+        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
+        props.put("acks", "");
+        props.put("buffer.memory", 33554432);
+        props.put("compression.type", "none");
+        props.put("retries", 0);
+        props.put("batch.size", 16384);
+        props.put("client.id", "");
+        props.put("linger.ms", 0);
+        props.put("max.block.ms", 60000);
+        props.put("max.request.size", 1048576);
+        props.put("partitioner.class", "org.apache.kafka.clients.producer.internals.DefaultPartitioner");
+        props.put("request.timeout.ms", 30000);
+        props.put("timeout.ms", 30000);
+        props.put("max.in.flight.requests.per.connection", 5);
+        props.put("retry.backoff.ms", 5);
+
+        KafkaProducer<String, String> myProducer = new KafkaProducer<String, String>(props);
+        DateFormat dtFormat = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss:SSS");
+        String topic = "my-topic";
+
+        try {
+            int batchNumber = 1;
+            int counter = 0;
+            while (true) {
+                do {
+                    myProducer.send(
+                            new ProducerRecord<String, String>(topic, String.format("Batch: %s || %s", Integer.toString(batchNumber), dtFormat.format(new Date())))
+                    );
+                    counter++; // Increase record counter...
+                    // Thread.sleep(500); // use if you want to add latency between record sends
+                    // Thread.sleep(new Random().nextInt(1000)); // use if you want to add random latency between record sends
+                } while (counter < 10); // Number of records sent in a batch...
+                counter = 0; // Reset the record counter...
+                Thread.sleep(500); // Set how long before a new batch is sent...
+                // Thread.sleep(new Random().nextInt(5000)); // use if you want to randomize the time between batch record sends
+                batchNumber++; // Increase the batch number...
+            }
+
+        } catch (Exception e) {
+            e.printStackTrace();
+        } finally {
+            myProducer.close();
+        }
+
+    }
+}
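
The app above simulates batching by pausing between bursts of sends, but the producer also batches internally: records accumulate until a batch.size buffer fills or linger.ms expires, whichever comes first. A minimal sketch of leaning on that built-in batching instead, assuming the same local broker and topic names as above (the class name is illustrative):

    import org.apache.kafka.clients.producer.*;

    import java.util.Properties;

    public class FlushingProducerSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092");
            props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("linger.ms", 100);    // wait up to 100 ms so records accumulate into one batch...
            props.put("batch.size", 16384); // ...or until a 16 KB batch fills, whichever comes first

            KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
            try {
                for (int i = 0; i < 10; i++) {
                    producer.send(new ProducerRecord<String, String>("my-topic", "record " + i));
                }
                producer.flush(); // block until everything buffered so far has actually been sent
            } finally {
                producer.close();
            }
        }
    }

With linger.ms raised above its default of 0, the ten sends typically leave in a single request instead of ten.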

+ 80 - 0
src/main/java/ConsumerApp.java

@@ -0,0 +1,80 @@
+import org.apache.kafka.common.*;
+import org.apache.kafka.clients.consumer.*;
+
+import java.util.*;
+
+public class ConsumerApp {
+    public static void main(String[] args){
+
+        // Create the Properties class to instantiate the Consumer with the desired settings:
+        Properties props = new Properties();
+        props.put("bootstrap.servers", "localhost:9092, localhost:9093");
+        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+        props.put("fetch.min.bytes", 1);
+        props.put("group.id", "");
+        props.put("heartbeat.interval.ms", 3000);
+        props.put("max.partition.fetch.bytes", 1048576);
+        props.put("session.timeout.ms", 30000);
+        props.put("auto.offset.reset", "latest");
+        props.put("connections.max.idle.ms", 540000);
+        props.put("enable.auto.commit", true);
+        props.put("exclude.internal.topics", true);
+        props.put("max.poll.records", 2147483647);
+        props.put("partition.assignment.strategy", "org.apache.kafka.clients.consumer.RangeAssignor");
+        props.put("request.timeout.ms", 40000);
+        props.put("auto.commit.interval.ms", 5000);
+        props.put("fetch.max.wait.ms", 500);
+        props.put("metadata.max.age.ms", 300000);
+        props.put("reconnect.backoff.ms", 50);
+        props.put("retry.backoff.ms", 100);
+        props.put("client.id", "");
+
+
+        // Create a KafkaConsumer instance and configure it with properties.
+        KafkaConsumer<String, String> myConsumer = new KafkaConsumer<String, String>(props);
+
+        // Create a topic subscription list:
+        ArrayList<TopicPartition> partitions = new ArrayList<TopicPartition>();
+        partitions.add(new TopicPartition("my-topic", 0)); // Adds a TopicPartition instance representing a topic and a partition.
+        partitions.add(new TopicPartition("my-topic", 2)); // Adds an additional TopicPartition instance representing a different partition within the topic. Change as desired.
+        // Assign partitions to the Consumer:
+        myConsumer.assign(partitions);
+
+        // Retrieve the set of partitions currently assigned to the Consumer:
+        Set<TopicPartition> assignedPartitions = myConsumer.assignment();
+
+        // Print the partition assignments:
+        printSet(assignedPartitions);
+
+        // Start polling for messages:
+        try {
+            while (true){
+                ConsumerRecords<String, String> records = myConsumer.poll(1000);
+                printRecords(records);
+            }
+        } finally {
+            myConsumer.close();
+        }
+
+    }
+
+    private static void printSet(Set<TopicPartition> collection){
+        if (collection.isEmpty()) {
+            System.out.println("I do not have any partitions assigned yet...");
+        }
+        else {
+            System.out.println("I am assigned to following partitions:");
+            for (TopicPartition partition: collection){
+                System.out.println(String.format("Partition: %s in Topic: %s", Integer.toString(partition.partition()), partition.topic()));
+            }
+        }
+    }
+
+    private static void printRecords(ConsumerRecords<String, String> records)
+    {
+        for (ConsumerRecord<String, String> record : records) {
+            System.out.println(String.format("Topic: %s, Partition: %d, Offset: %d, Key: %s, Value: %s", record.topic(), record.partition(), record.offset(), record.key(), record.value()));
+        }
+    }
+}
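
Because this consumer uses assign() rather than a group subscription, it is also free to reposition itself. A short sketch of rewinding an assigned partition with seekToBeginning() and reading the resulting offset back with position(), assuming the same broker and topic names as above (the class name is illustrative):

    import org.apache.kafka.clients.consumer.*;
    import org.apache.kafka.common.TopicPartition;

    import java.util.*;

    public class SeekingConsumerSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092");
            props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("enable.auto.commit", "false"); // no group coordination when self-assigning

            KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
            TopicPartition partition = new TopicPartition("my-topic", 0);
            consumer.assign(Arrays.asList(partition));

            consumer.seekToBeginning(Arrays.asList(partition)); // rewind to the earliest retained offset
            System.out.println("Resuming from offset: " + consumer.position(partition));

            try {
                while (true) {
                    ConsumerRecords<String, String> records = consumer.poll(1000);
                    for (ConsumerRecord<String, String> record : records) {
                        System.out.printf("Offset: %d, Value: %s%n", record.offset(), record.value());
                    }
                }
            } finally {
                consumer.close();
            }
        }
    }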

+ 79 - 0
src/main/java/ConsumerGroupApp.java

@@ -0,0 +1,79 @@
+import org.apache.kafka.clients.consumer.*;
+
+import java.util.*;
+
+public class ConsumerGroupApp {
+
+    public static void main(String[] args){
+
+        // Create the Properties class to instantiate the Consumer with the desired settings:
+        Properties props = new Properties();
+        props.put("bootstrap.servers", "localhost:9092, localhost:9093");
+        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+        props.put("fetch.min.bytes", 1);
+        props.put("group.id", "my-group"); // Required when subscribing to topics. Rename as desired.
+        props.put("heartbeat.interval.ms", 3000);
+        props.put("max.partition.fetch.bytes", 1048576);
+        props.put("session.timeout.ms", 30000);
+        props.put("auto.offset.reset", "latest");
+        props.put("connections.max.idle.ms", 540000);
+        props.put("enable.auto.commit", true);
+        props.put("exclude.internal.topics", true);
+        props.put("max.poll.records", 2147483647);
+        props.put("partition.assignment.strategy", "org.apache.kafka.clients.consumer.RangeAssignor");
+        props.put("request.timeout.ms", 40000);
+        props.put("auto.commit.interval.ms", 5000);
+        props.put("fetch.max.wait.ms", 500);
+        props.put("metadata.max.age.ms", 300000);
+        props.put("reconnect.backoff.ms", 50);
+        props.put("retry.backoff.ms", 100);
+        props.put("client.id", "");
+
+
+        // Create a KafkaConsumer instance and configure it with properties.
+        KafkaConsumer<String, String> myConsumer = new KafkaConsumer<String, String>(props);
+
+        // Create a topic subscription list:
+        ArrayList<String> topics = new ArrayList<String>();
+        topics.add("my-topic");
+        // topics.add("myNewTopic");
+        myConsumer.subscribe(topics);
+
+        // Retrieves the topic subscription list from the SubscriptionState internal object:
+        Set<String> subscribedTopics = myConsumer.subscription();
+
+        // Print the topic subscription list:
+        printSet(subscribedTopics);
+
+        // Start polling for messages:
+        try {
+            while (true){
+                ConsumerRecords<String, String> records = myConsumer.poll(1000);
+                printRecords(records);
+            }
+        } finally {
+            myConsumer.close();
+        }
+
+    }
+
+    private static void printSet(Set<String> collection){
+        if (collection.isEmpty()) {
+            System.out.println("I am not subscribed to anything yet...");
+        }
+        else {
+            System.out.println("I am subscribed to the following topics:");
+            for (String item : collection){
+                System.out.println(item);
+            }
+        }
+    }
+
+    private static void printRecords(ConsumerRecords<String, String> records)
+    {
+        for (ConsumerRecord<String, String> record : records) {
+            System.out.println(String.format("Topic: %s, Partition: %d, Offset: %d, Key: %s, Value: %s", record.topic(), record.partition(), record.offset(), record.key(), record.value()));
+        }
+    }
+}
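
When subscribing as part of a group, partition ownership can move between members during any poll(). A hedged sketch of observing those moves with the ConsumerRebalanceListener overload of subscribe(), reusing the my-group and my-topic names from above (the class name is illustrative):

    import org.apache.kafka.clients.consumer.*;
    import org.apache.kafka.common.TopicPartition;

    import java.util.*;

    public class RebalanceListenerSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092");
            props.put("group.id", "my-group");
            props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

            KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
            consumer.subscribe(Arrays.asList("my-topic"), new ConsumerRebalanceListener() {
                public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                    // Last chance to commit or checkpoint before ownership moves away.
                    System.out.println("Revoked: " + partitions);
                }
                public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                    System.out.println("Assigned: " + partitions);
                }
            });

            try {
                while (true) {
                    consumer.poll(1000); // polling is what drives the rebalance protocol
                }
            } finally {
                consumer.close();
            }
        }
    }

Start two copies of this against a multi-partition topic and the listener prints the partitions being handed from one instance to the other.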

+ 0 - 47
src/main/java/KafkaAssignApp.java

@@ -1,47 +0,0 @@
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.common.TopicPartition;
-
-import java.util.ArrayList;
-import java.util.Properties;
-
-public class KafkaAssignApp {
-
-  public static void main(String[] args) {
-    Properties props = new Properties();
-    KafkaConsumerCommon.configure(props);
-
-    // The consumer Group ID is NOT required for assign;
-    // props.put(ConsumerConfig.GROUP_ID_CONFIG, "demo");
-
-    KafkaConsumer<String, String> myConsumer = new KafkaConsumer<String, String>(props);
-
-    ArrayList<TopicPartition> partitions = new ArrayList<TopicPartition>();
-    TopicPartition myTopicPart0 = new TopicPartition(KafkaCommon.TOPIC, 0);
-    TopicPartition myTopicPart1 = new TopicPartition(KafkaCommon.TOPIC, 1);
-    partitions.add(myTopicPart0);
-    // Will block poll if the partition does not exist.
-    partitions.add(myTopicPart1);
-
-    /* Don't let the consumer handle the partitions */
-    myConsumer.assign(partitions);
-
-    try {
-      while (true) {
-        KafkaConsumerCommon.process(myConsumer);
-      }
-    } catch (Exception e) {
-      e.printStackTrace();
-    } finally {
-      myConsumer.close();
-    }
-  }
-
-  private static void assignExamples(KafkaConsumer<String, String> c) {
-    TopicPartition partition0 = new TopicPartition(KafkaCommon.TOPIC, 0);
-    ArrayList<TopicPartition> partitions = new ArrayList<TopicPartition>();
-    partitions.add(partition0);
-
-    c.assign(partitions); // NOT incremental !
-  }
-
-}

+ 0 - 4
src/main/java/KafkaCommon.java

@@ -1,4 +0,0 @@
-public class KafkaCommon {
-  public static final String TOPIC = "my-topic";
-  public static final String BROKERS = "localhost:9092, BROKER-1:9092, BROKER-2:9093";
-}

+ 0 - 60
src/main/java/KafkaConsumerApp.java

@@ -1,60 +0,0 @@
-import org.apache.kafka.clients.consumer.ConsumerConfig;
-import org.apache.kafka.clients.consumer.ConsumerRecords;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Properties;
-
-public class KafkaConsumerApp {
-
-  public static void main(String[] args) {
-    Properties props = new Properties();
-    KafkaConsumerCommon.configure(props);
-
-    // The consumer Group ID is now required on subscribe().
-    props.put(ConsumerConfig.GROUP_ID_CONFIG, KafkaConsumerCommon.GROUP);
-
-    KafkaConsumer<String, String> myConsumer = new KafkaConsumer<String, String>(props);
-
-    /* Let the consumer handle the partitions */
-    myConsumer.subscribe(Arrays.asList(KafkaCommon.TOPIC));
-
-    try {
-      while (true) {
-        KafkaConsumerCommon.process(myConsumer);
-      }
-    } catch (Exception e) {
-      e.printStackTrace();
-    } finally {
-      myConsumer.close();
-    }
-  }
-
-  private static void subscribeExamples(KafkaConsumer<String, String> c) {
-    // In tutorial but does not compile: needs a rebalance callback
-    // c.subscribe("my-*");
-    // Maybe that way ?
-    // c.subscribe(Pattern.compile("my-[\\w]+"), null);
-
-    // Typical initial subscription.
-    c.subscribe(Arrays.asList(KafkaCommon.TOPIC));
-
-    // Replaces the current subscription set, does not extend it
-    c.subscribe(Arrays.asList("another-topic"));
-
-    // Better for incremental cases.
-    ArrayList<String> topics = new ArrayList<String>();
-    topics.add(KafkaCommon.TOPIC);
-    topics.add("my-other-topic");
-    topics.add("yetAnotherTopic");
-    c.subscribe(topics);
-
-    // Unsubscribe from all topics.
-    c.unsubscribe();
-
-    // Alternative
-    topics.clear();
-    c.subscribe(topics);
-  }
-}

+ 0 - 43
src/main/java/KafkaConsumerCommon.java

@@ -1,43 +0,0 @@
-import org.apache.kafka.clients.consumer.ConsumerConfig;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.ConsumerRecords;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.common.serialization.StringDeserializer;
-
-import java.util.Properties;
-
-public class KafkaConsumerCommon {
-  public static final String GROUP = "test-group";
-  public static final String STRING_DESERIALIZER = StringDeserializer.class.getName();
-  public static final Integer TIMEOUT = 10;
-  protected static Boolean started = false;
-  protected static Integer pass = 0;
-
-  static void configure(Properties props) {
-    // We use "put" since there are strings. Otherwise, use setProperty.
-    // These are the 3 required properties.
-    // 1. Cluster membership: partition leaders, etc
-    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KafkaCommon.BROKERS);
-
-    // 2. How keys and values are deserialized.
-    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, STRING_DESERIALIZER);
-    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, STRING_DESERIALIZER);
-  }
-
-  static void process(KafkaConsumer<String, String> c) {
-    ConsumerRecords<String, String> records = c.poll(KafkaConsumerCommon.TIMEOUT);
-    if (!started) {
-      started = true;
-      System.out.printf("Started");
-    }
-//                for (ConsumerRecord<String, String> cr : records.records(TOPIC)) {
-//                    System.out.printf("\t\tKey: %s Value: %s\n", cr.key(), cr.value());
-//                }
-    for (ConsumerRecord<String, String> cr : records) {
-      System.out.printf("Pass: %d/%d Topic: %s Partition: %d, Offset: %d, Key: %s Value: %s\n",
-        pass, records.count(),
-        cr.topic(), cr.partition(), cr.offset(), cr.key(), cr.value());
-    }
-    pass++;
-  }
-}

+ 0 - 65
src/main/java/KafkaProducerApp.java

@@ -1,65 +0,0 @@
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.ProducerRecord;
-
-import java.time.Instant;
-import java.util.Properties;
-
-public class KafkaProducerApp {
-
-  public static void main(String[] args) {
-    Properties props = new Properties();
-    KafkaProducerCommon.configure(props);
-
-    KafkaProducer<String, String> myProducer = new KafkaProducer<String, String>(props);
-
-    try {
-      for (int i = 0; i < 100; i++) {
-        Instant ts = Instant.now();
-        Double ss = ts.toEpochMilli() + ts.getNano() / 1E9;
-        ProducerRecord myMessage = new ProducerRecord(KafkaCommon.TOPIC, String.format("%3d : %09.3f", i, ss));
-        myProducer.send(myMessage); // Best practice: wrap in try..catch.
-      }
-    } catch (Exception e) {
-      e.printStackTrace();
-    } finally {
-      myProducer.close();
-    }
-  }
-
-  public static void createMessagesExample() {
-    // Message with only the required properties.
-    ProducerRecord myMessage = new ProducerRecord(
-      KafkaCommon.TOPIC, // Topic to which the record will be sent.
-      "My Message 1" // Message content, matching the serializer type for value
-    );
-
-    // Non-matching type: runtime exception
-    // ProducerRecord myBadMessage = new ProducerRecord(TOPIC, 3.14159);
-
-    ProducerRecord myPartitionedMessage = new ProducerRecord(
-      KafkaCommon.TOPIC, // String Topic
-      1,  // Integer Partition
-      "My Message 1" // String Message
-    );
-
-    ProducerRecord myKeyedMessage = new ProducerRecord(
-      KafkaCommon.TOPIC, // String Topic
-      "Course-001",  // K key
-      "My Message 1" // String Message
-    );
-
-    // Adding optional properties
-    ProducerRecord msg3 = new ProducerRecord(
-      KafkaCommon.TOPIC, // String Topic
-      1, // Integer Partition
-      124535353325L, // Long timestamp, added in Kafka 0.10.
-      "Course-001", // K key: basis to determine partitioning strategy. Don't use blank or NULL.
-      // Key may contain additional message information, but adds overhead, depends on serializer.
-      "My Message 1" // V value
-    );
-    // The actual TS being send with the message is defined in server.properties:
-    // log.message.timestamp.type = [CreateTime, LogAppendTime]
-    // - CreateTime: producer-set timestamp is used
-    // - LogAppendTime: broker-set to the time when the message is appended to the commit log. Overrides the producer-set one.
-  }
-}

+ 0 - 19
src/main/java/KafkaProducerCommon.java

@@ -1,19 +0,0 @@
-import org.apache.kafka.clients.producer.ProducerConfig;
-
-import java.util.Properties;
-
-public class KafkaProducerCommon {
-  public static final String STRING_SERIALIZER = "org.apache.kafka.common.serialization.StringSerializer";
-
-  static void configure(Properties props) {
-    // We use "put" since there are strings. Otherwise, use setProperty.
-    // These are the 3 required properties.
-    // 1. Cluster membership: partition leaders, etc
-    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, KafkaCommon.BROKERS);
-
-    // 2. How keys and values are serialized.
-    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, STRING_SERIALIZER);
-    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, STRING_SERIALIZER);
-  }
-
-}

+ 50 - 0
src/main/java/ProducerApp.java

@@ -0,0 +1,49 @@
+import org.apache.kafka.clients.producer.*;
+
+import java.text.*;
+import java.util.*;
+
+public class ProducerApp {
+
+    public static void main(String[] args){
+
+        // Create the Properties class to instantiate the Producer with the desired settings:
+        Properties props = new Properties();
+        props.put("bootstrap.servers", "localhost:9092, localhost:9093");
+        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
+        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
+        props.put("acks", "");
+        props.put("buffer.memory", 33554432);
+        props.put("compression.type", "none");
+        props.put("retries", 0);
+        props.put("batch.size", 16384);
+        props.put("client.id", "");
+        props.put("linger.ms", 0);
+        props.put("max.block.ms", 60000);
+        props.put("max.request.size", 1048576);
+        props.put("partitioner.class", "org.apache.kafka.clients.producer.internals.DefaultPartitioner");
+        props.put("request.timeout.ms", 30000);
+        props.put("timeout.ms", 30000);
+        props.put("max.in.flight.requests.per.connection", 5);
+        props.put("retry.backoff.ms", 5);
+
+        KafkaProducer<String, String> myProducer = new KafkaProducer<String, String>(props);
+        DateFormat dtFormat = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss:SSS");
+        String topic = "my-topic";
+
+        int numberOfRecords = 100; // number of records to send
+        long sleepTimer = 0; // how long you want to wait before the next record to be sent
+
+        try {
+            for (int i = 0; i < numberOfRecords; i++) {
+                myProducer.send(new ProducerRecord<String, String>(topic, String.format("Message: %s  sent at %s", Integer.toString(i), dtFormat.format(new Date()))));
+                Thread.sleep(sleepTimer); // wait between record sends; without braces, the sleep would run only once after the loop
+                // Thread.sleep(new Random().nextInt(5000)); // use if you want to randomize the time between record sends
+            }
+        } catch (Exception e) {
+            e.printStackTrace();
+        } finally {
+            myProducer.close();
+        }
+
+    }
+}
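
send() above is fire-and-forget: broker-side failures only surface if you inspect the Future it returns or pass a Callback. A minimal sketch of the callback form, assuming the same local broker and topic (the class name is illustrative):

    import org.apache.kafka.clients.producer.*;

    import java.util.Properties;

    public class CallbackProducerSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092");
            props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

            KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
            try {
                producer.send(new ProducerRecord<String, String>("my-topic", "hello"), new Callback() {
                    public void onCompletion(RecordMetadata metadata, Exception e) {
                        if (e != null) {
                            e.printStackTrace(); // e.g. a TimeoutException when no broker is reachable
                        } else {
                            System.out.printf("Acked: Partition: %d, Offset: %d%n",
                                    metadata.partition(), metadata.offset());
                        }
                    }
                });
            } finally {
                producer.close(); // close() flushes buffered records and waits for pending callbacks
            }
        }
    }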

+ 0 - 2
src/main/java/assign/META-INF/MANIFEST.MF

@@ -1,2 +0,0 @@
-Manifest-Version: 1.0
-Main-Class: KafkaAssignApp

+ 0 - 2
src/main/java/consumer/META-INF/MANIFEST.MF

@@ -1,2 +0,0 @@
-Manifest-Version: 1.0
-Main-Class: KafkaConsumerApp

+ 0 - 2
src/main/java/producer/META-INF/MANIFEST.MF

@@ -1,2 +0,0 @@
-Manifest-Version: 1.0
-Main-Class: KafkaProducerApp