Install Kafka - 3 Node Cluster on AWS
3 EC2 instances for the Kafka cluster
Repeat the following commands on all 3 EC2 instances:
# Check the OS release (these steps assume Ubuntu)
cat /etc/*-release
# Install Oracle Java 8 from the WebUpd8 PPA and verify
sudo add-apt-repository ppa:webupd8team/java
sudo apt-get update
sudo apt-get install oracle-java8-installer
java -version
# Download and unpack Kafka 0.10.0.0 (Scala 2.11 build)
mkdir kafka
cd kafka
wget http://download.nextag.com/apache/kafka/0.10.0.0/kafka_2.11-0.10.0.0.tgz
tar -zxvf kafka_2.11-0.10.0.0.tgz
cd kafka_2.11-0.10.0.0
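The nextag.com mirror above may no longer carry this release; the Apache archive keeps every Kafka version at a stable URL, so this is a safe fallback:
wget https://archive.apache.org/dist/kafka/0.10.0.0/kafka_2.11-0.10.0.0.tgz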
Node IPs (private / public):
ZooKeeper ==> 172.31.48.208 / 52.91.1.93
Kafka-datanode1 ==> 172.31.63.203 / 54.173.215.211
Kafka-datanode2 ==> 172.31.9.25 / 54.226.29.194
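The brokers and ZooKeeper talk to each other over the private IPs; external clients (like the Java producer later in this deck) connect via the public IPs. Both paths need the instances' security group to allow inbound TCP on 2181 (ZooKeeper) and 9092 (Kafka). A sketch with the AWS CLI, assuming it is installed and configured; sg-XXXX is a placeholder for the cluster's security group ID, and 0.0.0.0/0 should be narrowed outside a lab setup:
aws ec2 authorize-security-group-ingress --group-id sg-XXXX --protocol tcp --port 2181 --cidr 0.0.0.0/0
aws ec2 authorize-security-group-ingress --group-id sg-XXXX --protocol tcp --port 9092 --cidr 0.0.0.0/0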
Modify config/server.properties for kafka-datanode1 & kafka-datanode2
Kafka-datanode1 (set the following properties in config/server.properties). Note that listeners binds the broker to its private IP, while advertised.listeners is the address the broker hands back to clients, so it uses the public IP.
ubuntu@ip-172-31-63-203:~/kafka/kafka_2.11-0.10.0.0$ vi config/server.properties
broker.id=1
listeners=PLAINTEXT://172.31.63.203:9092
advertised.listeners=PLAINTEXT://54.173.215.211:9092
zookeeper.connect=52.91.1.93:2181
Kafka-datanode2 (set the following properties in config/server.properties)
ubuntu@ip-172-31-9-25:~/kafka/kafka_2.11-0.10.0.0$ vi config/server.properties
broker.id=2
listeners=PLAINTEXT://172.31.9.25:9092
advertised.listeners=PLAINTEXT://54.226.29.194:9092
zookeeper.connect=52.91.1.93:2181
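The same edits can be scripted instead of made in vi. A minimal sketch, assuming the stock server.properties shipped with 0.10.0.0: Kafka loads the file with Java's Properties, which keeps the last value seen for a duplicate key, so appending overrides the defaults (values shown for Kafka-datanode1):
cat >> config/server.properties <<'EOF'
broker.id=1
listeners=PLAINTEXT://172.31.63.203:9092
advertised.listeners=PLAINTEXT://54.173.215.211:9092
zookeeper.connect=52.91.1.93:2181
EOF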
Launch ZooKeeper / Kafka-datanode1 / Kafka-datanode2
1) Start ZooKeeper
bin/zookeeper-server-start.sh config/zookeeper.properties
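This starts ZooKeeper in the foreground with the stock config/zookeeper.properties (clientPort=2181, dataDir=/tmp/zookeeper). To keep it alive after the SSH session ends and check that it is healthy, one option (assuming nohup and nc are available on the instance):
nohup bin/zookeeper-server-start.sh config/zookeeper.properties > zookeeper.log 2>&1 &
echo ruok | nc localhost 2181   # a healthy server answers "imok"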
2) Start server on Kafka-datanode1
bin/kafka-server-start.sh config/server.properties
3) Start server on Kafka-datanode2
bin/kafka-server-start.sh config/server.properties
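Both brokers also run in the foreground here, so each needs its own terminal. The start script accepts a -daemon flag to background the broker instead:
bin/kafka-server-start.sh -daemon config/server.properties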
4) Create topic & start consumer (a replication factor of 2 requires both brokers to be running)
bin/kafka-topics.sh --zookeeper 52.91.1.93:2181 --create --topic data --partitions 1 --replication-factor 2
bin/kafka-console-consumer.sh --zookeeper 52.91.1.93:2181 --topic data --from-beginning
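Before wiring up the Java producer, the topic can be inspected and test messages pushed with the bundled tools (the broker list uses the public IPs from above):
bin/kafka-topics.sh --zookeeper 52.91.1.93:2181 --describe --topic data
bin/kafka-console-producer.sh --broker-list 54.173.215.211:9092,54.226.29.194:9092 --topic data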
Java - Kafka Producer Sample Application
package com.himanshu;

import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class DataProducer {

    public static void main(String[] args) {
        // Topic name is hard-coded here; uncomment the check below to take it from args instead
        /*
        if (args.length == 0) {
            System.out.println("Enter topic name");
            return;
        }
        */
        String topicName = "data"; // args[0].toString();

        // Producer configuration
        Properties props = new Properties();
        // Bootstrap brokers: public IPs of Kafka-datanode1 and Kafka-datanode2
        props.put("bootstrap.servers", "54.173.215.211:9092,54.226.29.194:9092");
        // (metadata.broker.list was the legacy producer's equivalent; not needed here)
        //props.put("metadata.broker.list", "172.31.63.203:9092,172.31.9.25:9092");
        // Wait for the full set of in-sync replicas to acknowledge each record
        props.put("acks", "all");
        // Number of automatic retries on transient send failures (0 = no retries)
        props.put("retries", 0);
        // Batch size in bytes per partition
        props.put("batch.size", 16384);
        // Small delay (ms) that lets the producer batch records together
        props.put("linger.ms", 1);
        // Total memory (bytes) available to the producer for buffering
        props.put("buffer.memory", 33554432);
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        Producer<String, String> producer = new KafkaProducer<String, String>(props);

        String csvFile = "/Users/himanshu/Documents/workspace/KafkaProducer/src/com/himanshu/invoice.txt";
        String csvSplitBy = ",";
        BufferedReader br = null;
        String lineInvoice = "";

        try {
            br = new BufferedReader(new FileReader(csvFile));
            // Send each CSV line to the topic as a string message
            while ((lineInvoice = br.readLine()) != null) {
                String[] invoice = lineInvoice.split(csvSplitBy); // parsed fields (unused here)
                producer.send(new ProducerRecord<String, String>(topicName, lineInvoice));
                System.out.println("Message sent successfully....");
            }
        }
        catch (FileNotFoundException e) {
            e.printStackTrace();
        }
        catch (IOException e) {
            e.printStackTrace();
        }
        finally {
            producer.close();
            if (br != null) {
                try {
                    br.close();
                }
                catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
    }
}
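To build and run the producer outside an IDE, the kafka-clients jar must be on the classpath. A minimal sketch, assuming the jars from the Kafka distribution's libs/ directory and the source layout used above:
javac -cp "libs/*" -d out src/com/himanshu/DataProducer.java
java -cp "libs/*:out" com.himanshu.DataProducer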
[Screenshot] Sample data sent to the Kafka cluster from the Java Kafka producer (CSV file)
[Screenshot] Message received on Kafka-datanode1
Thank You
hkbhadraa@gmail.com
