kafka生產(chǎn)者和消費(fèi)者的javaAPI的示例代碼
寫了個kafka的java demo 順便記錄下,僅供參考
1.創(chuàng)建maven項目
目錄如下:
2.pom文件:
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>Kafka-Maven</groupId>
    <artifactId>Kafka-Maven</artifactId>
    <version>0.0.1-SNAPSHOT</version>

    <dependencies>
        <!-- Kafka client + old Scala consumer API (kafka.javaapi.consumer) -->
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.11</artifactId>
            <version>0.10.1.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.2.0</version>
        </dependency>
        <!-- FIX: hadoop-hdfs was declared twice; the duplicate has been removed. -->
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>2.2.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.2.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-client</artifactId>
            <version>1.0.3</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-server</artifactId>
            <version>1.0.3</version>
        </dependency>
        <!-- tools.jar for JDK 7; system scope ties the build to a local JAVA_HOME -->
        <dependency>
            <groupId>jdk.tools</groupId>
            <artifactId>jdk.tools</artifactId>
            <version>1.7</version>
            <scope>system</scope>
            <systemPath>${JAVA_HOME}/lib/tools.jar</systemPath>
        </dependency>
        <dependency>
            <groupId>org.apache.httpcomponents</groupId>
            <artifactId>httpclient</artifactId>
            <version>4.3.6</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>1.7</source>
                    <target>1.7</target>
                </configuration>
            </plugin>
        </plugins>
    </build>
</project>
3.kafka生產(chǎn)者KafkaProduce:
package com.lijie.producer;

import java.io.InputStream;
import java.util.Properties;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

/**
 * Minimal Kafka producer demo: loads producer settings from
 * {@code kafka.properties} on the classpath and sends one record per call.
 */
public class KafkaProduce {

    /** Producer configuration, loaded once when the class is initialized. */
    private static Properties properties;

    static {
        properties = new Properties();
        // FIX: load the resource relative to THIS class (the original used
        // KafkaProducer.class) and close the stream via try-with-resources
        // (the original leaked a FileInputStream).
        try (InputStream in = KafkaProduce.class.getResourceAsStream("/kafka.properties")) {
            properties.load(in);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Sends a single message to the given topic and blocks until the producer
     * has flushed it.
     *
     * <p>NOTE(review): a new {@link KafkaProducer} is created and closed for
     * every call, which is expensive; for real workloads keep one long-lived
     * producer instance.
     *
     * @param topic topic to publish to
     * @param key   record key (may be null)
     * @param value record payload
     */
    public void sendMsg(String topic, byte[] key, byte[] value) {
        KafkaProducer<byte[], byte[]> producer = new KafkaProducer<byte[], byte[]>(properties);
        try {
            ProducerRecord<byte[], byte[]> record = new ProducerRecord<byte[], byte[]>(topic, key, value);
            producer.send(record, new Callback() {
                // FIX: the original branch was inverted — it dereferenced
                // metadata.offset() when exception != null (metadata is null on
                // failure, so that path would NPE) and printed nothing on success.
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    if (exception != null) {
                        System.out.println(exception.getMessage() + exception);
                    } else {
                        System.out.println("記錄的offset在:" + metadata.offset());
                    }
                }
            });
        } finally {
            // close() flushes any buffered records before shutting down.
            producer.close();
        }
    }
}
4.kafka消費(fèi)者KafkaConsume:
package com.lijie.consumer;

import java.io.InputStream;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import com.lijie.pojo.User;
import com.lijie.utils.JsonUtils;

import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.serializer.StringDecoder;
import kafka.utils.VerifiableProperties;

/**
 * Kafka consumer demo using the old ZooKeeper-based high-level consumer API.
 * Reads JSON messages from the {@value #TOPIC} topic and deserializes each
 * one into a {@link User}.
 */
public class KafkaConsume {

    /** Topic this demo consumes from. */
    private final static String TOPIC = "lijietest";

    /** Consumer configuration, loaded once when the class is initialized. */
    private static Properties properties;

    static {
        properties = new Properties();
        // FIX: close the stream via try-with-resources (the original leaked a
        // FileInputStream opened from the resource path).
        try (InputStream in = KafkaConsume.class.getResourceAsStream("/kafka.properties")) {
            properties.load(in);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Consumes messages from {@value #TOPIC} forever, printing each decoded
     * {@link User}. This method never returns under normal operation.
     *
     * @throws Exception if the consumer cannot be created or a message cannot
     *                   be decoded
     */
    public void getMsg() throws Exception {
        ConsumerConfig config = new ConsumerConfig(properties);
        ConsumerConnector consumer = kafka.consumer.Consumer
                .createJavaConsumerConnector(config);

        // Request one stream (one consuming thread) for the topic.
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(TOPIC, new Integer(1));

        StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
        StringDecoder valueDecoder = new StringDecoder(
                new VerifiableProperties());
        Map<String, List<KafkaStream<String, String>>> consumerMap = consumer
                .createMessageStreams(topicCountMap, keyDecoder, valueDecoder);

        KafkaStream<String, String> stream = consumerMap.get(TOPIC).get(0);
        ConsumerIterator<String, String> it = stream.iterator();
        // hasNext() blocks until a message arrives, so this loops forever.
        while (it.hasNext()) {
            String json = it.next().message();
            User user = (User) JsonUtils.JsonToObj(json, User.class);
            System.out.println(user);
        }
    }
}
5.kafka.properties文件
## Producer settings
bootstrap.servers=192.168.80.123:9092
producer.type=sync
request.required.acks=1
# NOTE(review): serializer.class is declared again in the consumer section below;
# java.util.Properties keeps only the LAST value (StringEncoder), so this line is
# silently overridden — confirm which encoder is actually intended.
serializer.class=kafka.serializer.DefaultEncoder
# Serializers used by the new-API KafkaProducer (byte[] keys/values)
key.serializer=org.apache.kafka.common.serialization.ByteArraySerializer
value.serializer=org.apache.kafka.common.serialization.ByteArraySerializer
bak.partitioner.class=kafka.producer.DefaultPartitioner
bak.key.serializer=org.apache.kafka.common.serialization.StringSerializer
bak.value.serializer=org.apache.kafka.common.serialization.StringSerializer

## Consumer settings (old ZooKeeper-based high-level consumer)
zookeeper.connect=192.168.80.123:2181
group.id=lijiegroup
zookeeper.session.timeout.ms=4000
zookeeper.sync.time.ms=200
auto.commit.interval.ms=1000
# "smallest" = start from the earliest available offset when no committed offset exists
auto.offset.reset=smallest
# Duplicate key — see the note in the producer section; this value wins.
serializer.class=kafka.serializer.StringEncoder
以上就是本文的全部內(nèi)容,希望對大家的學(xué)習(xí)有所幫助,也希望大家多多支持腳本之家。
相關(guān)文章
Elasticsearch寫入瓶頸導(dǎo)致skywalking大盤空白
這篇文章主要為大家介紹了Elasticsearch寫入瓶頸導(dǎo)致skywalking大盤空白的解決方案,有需要的朋友可以借鑒參考下,希望能夠有所幫助,祝大家多多進(jìn)步(2022-02-02)
Java 實戰(zhàn)范例之線上婚紗攝影預(yù)定系統(tǒng)的實現(xiàn)
讀萬卷書不如行萬里路,只學(xué)書上的理論是遠(yuǎn)遠(yuǎn)不夠的,只有在實戰(zhàn)中才能獲得能力的提升,本篇文章手把手帶你用java+javaweb+SSM+springboot+mysql實現(xiàn)一個線上婚紗攝影預(yù)定系統(tǒng),大家可以在過程中查缺補(bǔ)漏,提升水平(2021-11-11)
SpringBoot整合Redis實現(xiàn)登錄失敗鎖定功能(實例詳解)
本文我們已經(jīng)探討如何利用Redis來實現(xiàn)鎖定賬戶的安全措施,以及通過SpringBoot整合Redis實現(xiàn)了這一功能,感興趣的朋友跟隨小編一起學(xué)習(xí)下吧(2024-02-02)
mybatis-config.xml文件中的mappers標(biāo)簽使用
在MyBatis配置中,<mapper>標(biāo)簽關(guān)鍵用于指定SQL?Mapper的XML文件路徑,主要有三種指定方式:resource、url和class,Resource方式從類的根路徑開始,適合放在項目內(nèi)部保障移植性,URL方式指定絕對路徑,移植性差,適用于外部路徑(2024-10-10)
MybatisPlus關(guān)聯(lián)查詢的完美實現(xiàn)方案
我們在項目開發(fā)的時候,難免會遇到連表查詢的操作,所以下面這篇文章主要給大家介紹了關(guān)于MybatisPlus關(guān)聯(lián)查詢的相關(guān)資料,文中通過實例代碼介紹的非常詳細(xì),需要的朋友可以參考下(2021-12-12)