Preface
Tools used: kafka_2.12-3.1.0.tgz
Test project: Spring Cloud
Test system: Windows 10
Installing Kafka
Download and extract Kafka, then start ZooKeeper first (the commands below are run from bin\windows):
zookeeper-server-start.bat ../../config/zookeeper.properties
Then start Kafka:
kafka-server-start.bat ../../config/server.properties
If startup fails, deleting the log files and starting again can help.
Kafka no longer creates topics through ZooKeeper; topics are created with --bootstrap-server instead (on Windows, use the .bat script):
kafka-topics.bat --create --bootstrap-server localhost:9092 --replication-factor 1 --partitions 1 --topic test
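Alternatively, once the Spring Boot project below is set up, the topic can be declared from the application itself: Spring Boot auto-configures a KafkaAdmin bean, which creates any NewTopic beans on startup. A minimal sketch (the class name TopicConfig is my own; the settings mirror the CLI command above):
package priv.sinoam.config;

import org.apache.kafka.clients.admin.NewTopic;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.TopicBuilder;

@Configuration
public class TopicConfig {

    // Created automatically on startup by the auto-configured KafkaAdmin;
    // same as the CLI command: 1 partition, replication factor 1.
    @Bean
    public NewTopic testTopic() {
        return TopicBuilder.name("TEST_TOPIC")
                .partitions(1)
                .replicas(1)
                .build();
    }
}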
Connecting Spring Boot to Kafka
Create a new Spring Cloud project (for details, see the earlier post 学习spring cloud记录1-使用idea新建第一个spring cloud). Two modules are created in order to simulate two clients.
Add the dependency
Declaring it in the parent POM is enough. No version tag is needed; the Spring Boot parent manages a matching spring-kafka version:
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>
Configure application.yml
Note: bootstrap-servers could not be set to localhost in my tests; use your machine's LAN IP instead (see the Problems section at the end).
server:
  port: 9000
  servlet:
    context-path: /kafka
spring:
  kafka:
    # Kafka broker address
    bootstrap-servers: ip:9092
    producer:
      # Number of resend attempts after a send error
      retries: 0
      # When several records are sent to the same partition, the producer batches them
      # together. This sets the size of one batch, in bytes.
      batch-size: 16384
      # Size of the producer's memory buffer, in bytes
      buffer-memory: 33554432
      # Key serializer
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      # Value serializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
      # acks=0   : the producer does not wait for any response from the broker before
      #            treating the write as successful.
      # acks=1   : the producer gets a success response once the partition leader has
      #            received the message.
      # acks=all : the producer gets a success response only after all replicating
      #            nodes have received the message.
      acks: 1
    consumer:
      # Auto-commit interval. In Spring Boot 2.x this binds to a Duration and must use
      # a format such as 1S, 1M, 2H, 5D.
      auto-commit-interval: 1S
      # What the consumer should do when a partition has no committed offset, or the
      # committed offset is invalid:
      # latest (default): start from the newest records (those produced after the
      #                   consumer started)
      # earliest: start from the beginning of the partition
      auto-offset-reset: earliest
      # Whether to auto-commit offsets (default true). Set it to false and commit
      # manually to avoid duplicates and data loss.
      enable-auto-commit: false
      # Key deserializer
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      # Value deserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
    listener:
      # Number of threads running in the listener container
      concurrency: 5
      # The listener is responsible for acks; each acknowledge() commits immediately
      ack-mode: manual_immediate
      missing-topics-fatal: false
# Custom application properties
kafka:
  topic:
    name: TEST_TOPIC
  group:
    id: TEST_GROUP
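The kafka.topic.name and kafka.group.id entries at the bottom are plain custom properties, read later with @Value. If you prefer typed access, a sketch using @ConfigurationProperties could look like this (KafkaCustomProperties is a name I made up; the @Value approach used in the rest of this post works just as well):
package priv.sinoam.config;

import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Configuration;

// Binds kafka.topic.name and kafka.group.id into one typed object
@Configuration
@ConfigurationProperties(prefix = "kafka")
public class KafkaCustomProperties {

    private final Topic topic = new Topic();
    private final Group group = new Group();

    public Topic getTopic() { return topic; }
    public Group getGroup() { return group; }

    public static class Topic {
        private String name;
        public String getName() { return name; }
        public void setName(String name) { this.name = name; }
    }

    public static class Group {
        private String id;
        public String getId() { return id; }
        public void setId(String id) { this.id = id; }
    }
}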
Write the configuration class
(Strictly speaking, Spring Boot already auto-configures a ProducerFactory and KafkaTemplate from the spring.kafka.* properties above; defining them explicitly, as below, just makes the producer settings visible in code.)
package priv.sinoam.config;

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;

import java.util.HashMap;
import java.util.Map;

@Configuration
public class SenderConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    @Bean
    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        // Security protocol
        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "PLAINTEXT");
        // Maximum time send() may block, e.g. waiting for metadata or buffer space
        props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 30 * 1000);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return props;
    }

    @Bean
    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }
}
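The consumer side needs no such class: it is driven entirely by the spring.kafka.consumer.* and spring.kafka.listener.* properties above. If you want the same programmatic style as SenderConfig anyway, a minimal sketch of the equivalent consumer configuration might look like this (ReceiverConfig is a hypothetical name; the DefaultErrorHandler line is an optional extra that retries a failed record twice before skipping it):
package priv.sinoam.config;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.kafka.listener.DefaultErrorHandler;
import org.springframework.util.backoff.FixedBackOff;

import java.util.HashMap;
import java.util.Map;

@Configuration
public class ReceiverConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return new DefaultKafkaConsumerFactory<>(props);
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        // Same effect as ack-mode: manual_immediate in application.yml
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        // Optional: retry a failed record twice, 1s apart, before giving up on it
        factory.setCommonErrorHandler(new DefaultErrorHandler(new FixedBackOff(1000L, 2)));
        return factory;
    }
}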
Sending messages to Kafka
package priv.sinoam.controller;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
@RequestMapping("/kafka")
public class KafkaMsgSendController {

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    @Value("${kafka.topic.name}")
    private String topicName;

    // With context-path /kafka, the full URL is POST /kafka/kafka/sendMsg
    @PostMapping("/sendMsg")
    public String sendMsg(@RequestBody String jsonString) {
        kafkaTemplate.send(topicName, jsonString);
        return "Kafka Message Send OK!";
    }
}
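Note that send() is asynchronous and returns immediately, so with retries: 0 a failed send would go unnoticed here. In the spring-kafka version managed by Boot 2.6.6 the method returns a ListenableFuture, so the outcome can be checked with a callback. A sketch of an alternative endpoint inside the same controller (sendMsgWithCallback is a made-up name; the logging is illustrative; only the three extra imports are shown):
import org.springframework.kafka.support.SendResult;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;

// Inside KafkaMsgSendController: same send, but log the broker's answer
@PostMapping("/sendMsgWithCallback")
public String sendMsgWithCallback(@RequestBody String jsonString) {
    ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send(topicName, jsonString);
    future.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
        @Override
        public void onSuccess(SendResult<String, String> result) {
            // RecordMetadata carries the partition and offset the broker assigned
            System.out.println("Sent to partition " + result.getRecordMetadata().partition()
                    + " at offset " + result.getRecordMetadata().offset());
        }

        @Override
        public void onFailure(Throwable ex) {
            System.err.println("Send failed: " + ex.getMessage());
        }
    });
    return "Kafka Message Send OK!";
}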
Receiving messages (listening)
package priv.sinoam.listener;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.stereotype.Component;

import java.util.Optional;

@Component
@Slf4j
public class KafkaMessageListener {

    // The annotation declares the consumer group and the topic to listen to
    @KafkaListener(topics = "${kafka.topic.name}", groupId = "${kafka.group.id}", concurrency = "1")
    public void topic_test(ConsumerRecord<?, ?> record, Acknowledgment ack, @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) {
        Optional<?> message = Optional.ofNullable(record.value());
        if (message.isPresent()) {
            Object msg = message.get();
            try {
                // Put your processing logic for the received message here
                // Manual ack
                ack.acknowledge();
                log.debug("Kafka message consumed! Topic:" + topic + ", Message:" + msg);
                System.out.println("Kafka message consumed! Topic:" + topic + ", Message:" + msg);
            } catch (Exception e) {
                e.printStackTrace();
                log.error("Kafka consumption failed! Topic:" + topic + ", Message:" + msg, e);
                System.out.println("Kafka consumption failed! Topic:" + topic + ", Message:" + msg);
            }
        }
    }
}
Test results
client1 implements both send and receive: when the send endpoint is called, client1's listener receives the message.
client1 implements send and receive, client2 implements only receive, and both use the same group: when the send endpoint is called, client2 receives the message and client1 does not.
Same setup, but client2 is given a different group, so the two groups differ: when the send endpoint is called, both client1 and client2 receive the message, and client2 additionally receives all the earlier records it had never seen.
This matches Kafka's consumer-group semantics: within one group each record is delivered to exactly one member, while each group gets its own full copy of the stream; a brand-new group has no committed offset, so with auto-offset-reset: earliest it starts reading from the beginning of the partition. A compact in-process demo follows.
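To reproduce the broadcast behavior inside a single service, you could register two listeners on the same topic with different hard-coded groups; every message is then delivered once per group. A minimal sketch (GroupDemoListener and the DEMO_GROUP_* names are made up for the demo):
package priv.sinoam.listener;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

@Component
public class GroupDemoListener {

    // Both listeners subscribe to the same topic but belong to different groups,
    // so every message sent to the topic is printed twice, once per group.
    @KafkaListener(topics = "${kafka.topic.name}", groupId = "DEMO_GROUP_A")
    public void groupA(ConsumerRecord<?, ?> record, Acknowledgment ack) {
        System.out.println("group A got: " + record.value());
        ack.acknowledge();
    }

    @KafkaListener(topics = "${kafka.topic.name}", groupId = "DEMO_GROUP_B")
    public void groupB(ConsumerRecord<?, ?> record, Acknowledgment ack) {
        System.out.println("group B got: " + record.value());
        ack.acknowledge();
    }
}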
Problems
Broker not found: this happened because bootstrap-servers was set to localhost; writing the machine's real LAN IP fixed it. (The usual root cause is the address the broker advertises to clients; setting advertised.listeners in config/server.properties to the LAN IP is the server-side fix.)
Kafka version mismatch: remove the version tag from the spring-kafka dependency; the Spring Boot parent then resolves a compatible version automatically.
This project's pom.xml
Parent:
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.6.6</version>
    </parent>
    <groupId>priv.sinoam</groupId>
    <artifactId>kafka-test</artifactId>
    <packaging>pom</packaging>
    <version>1.0-SNAPSHOT</version>
    <modules>
        <module>kafka-client1</module>
        <module>kafka-client2</module>
    </modules>
    <properties>
        <maven.compiler.source>8</maven.compiler.source>
        <maven.compiler.target>8</maven.compiler.target>
    </properties>
    <dependencies>
        <!-- https://mvnrepository.com/artifact/com.baomidou/mybatis-plus-boot-starter -->
        <!--<dependency>
            <groupId>com.baomidou</groupId>
            <artifactId>mybatis-plus-boot-starter</artifactId>
            <version>3.5.1</version>
        </dependency>-->
        <!-- https://mvnrepository.com/artifact/mysql/mysql-connector-java -->
        <!--<dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>8.0.28</version>
        </dependency>-->
        <!-- https://mvnrepository.com/artifact/org.projectlombok/lombok -->
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <version>1.18.22</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka</artifactId>
        </dependency>
        <!-- Version left to the Spring Boot parent so it matches Boot 2.6.6 -->
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-starter-bootstrap</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-starter-openfeign</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-loadbalancer</artifactId>
        </dependency>
        <!-- Nacos client -->
        <dependency>
            <groupId>com.alibaba.cloud</groupId>
            <artifactId>spring-cloud-starter-alibaba-nacos-discovery</artifactId>
            <exclusions>
                <exclusion>
                    <groupId>org.springframework.cloud</groupId>
                    <artifactId>spring-cloud-starter-netflix-ribbon</artifactId>
                </exclusion>
            </exclusions>
        </dependency>
    </dependencies>
    <dependencyManagement>
        <dependencies>
            <!-- https://mvnrepository.com/artifact/org.springframework.cloud/spring-cloud-dependencies -->
            <dependency>
                <groupId>org.springframework.cloud</groupId>
                <artifactId>spring-cloud-dependencies</artifactId>
                <version>2021.0.1</version>
                <type>pom</type>
                <scope>import</scope>
            </dependency>
            <!-- Nacos dependencies -->
            <dependency>
                <groupId>com.alibaba.cloud</groupId>
                <artifactId>spring-cloud-alibaba-dependencies</artifactId>
                <version>2.2.5.RELEASE</version>
                <type>pom</type>
                <scope>import</scope>
            </dependency>
        </dependencies>
    </dependencyManagement>
</project>
client1 and client2 (for client2, change the artifactId to kafka-client2):
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <parent>
        <artifactId>kafka-test</artifactId>
        <groupId>priv.sinoam</groupId>
        <version>1.0-SNAPSHOT</version>
    </parent>
    <modelVersion>4.0.0</modelVersion>
    <artifactId>kafka-client1</artifactId>
    <properties>
        <maven.compiler.source>8</maven.compiler.source>
        <maven.compiler.target>8</maven.compiler.target>
    </properties>
</project>