Introduction

Related articles in the Kafka series:

  • Message Queue Kafka Getting Started (Part 1) -- Introduction and Core Concepts
  • Message Queue Kafka Getting Started (Part 2) -- Installation, Startup, and Visualization Tools

I. Runtime Environment

  • Windows 10
  • JDK 11
  • Kafka (the kafka_2.13 build; 2.13 is the Scala version of the distribution)

II. Integrating Kafka into a Spring Boot Project

1. Add the spring-kafka Maven dependency:

<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>2.5.10.RELEASE</version>
</dependency>
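
For reference, the controllers and listeners in the rest of this article assume a standard Spring Boot entry point. A minimal sketch (the class name is our own choice, not from the sample project):

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

// Hypothetical entry point for the sample project. With spring-kafka on
// the classpath, Spring Boot auto-configures KafkaTemplate and the
// listener containers from application.yml.
@SpringBootApplication
public class KafkaDemoApplication {
    public static void main(String[] args) {
        SpringApplication.run(KafkaDemoApplication.class, args);
    }
}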

2. Configure application.yml

spring:
  application:
    name: application-kafka
  kafka:
    bootstrap-servers: localhost:9092 # Kafka broker address; must match the listener configured in server.properties
    producer:
      batch-size: 16384 # batch size in bytes
      acks: -1 # ack level: how many replicas must confirm the write before the producer receives an ack (0, 1, or all/-1)
      retries: 10 # number of send retries
      #transaction-id-prefix: transaction
      buffer-memory: 33554432
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
      properties:
        linger:
          ms: 2000 # how long to wait for more records before sending a batch
        #partitioner: # custom partitioner
          #class: pers.zhang.config.CustomerPartitionHandler
    consumer:
      group-id: testGroup # default consumer group ID
      enable-auto-commit: true # whether to auto-commit offsets
      auto-commit-interval: 2000 # auto-commit interval in ms
      # What to do when Kafka has no initial offset, or the offset is out of range:
      # earliest: reset to the smallest offset in the partition;
      # latest: reset to the latest offset (consume only data newly produced to the partition);
      # none: throw an exception if any partition has no committed offset;
      auto-offset-reset: latest
      max-poll-records: 500 # maximum number of records returned by a single poll
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      properties:
        session:
          timeout:
            ms: 120000 # session timeout; if the consumer sends no heartbeat within this window, a rebalance is triggered
        request:
          timeout:
            ms: 18000 # consumer request timeout
    listener:
      missing-topics-fatal: false # if true, startup fails when a listened-to topic does not exist; false lets the app start anyway
#      type: batch
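
The examples below produce to and consume from sb_topic. If your broker does not auto-create topics, you can declare them at application startup; a minimal sketch using spring-kafka's NewTopic support (the partition and replica counts here are assumptions for a single-broker local setup):

import org.apache.kafka.clients.admin.NewTopic;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.TopicBuilder;

@Configuration
public class TopicConfig {

    // Spring Boot auto-configures a KafkaAdmin, which creates this topic
    // on startup if it does not already exist.
    @Bean
    public NewTopic sbTopic() {
        return TopicBuilder.name("sb_topic")
                .partitions(3)  // assumed partition count
                .replicas(1)    // single local broker
                .build();
    }
}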

3. Kafka producer

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaOperations;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.util.concurrent.FailureCallback;
import org.springframework.util.concurrent.ListenableFutureCallback;
import org.springframework.util.concurrent.SuccessCallback;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class KafkaProducerController {

    @Autowired
    private KafkaTemplate<String, Object> kafkaTemplate;

    /**
     * 1. Simple producer.
     * @param message
     */
    @GetMapping("/kafka/normal/{message}")
    public void sendNormalMessage(@PathVariable("message") String message) {
        kafkaTemplate.send("sb_topic", message);
    }


    /**
     * 2. Producer with callbacks, first style:
     * separate success and failure callbacks.
     * @param message
     */
    @GetMapping("/kafka/callbackOne/{message}")
    public void sendCallbackOneMessage(@PathVariable("message") String message) {
        kafkaTemplate.send("sb_topic", message).addCallback(new SuccessCallback<SendResult<String, Object>>() {
            // success callback
            @Override
            public void onSuccess(SendResult<String, Object> success) {
                // topic the message was sent to
                String topic = success.getRecordMetadata().topic();
                // partition the message was sent to
                int partition = success.getRecordMetadata().partition();
                // offset of the message within the partition
                long offset = success.getRecordMetadata().offset();
                System.out.println("send succeeded 1: " + topic + "-" + partition + "-" + offset);
            }
        }, new FailureCallback() {
            // failure callback
            @Override
            public void onFailure(Throwable throwable) {
                System.out.println("send failed 1: " + throwable.getMessage());
            }
        });
    }


    /**
     * 3. Producer with callbacks, second style:
     * a single ListenableFutureCallback.
     * @param message
     */
    @GetMapping("/kafka/callbackTwo/{message}")
    public void sendCallbackTwoMessage(@PathVariable("message") String message) {
        kafkaTemplate.send("sb_topic", message).addCallback(new ListenableFutureCallback<SendResult<String, Object>>() {
            @Override
            public void onFailure(Throwable throwable) {
                System.out.println("send failed 2: " + throwable.getMessage());
            }

            @Override
            public void onSuccess(SendResult<String, Object> result) {
                System.out.println("send succeeded 2: " + result.getRecordMetadata().topic() + "-"
                        + result.getRecordMetadata().partition() + "-" + result.getRecordMetadata().offset());
            }
        });
    }

    /**
     * 4. Transactional producer (requires transaction-id-prefix in application.yml, see section 6).
     * @param message
     */
    @GetMapping("/kafka/transaction/{message}")
    public void sendTransactionMessage(@PathVariable("message") String message) {
        // With a declared transaction: the exception below aborts it, so the message is never delivered.
        kafkaTemplate.executeInTransaction(new KafkaOperations.OperationsCallback<String, Object, Object>() {

            @Override
            public Object doInOperations(KafkaOperations<String, Object> operations) {
                operations.send("sb_topic", message + " test executeInTransaction");
                throw new RuntimeException("fail");
            }
        });
        // // Without a transaction: the exception is thrown after the send, but the message has already gone out.
        // kafkaTemplate.send("sb_topic", message + " test executeInNoTransaction");
        // throw new RuntimeException("fail");
    }
}
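
The commented-out partitioner.class entry in the YAML above points to a custom partitioner; uncommenting those two lines wires it into the producer. As a rough sketch of what pers.zhang.config.CustomerPartitionHandler might look like (the routing rule below is an assumption for illustration, not the project's actual logic), it implements Kafka's Partitioner interface:

import java.util.Arrays;
import java.util.Map;
import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;

public class CustomerPartitionHandler implements Partitioner {

    @Override
    public int partition(String topic, Object key, byte[] keyBytes,
                         Object value, byte[] valueBytes, Cluster cluster) {
        int numPartitions = cluster.partitionCountForTopic(topic);
        // Illustrative rule (assumed): keyless records go to partition 0,
        // keyed records are spread by a hash of the key bytes.
        if (keyBytes == null) {
            return 0;
        }
        return (Arrays.hashCode(keyBytes) & 0x7fffffff) % numPartitions;
    }

    @Override
    public void configure(Map<String, ?> configs) {
        // no configuration needed for this sketch
    }

    @Override
    public void close() {
        // nothing to release
    }
}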

4. Kafka consumer

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

@Component
public class KafkaConsumer {

    // simple listener
    @KafkaListener(topics = {"sb_topic"}, groupId = "testGroup")
    public void onNormalMessage(ConsumerRecord<String, Object> record) {
        System.out.println("simple consume: " + record.topic() + "-" + record.partition() + "=" +
                record.value());
    }

}

The listener variants below also belong inside this KafkaConsumer class (@TopicPartition and @PartitionOffset are imported from org.springframework.kafka.annotation); each shows a different way to subscribe.

/**
 * Listen to multiple topics.
 *
 * @param record
 */
@KafkaListener(topics = {"sb_topic", "sb_topic1"}, groupId = "testGroup")
public void topics(ConsumerRecord<String, String> record) {
    System.out.println("entered topics method");
    System.out.printf(
            "topic = %s, partition = %d, offset = %d, key = %s, value = %s, record timestamp = %d%n",
            record.topic(),
            record.partition(),
            record.offset(),
            record.key(),
            record.value(),
            record.timestamp()
    );
}
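
spring-kafka can also inject record metadata through annotations instead of handing you the raw ConsumerRecord. A minimal sketch (like the other variants, it goes inside the KafkaConsumer class; topic and group names follow the earlier examples):

import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.messaging.handler.annotation.Payload;

// The payload and selected headers are bound directly to method parameters.
@KafkaListener(topics = {"sb_topic"}, groupId = "testGroup")
public void onMessageWithHeaders(@Payload String message,
                                 @Header(KafkaHeaders.RECEIVED_TOPIC) String topic,
                                 @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partition) {
    System.out.println("topic = " + topic + ", partition = " + partition + ", value = " + message);
}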


/**
 * Listen to one topic, restricted to specific partitions.
 * Parameters: consumer group = APPLE_GROUP; topic = IPHONE_TOPIC;
 * only partitions 1 and 2 are consumed; concurrency = 2 consumers.
 * @param record
 */
@KafkaListener(
        groupId = "APPLE_GROUP",
        topicPartitions = {
                @TopicPartition(topic = "IPHONE_TOPIC", partitions = {"1", "2"})
        },
        concurrency = "2"
)
public void consumeByPartitions(ConsumerRecord<String, String> record) {
    System.out.println("consumeByPartitions");
    System.out.printf(
            "topic = %s, partition = %d, offset = %d, key = %s, value = %s, record timestamp = %d%n",
            record.topic(),
            record.partition(),
            record.offset(),
            record.key(),
            record.value(),
            record.timestamp()
    );
}

/**
 * Listen to specific partitions, and for some of them start from a given offset.
 * concurrency = "10" starts 10 listener threads (10 consumers); since only
 * 4 partitions are assigned here, the extra consumers stay idle.
 */
@KafkaListener(
        groupId = "APPLE_GROUP",
        topicPartitions = {
                @TopicPartition(
                        topic = "IPAD_TOPIC",
                        partitions = {"0", "1"},
                        partitionOffsets = {
                                @PartitionOffset(partition = "2", initialOffset = "10"),
                                @PartitionOffset(partition = "3", initialOffset = "0")
                        }
                )
        },
        concurrency = "10"
)
public void consumeByPartitionOffsets(ConsumerRecord<String, String> record) {
    System.out.println("consumeByPartitionOffsets");
    System.out.printf(
            "topic = %s, partition = %d, offset = %d, key = %s, value = %s, record timestamp = %d%n",
            record.topic(),
            record.partition(),
            record.offset(),
            record.key(),
            record.value(),
            record.timestamp()
    );
}
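
The commented-out listener type: batch at the end of the YAML switches listeners to batch mode, where each poll is delivered as a list of up to max-poll-records records. A minimal sketch of a batch listener (it assumes that line is uncommented; topic and group names follow the earlier examples):

import java.util.List;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;

// With spring.kafka.listener.type=batch, the container hands over the
// whole batch from one poll instead of one record at a time.
@KafkaListener(topics = {"sb_topic"}, groupId = "testGroup")
public void onBatch(List<ConsumerRecord<String, String>> records) {
    System.out.println("batch size = " + records.size());
    for (ConsumerRecord<String, String> record : records) {
        System.out.println(record.topic() + "-" + record.partition() + "=" + record.value());
    }
}

Note that batch mode applies to the whole auto-configured listener container factory, so the single-record listeners above would also have to be converted to take lists.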

5. Kafka producer listener configuration

Spring Kafka provides the ProducerListener interface for asynchronously observing whether produced messages were sent successfully. By building a custom KafkaTemplate with a ProducerListener attached, we can capture failed messages and retry them, or record them in a database for scheduled retry.

import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.support.ProducerListener;

@Configuration
public class KafkaConfig {

    @Autowired
    ProducerFactory<String, Object> producerFactory;

    @Bean
    public KafkaTemplate<String, Object> kafkaTemplate() {
        KafkaTemplate<String, Object> kafkaTemplate = new KafkaTemplate<>(producerFactory);
        // A sent message triggers both the ListenableFutureCallback and this ProducerListener.
        kafkaTemplate.setProducerListener(new ProducerListener<String, Object>() {
            @Override
            public void onSuccess(ProducerRecord<String, Object> producerRecord, RecordMetadata recordMetadata) {
                System.out.println("send succeeded: " + producerRecord.toString());
            }

            @Override
            public void onError(ProducerRecord<String, Object> producerRecord, RecordMetadata recordMetadata, Exception exception) {
                System.out.println("send failed: " + producerRecord.toString());
                System.out.println(exception.getMessage());
            }

        });
        return kafkaTemplate;
    }
}

Note: when a message is sent, both the ListenableFutureCallback and the ProducerListener are invoked.

6. Transactional sends

If you need transactional guarantees when sending, you can declare a transaction with KafkaTemplate's executeInTransaction method:

@GetMapping("/kafka/transaction/{message}")
public void sendTransactionMessage(@PathVariable("message") String message) {
    //声明事务:后面报错消息不会发出去
    kafkaTemplate.executeInTransaction(new KafkaOperations.OperationsCallback<String, Object, Object>() {

        @Override
        public Object doInOperations(KafkaOperations<String, Object> operations) {
            operations.send("sb_topic", message + " test executeInTransaction");
            throw new RuntimeException("fail");
        }
    });
    // //不声明事务:后面报错但前面消息已经发送成功了
    // kafkaTemplate.send("sb_topic", message + " test executeInNoTransaction");
    // throw new RuntimeException("fail");
}

Note: to use transactions you must set a transaction ID prefix in application.yml. Once the prefix is set, every send through that KafkaTemplate must run inside a transaction:

spring:
  kafka:
    producer:
      transaction-id-prefix: tx_ # transaction id prefix
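
Besides executeInTransaction, spring-kafka also honors Spring's @Transactional on producer code; with the prefix above set, Spring Boot should auto-configure the required KafkaTransactionManager. A minimal sketch (the service class below is our own invention, not from the sample project):

import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

@Service
public class TransactionalSender {

    private final KafkaTemplate<String, Object> kafkaTemplate;

    public TransactionalSender(KafkaTemplate<String, Object> kafkaTemplate) {
        this.kafkaTemplate = kafkaTemplate;
    }

    // The send is committed only if this method returns normally;
    // a thrown exception aborts the Kafka transaction.
    @Transactional
    public void send(String message) {
        kafkaTemplate.send("sb_topic", message);
    }
}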

Full sample source for this article: click to download