Thread launcher class
package com.zkdj.message;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import com.zkdj.message.server.KafkaServer;

/**
 * Multi-threaded launcher: decides how many threads run the consumer task.
 */
public class Main {
    public static void main(String[] args) {
        // Start the service: create a fixed thread pool of 10 workers
        ExecutorService executorService = Executors.newFixedThreadPool(10);
        ConsumerTask task = new ConsumerTask();
        // Submit 20 tasks; with a pool of 10 and tasks that loop forever,
        // only the first 10 ever run and the remaining 10 stay queued
        for (int i = 0; i < 20; i++) {
            executorService.submit(task);
        }
        // Stop accepting new tasks (the running consumers keep polling)
        executorService.shutdown();
    }
}

class ConsumerTask implements Runnable {
    @Override
    public void run() {
        // Each worker thread runs its own Kafka consumer
        KafkaServer server = new KafkaServer();
        server.start();
    }
}
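Because each ConsumerTask loops forever, shutdown() returns immediately while the workers keep polling, and the JVM only exits when the process is killed. A minimal sketch, not part of the original code, of a JVM shutdown hook that interrupts the workers and waits briefly; recent kafka-clients versions turn the interrupt into an org.apache.kafka.common.errors.InterruptException inside poll(), which ends the task. The names GracefulMain and WORKER_POOL are illustrative.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class GracefulMain {
    private static final ExecutorService WORKER_POOL = Executors.newFixedThreadPool(10);

    public static void main(String[] args) {
        // On Ctrl+C / SIGTERM, interrupt the workers instead of leaving them in poll()
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            WORKER_POOL.shutdownNow(); // interrupts worker threads
            try {
                WORKER_POOL.awaitTermination(10, TimeUnit.SECONDS);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }));
        for (int i = 0; i < 10; i++) {
            WORKER_POOL.submit(new ConsumerTask());
        }
    }
}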
Kafka message consumer class
package com.zkdj.message.server;
import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.log4j.Logger;
import com.zkdj.message.common.ConfigManager;
import com.zkdj.message.common.Constants.Pks;
import net.sf.json.JSONArray;
import net.sf.json.JSONObject;
/**
 * Concurrent Kafka message consumer.
 *
 * @author sxj
 */
public class KafkaServer {
    protected static final ConfigManager config = ConfigManager.getInstance();
    private Logger log = Logger.getLogger("msg");
    private static int dataSize = 0;
    private static Lock lock = new ReentrantLock();

    /**
     * Start the service:
     * 1. One thread fetches OSS file metadata (file names only) and puts the
     *    names into a local collection.
     * 2. Multiple threads consume the file-name collection:
     *    2-1. Each thread fetches the file content, deletes the tmp file, and
     *         uploads it to the save store.
     *    2-2. While deleting tmp and uploading to save, each thread also sends
     *         a POST request to the analysis service.
     */
    @SuppressWarnings("resource")
    public void start() {
        log.info("starting service...");
        // Maximum number of documents per batch
        int maxDocumentNumber = Integer.parseInt(config.get(Pks.MAX_DOCUMENT_NUMBER));
        try {
            Properties props = new Properties();
            props.put("bootstrap.servers",
                    "xgsj-kafka.istarshine.com:9092,xgsj-kafka.istarshine.com:9093,xgsj-kafka.istarshine.com:9094");
            props.put("group.id", "g_zk");
            props.put("enable.auto.commit", "true");
            props.put("auto.offset.reset", "latest");
            props.put("auto.commit.interval.ms", "1000");
            props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            // ACL authorization: SASL/PLAIN over plaintext
            props.put("security.protocol", "SASL_PLAINTEXT");
            props.put("sasl.mechanism", "PLAIN");
            // Path to the JAAS login configuration file
            System.setProperty("java.security.auth.login.config", "/opt/hw/pull/kafka_client_jaasecs.conf");
            log.info("creating KafkaConsumer, waiting for data");
            KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
            consumer.subscribe(Collections.singletonList("user_subject_XXX"));
            while (true) {
                JSONArray weiboArticle = new JSONArray();
                ConsumerRecords<String, String> records = consumer.poll(1000);
                for (ConsumerRecord<String, String> record : records) {
                    weiboArticle.add(JSONObject.fromObject(record.value()));
                }
                // Process the batch
                // TODO
                lock.lock();
                try {
                    dataSize += weiboArticle.size();
                } finally {
                    lock.unlock();
                }
                log.info("batch size: " + weiboArticle.size() + ", total consumed: " + dataSize);
            }
        } catch (Exception ex) {
            log.error("error while consuming from Kafka: " + ex.getMessage(), ex);
        }
    }
}
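The @SuppressWarnings("resource") above hides a real issue: the consumer is never closed, so its sockets and group membership are only released when the process dies. A minimal sketch, not the original implementation, of the standard Kafka shutdown pattern: wakeup() is the one KafkaConsumer method that is safe to call from another thread, and it makes a blocked poll() throw WakeupException so the loop can close the consumer. The class name ClosableConsumerLoop is illustrative; the Kafka API calls are real.

import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;

public class ClosableConsumerLoop implements Runnable {
    private final KafkaConsumer<String, String> consumer;
    private final AtomicBoolean running = new AtomicBoolean(true);

    public ClosableConsumerLoop(Properties props, String topic) {
        this.consumer = new KafkaConsumer<>(props);
        this.consumer.subscribe(Collections.singletonList(topic));
    }

    @Override
    public void run() {
        try {
            while (running.get()) {
                ConsumerRecords<String, String> records = consumer.poll(1000);
                for (ConsumerRecord<String, String> record : records) {
                    // handle record.value() here
                }
            }
        } catch (WakeupException e) {
            // expected after shutdown(); fall through to close()
        } finally {
            consumer.close(); // leaves the group and releases sockets
        }
    }

    /** Safe to call from any thread, e.g. from a JVM shutdown hook. */
    public void shutdown() {
        running.set(false);
        consumer.wakeup();
    }
}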