
Java Kafka monitoring: message count per topic, and each topic's offsets

This post uses the Kafka Java API to monitor the message-count growth and offsets of selected topics: query the per-topic totals on a schedule, diff consecutive samples, and you get each topic's increment per interval (see the sketch right below). Kafka monitoring usually tracks throughput, i.e. the byte volume, and does not care about the message count; here the count is exactly the point, so we count it.
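To turn the totals into a per-interval increment, a minimal sketch of the sampling loop, assuming the KafkaMonitorTools class defined below; the broker address, topic name, and 60-second interval are placeholders:

import com.google.common.collect.Lists;

import java.util.List;
import java.util.Map;

public class KafkaCountDelta {

    public static void main(String[] args) throws InterruptedException {
        List<String> brokers = Lists.newArrayList("localhost:9092"); // placeholder broker
        List<String> topics = Lists.newArrayList("my-topic");        // placeholder topic
        Map<String, Long> previous = KafkaMonitorTools.monitor(brokers, topics);
        while (true) {
            Thread.sleep(60_000L); // sampling interval, 60s here
            Map<String, Long> current = KafkaMonitorTools.monitor(brokers, topics);
            for (Map.Entry<String, Long> e : current.entrySet()) {
                long before = previous.getOrDefault(e.getKey(), 0L);
                // difference of two snapshots = messages produced during the interval
                System.out.println(e.getKey() + ": +" + (e.getValue() - before));
            }
            previous = current;
        }
    }
}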

Jar dependency used (Gradle):

compile group: 'org.apache.kafka', name: 'kafka_2.10', version: '0.8.0'
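For reference, the same coordinates in Maven form (taken from the Gradle line above) would be:

<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.10</artifactId>
    <version>0.8.0</version>
</dependency>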

Java code:

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.PartitionMetadata;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.consumer.SimpleConsumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Date;
import java.util.List;
import java.util.Map;

/**
 * Kafka monitoring: data consumption status of topics
 *
 * @author LiXuekai on 2020/9/16
 */
public class KafkaMonitorTools {

    private static final Logger LOGGER = LoggerFactory.getLogger(KafkaMonitorTools.class);

    /**
     * Fetches the last offset of a partition before the given time;
     * passing LatestTime() yields the current log-end offset.
     */
    public static long getLastOffset(SimpleConsumer consumer, String topic, int partition, long whichTime, String clientName) {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = Maps.newHashMap();
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
        kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
        OffsetResponse response = consumer.getOffsetsBefore(request);
        if (response.hasError()) {
            LOGGER.error("Error fetching offset data from the broker. Reason: " + response.errorCode(topic, partition));
            return 0;
        }
        long[] offsets = response.offsets(topic, partition);
        return offsets[0];
    }

    /**
     * @param brokers broker addresses ("host:port")
     * @param topic   topic name
     * @return partition id -> partition metadata (including the leader broker)
     */
    public static Map<Integer, PartitionMetadata> findLeader(List<String> brokers, String topic) {
        Map<Integer, PartitionMetadata> map = Maps.newHashMap();
        for (String broker : brokers) {
            SimpleConsumer consumer = null;
            try {
                String[] hostAndPort = broker.split(":");
                consumer = new SimpleConsumer(hostAndPort[0], Integer.parseInt(hostAndPort[1]), 100000, 64 * 1024, "leaderLookup" + new Date().getTime());
                List<String> topics = Lists.newArrayList(topic);
                TopicMetadataRequest req = new TopicMetadataRequest(topics);
                kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);
                List<TopicMetadata> metaData = resp.topicsMetadata();
                for (TopicMetadata item : metaData) {
                    for (PartitionMetadata part : item.partitionsMetadata()) {
                        map.put(part.partitionId(), part);
                    }
                }
            } catch (Exception e) {
                LOGGER.error("Error communicating with broker [" + broker + "] to find leader for [" + topic + "]. Reason: " + e);
            } finally {
                if (consumer != null) {
                    consumer.close();
                }
            }
        }
        return map;
    }

    /**
     * @param brokers broker addresses
     * @param topics  topic names
     * @return topic name -> sum of the latest offsets of all its partitions
     */
    public static Map<String, Long> monitor(List<String> brokers, List<String> topics) {
        if (brokers == null || brokers.isEmpty()) {
            return null;
        }
        if (topics == null || topics.isEmpty()) {
            return null;
        }
        Map<String, Long> map = Maps.newTreeMap();
        for (String topicName : topics) {
            Map<Integer, PartitionMetadata> metadata = findLeader(brokers, topicName);
            long size = 0L;
            for (Map.Entry<Integer, PartitionMetadata> entry : metadata.entrySet()) {
                int partition = entry.getKey();
                PartitionMetadata meta = entry.getValue();
                if (meta.leader() == null) {
                    // guard against leaderless partitions, which would otherwise NPE
                    LOGGER.warn("No leader for partition " + partition + " of topic " + topicName + ", skipping");
                    continue;
                }
                String leadBroker = meta.leader().host();
                String clientName = "Client_" + topicName + "_" + partition;
                SimpleConsumer consumer = new SimpleConsumer(leadBroker, meta.leader().port(), 100000, 64 * 1024, clientName);
                long readOffset = getLastOffset(consumer, topicName, partition, kafka.api.OffsetRequest.LatestTime(), clientName);
                size += readOffset;
                consumer.close();
            }
            map.put(topicName, size);
        }
        return map;
    }
}
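The kafka_2.10 0.8.x SimpleConsumer API used above is long deprecated. On clusters running Kafka 0.10.1 or newer, the same per-topic total can be read with the modern kafka-clients consumer via endOffsets(...). A minimal sketch, assuming a kafka-clients dependency; the class name KafkaEndOffsets is illustrative:

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.stream.Collectors;

public class KafkaEndOffsets {

    /** Sums the log-end offsets of all partitions of a topic. */
    public static long totalEndOffset(String bootstrapServers, String topic) {
        Properties props = new Properties();
        props.put("bootstrap.servers", bootstrapServers);
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // list the topic's partitions, then ask for each partition's log-end offset
            List<TopicPartition> partitions = consumer.partitionsFor(topic).stream()
                    .map(p -> new TopicPartition(p.topic(), p.partition()))
                    .collect(Collectors.toList());
            Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
            return endOffsets.values().stream().mapToLong(Long::longValue).sum();
        }
    }
}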

Test code:

@Test
public void monitor() {
    // `server` (broker "host:port") and `topics` (comma-separated topic names)
    // are fields of the test class
    Map<String, Long> monitor = KafkaMonitorTools.monitor(Lists.newArrayList(server), Lists.newArrayList(topics.split(",")));
    monitor.forEach((k, v) -> System.out.println(k + " " + v));
}
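Because monitor(...) collects its results into a TreeMap, the topics print in alphabetical order, one line per topic with the summed latest offsets of its partitions.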

Run result screenshot:

(screenshot omitted: each topic name printed with its total message count)
