ソースを参照

增加kafka 管理和消费组管理接口

sunhy 3 週間 前
コミット
e1071206b9

+ 1 - 1
.idea/misc.xml

@@ -8,7 +8,7 @@
       </list>
     </option>
   </component>
-  <component name="ProjectRootManager" version="2" project-jdk-name="corretto-1.8" project-jdk-type="JavaSDK">
+  <component name="ProjectRootManager" version="2" languageLevel="JDK_1_8" project-jdk-name="temurin-1.8" project-jdk-type="JavaSDK">
     <output url="file://$PROJECT_DIR$/out" />
   </component>
 </project>

+ 99 - 4
README.md

@@ -6,11 +6,12 @@
 
 ## 系统架构
 
-系统由三个核心模块组成:
+系统由四个核心模块组成:
 
 1. **schedule-producer**: 任务生产者模块,负责创建和发送任务到Kafka消息队列
 2. **schedule-consumer**: 任务消费者模块,负责从Kafka消费并处理任务
 3. **schedule-manager**: 管理模块,负责监控实例状态、管理Docker容器和动态扩缩容
+4. **schedule-monitor**: Kafka监控模块,提供Topic和消费组的监控管理功能
 
 ## 技术栈
 
@@ -70,6 +71,22 @@
 - 任务积压阈值: 100
 - GPU负载阈值: 80%
 
+### schedule-monitor
+
+Kafka监控模块,提供Topic和消费组的监控管理功能。
+
+**主要功能:**
+- Topic管理:获取Topic列表(支持分页)、创建Topic、删除Topic、修改Topic分区
+- Topic详情:查询指定Topic的详细信息,包括分区、消息数、副本数等
+- 分区消息查询:获取指定分区的消息列表
+- 消费组管理:获取消费组列表、消费组详情、删除消费组
+- 消费组监控:显示消费组成员、消费偏移量、消费延迟等信息
+
+**配置说明:**
+- 服务端口: 8094
+- Kafka服务器: 10.192.72.13:9092
+- 监控消费者组: monitor-group
+
 ## 环境要求
 
 - JDK 8+
@@ -113,7 +130,7 @@ mvn clean package
 
 ### 5. 启动服务
 
-分别启动三个模块:
+分别启动四个模块:
 
 ```bash
 # 启动Producer
@@ -124,6 +141,9 @@ java -jar schedule-consumer/target/schedule-consumer.jar
 
 # 启动Manager
 java -jar schedule-manager/target/schedule-manager.jar
+
+# 启动Monitor
+java -jar schedule-monitor/target/schedule-monitor.jar
 ```
 解析服务镜像打包:
 ```bash
@@ -152,6 +172,55 @@ POST http://localhost:8081/api/send
 
 Manager模块会自动监控任务处理情况和实例状态,并根据配置的阈值进行扩缩容。
 
+### Kafka监控
+
+通过Monitor模块提供的RESTful API监控Kafka:
+
+- **获取Topic列表(分页)**:
+  ```bash
+  curl "http://localhost:8094/api/kafka/monitor/topics?page=1&pageSize=10"
+  ```
+
+- **获取Topic详情**:
+  ```bash
+  curl "http://localhost:8094/api/kafka/monitor/topics/test-topic"
+  ```
+
+- **创建Topic**:
+  ```bash
+  curl -X POST "http://localhost:8094/api/kafka/monitor/topics" -H "Content-Type: application/json" -d "{\"topicName\":\"new-topic\",\"partitions\":3,\"replicationFactor\":1}"
+  ```
+
+- **删除Topic**:
+  ```bash
+  curl -X DELETE "http://localhost:8094/api/kafka/monitor/topics/test-topic"
+  ```
+
+- **修改Topic分区**:
+  ```bash
+  curl -X POST "http://localhost:8094/api/kafka/monitor/topics/test-topic/partitions" -H "Content-Type: application/json" -d "{\"newPartitionCount\":5}"
+  ```
+
+- **获取分区消息**:
+  ```bash
+  curl "http://localhost:8094/api/kafka/monitor/topics/test-topic/partitions/0/messages?startOffset=0&maxRecords=10"
+  ```
+
+- **获取消费组列表**:
+  ```bash
+  curl "http://localhost:8094/api/kafka/monitor/consumer-groups"
+  ```
+
+- **获取消费组详情**:
+  ```bash
+  curl "http://localhost:8094/api/kafka/monitor/consumer-groups/group-1"
+  ```
+
+- **删除消费组**:
+  ```bash
+  curl -X DELETE "http://localhost:8094/api/kafka/monitor/consumer-groups/group-1"
+  ```
+
 ## 配置说明
 
 ### Kafka配置
@@ -168,6 +237,25 @@ Manager模块提供了丰富的监控配置选项:
 - `task-backlog-threshold`: 任务积压阈值
 - `gpu-load-threshold`: GPU负载阈值
 
+### 监控模块配置 (schedule-monitor/src/main/resources/application.yml)
+
+```yaml
+server:
+  port: 8094
+
+spring:
+  application:
+    name: schedule-monitor
+  kafka:
+    bootstrap-servers: 10.192.72.13:9092
+    consumer:
+      group-id: monitor-group
+      enable-auto-commit: true
+      auto-offset-reset: earliest
+      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
+      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
+```
+
 ## 项目结构
 
 ```
@@ -187,9 +275,16 @@ four-level-schedule/
 │       │   ├── java/
 │       │   └── resources/
 │       └── test/
-└── schedule-manager/                # 管理模块
+├── schedule-manager/                # 管理模块
+│   ├── pom.xml
+│   ├── doc/                         # 文档目录
+│   └── src/
+│       ├── main/
+│       │   ├── java/
+│       │   └── resources/
+│       └── test/
+└── schedule-monitor/                # Kafka监控模块
     ├── pom.xml
-    ├── doc/                         # 文档目录
     └── src/
         ├── main/
         │   ├── java/

+ 1 - 0
pom.xml

@@ -12,6 +12,7 @@
         <module>schedule-producer</module>
         <module>schedule-consumer</module>
         <module>schedule-manager</module>
+        <module>schedule-monitor</module>
     </modules>
 
     <!-- 统一属性管理 -->

+ 60 - 0
schedule-monitor/pom.xml

@@ -0,0 +1,60 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <groupId>org.example</groupId>
+        <artifactId>four-level-schedule</artifactId>
+        <version>1.0-SNAPSHOT</version>
+    </parent>
+
+    <artifactId>schedule-monitor</artifactId>
+    <packaging>jar</packaging>
+    <name>Schedule Monitor</name>
+
+    <properties>
+        <maven.compiler.source>8</maven.compiler.source>
+        <maven.compiler.target>8</maven.compiler.target>
+        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot-starter-web</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework.kafka</groupId>
+            <artifactId>spring-kafka</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka-clients</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.projectlombok</groupId>
+            <artifactId>lombok</artifactId>
+            <optional>true</optional>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <finalName>schedule-monitor</finalName>
+        <plugins>
+            <plugin>
+                <groupId>org.springframework.boot</groupId>
+                <artifactId>spring-boot-maven-plugin</artifactId>
+                <version>${spring-boot.version}</version>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>repackage</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+
+</project>

+ 11 - 0
schedule-monitor/src/main/java/cn/com/yusys/monitor/MonitorApplication.java

@@ -0,0 +1,11 @@
+package cn.com.yusys.monitor;
+
+import org.springframework.boot.SpringApplication;
+import org.springframework.boot.autoconfigure.SpringBootApplication;
+
+@SpringBootApplication
+public class MonitorApplication {
+    public static void main(String[] args) {
+        SpringApplication.run(MonitorApplication.class, args);
+    }
+}

+ 169 - 0
schedule-monitor/src/main/java/cn/com/yusys/monitor/controller/KafkaMonitorController.java

@@ -0,0 +1,169 @@
+package cn.com.yusys.monitor.controller;
+
+import cn.com.yusys.monitor.service.KafkaMonitorService;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.http.ResponseEntity;
+import org.springframework.web.bind.annotation.DeleteMapping;
+import org.springframework.web.bind.annotation.GetMapping;
+import org.springframework.web.bind.annotation.PathVariable;
+import org.springframework.web.bind.annotation.PostMapping;
+import org.springframework.web.bind.annotation.RequestBody;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.RequestParam;
+import org.springframework.web.bind.annotation.RestController;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+@RestController
+@RequestMapping("/api/kafka/monitor")
+public class KafkaMonitorController {
+
+    @Autowired
+    private KafkaMonitorService kafkaMonitorService;
+
+    /**
+     * 获取指定topic的分区信息
+     */
+    @GetMapping("/topics/{topic}/partitions")
+    public ResponseEntity<Map<String, Object>> getTopicPartitions(@PathVariable String topic) {
+        Map<String, Object> result = kafkaMonitorService.getTopicPartitions(topic);
+        return ResponseEntity.ok(result);
+    }
+
+    /**
+     * 获取指定topic各个分区的任务执行情况
+     */
+    @GetMapping("/topics/{topic}/task-status")
+    public ResponseEntity<Map<String, Object>> getPartitionTaskStatus(@PathVariable String topic) {
+        Map<String, Object> result = kafkaMonitorService.getPartitionTaskStatus(topic);
+        return ResponseEntity.ok(result);
+    }
+
+    /**
+     * 获取默认测试topic的分区信息
+     */
+    @GetMapping("/test-topic/partitions")
+    public ResponseEntity<Map<String, Object>> getTestTopicPartitions() {
+        Map<String, Object> result = kafkaMonitorService.getTestTopicPartitions();
+        return ResponseEntity.ok(result);
+    }
+
+    /**
+     * 获取默认测试topic的任务执行情况
+     */
+    @GetMapping("/test-topic/task-status")
+    public ResponseEntity<Map<String, Object>> getTestTopicTaskStatus() {
+        Map<String, Object> result = kafkaMonitorService.getTestTopicTaskStatus();
+        return ResponseEntity.ok(result);
+    }
+
+    /**
+     * 获取所有kafka topic(带分页)
+     */
+    @GetMapping("/topics")
+    public ResponseEntity<Map<String, Object>> getAllTopics(
+            @RequestParam(defaultValue = "1") int page,
+            @RequestParam(defaultValue = "10") int pageSize) {
+        Map<String, Object> result = kafkaMonitorService.getAllTopics(page, pageSize);
+        return ResponseEntity.ok(result);
+    }
+
+    /**
+     * 根据topic名称查询详情
+     */
+    @GetMapping("/topics/{topic}")
+    public ResponseEntity<Map<String, Object>> getTopicDetail(
+            @PathVariable String topic) {
+        Map<String, Object> topicDetail = kafkaMonitorService.getTopicDetail(topic);
+        return ResponseEntity.ok(topicDetail);
+    }
+
+    /**
+     * 获取分区消息列表
+     */
+    @GetMapping("/topics/{topic}/partitions/{partition}/messages")
+    public ResponseEntity<List<Map<String, Object>>> getPartitionMessages(
+            @PathVariable String topic,
+            @PathVariable int partition,
+            @RequestParam(defaultValue = "0") long startOffset,
+            @RequestParam(defaultValue = "10") int maxRecords) {
+        List<Map<String, Object>> messages = kafkaMonitorService.getPartitionMessages(topic, partition, startOffset, maxRecords);
+        return ResponseEntity.ok(messages);
+    }
+
+    /**
+     * 获取消费组列表
+     */
+    @GetMapping("/consumer-groups")
+    public ResponseEntity<List<Map<String, Object>>> getConsumerGroups() {
+        List<Map<String, Object>> consumerGroups = kafkaMonitorService.getConsumerGroups();
+        return ResponseEntity.ok(consumerGroups);
+    }
+
+    /**
+     * 获取消费组详情
+     */
+    @GetMapping("/consumer-groups/{groupId}")
+    public ResponseEntity<Map<String, Object>> getConsumerGroupDetail(
+            @PathVariable String groupId) {
+        Map<String, Object> groupDetail = kafkaMonitorService.getConsumerGroupDetail(groupId);
+        return ResponseEntity.ok(groupDetail);
+    }
+
+    /**
+     * 删除消费组
+     */
+    @DeleteMapping("/consumer-groups/{groupId}")
+    public ResponseEntity<Map<String, Object>> deleteConsumerGroup(
+            @PathVariable String groupId) {
+        Map<String, Object> result = kafkaMonitorService.deleteConsumerGroup(groupId);
+        return ResponseEntity.ok(result);
+    }
+
+    /**
+     * 获取所有Kafka topic及其分区情况
+     */
+    @GetMapping("/topics-with-partitions")
+    public ResponseEntity<Map<String, Object>> getAllTopicsWithPartitions() {
+        Map<String, Object> result = kafkaMonitorService.getAllTopicsWithPartitions();
+        return ResponseEntity.ok(result);
+    }
+
+    /**
+     * 创建新的topic
+     */
+    @PostMapping("/topics")
+    public ResponseEntity<Map<String, Object>> createTopic(
+            @RequestBody Map<String, Object> request) {
+        String topicName = (String) request.get("topicName");
+        int partitions = request.containsKey("partitions") ? (Integer) request.get("partitions") : 3;
+        short replicationFactor = request.containsKey("replicationFactor") ? ((Number) request.get("replicationFactor")).shortValue() : 1;
+        
+        Map<String, Object> result = kafkaMonitorService.createTopic(topicName, partitions, replicationFactor);
+        return ResponseEntity.ok(result);
+    }
+
+    /**
+     * 删除topic
+     */
+    @DeleteMapping("/topics/{topic}")
+    public ResponseEntity<Map<String, Object>> deleteTopic(@PathVariable String topic) {
+        Map<String, Object> result = kafkaMonitorService.deleteTopic(topic);
+        return ResponseEntity.ok(result);
+    }
+
+    /**
+     * 修改topic分区数量
+     */
+    @PostMapping("/topics/{topic}/partitions")
+    public ResponseEntity<Map<String, Object>> increasePartitions(
+            @PathVariable String topic,
+            @RequestBody Map<String, Object> request) {
+        int newPartitionCount = (Integer) request.get("newPartitionCount");
+        Map<String, Object> result = kafkaMonitorService.increasePartitions(topic, newPartitionCount);
+        return ResponseEntity.ok(result);
+    }
+}

+ 654 - 0
schedule-monitor/src/main/java/cn/com/yusys/monitor/service/KafkaMonitorService.java

@@ -0,0 +1,654 @@
+package cn.com.yusys.monitor.service;
+
+import org.apache.kafka.clients.admin.AdminClient;
+import org.apache.kafka.clients.admin.AdminClientConfig;
+import org.apache.kafka.clients.admin.ConsumerGroupDescription;
+import org.apache.kafka.clients.admin.CreatePartitionsResult;
+import org.apache.kafka.clients.admin.CreateTopicsResult;
+import org.apache.kafka.clients.admin.DeleteConsumerGroupsResult;
+import org.apache.kafka.clients.admin.DeleteTopicsResult;
+import org.apache.kafka.clients.admin.DescribeConsumerGroupsResult;
+import org.apache.kafka.clients.admin.DescribeTopicsResult;
+import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsResult;
+import org.apache.kafka.clients.admin.ConsumerGroupListing;
+import org.apache.kafka.clients.admin.ListConsumerGroupsResult;
+import org.apache.kafka.clients.admin.ListTopicsResult;
+import org.apache.kafka.clients.admin.MemberDescription;
+import org.apache.kafka.clients.admin.NewTopic;
+import org.apache.kafka.clients.admin.NewPartitions;
+import org.apache.kafka.clients.admin.TopicDescription;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.ConsumerGroupMetadata;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.consumer.OffsetAndMetadata;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.serialization.StringDeserializer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.stereotype.Service;
+
+import javax.annotation.PostConstruct;
+import javax.annotation.PreDestroy;
+import java.time.Duration;
+import java.util.*;
+import java.util.Date;
+import java.util.stream.Collectors;
+import java.util.concurrent.ExecutionException;
+
+@Service
+public class KafkaMonitorService {
+
+    private static final Logger log = LoggerFactory.getLogger(KafkaMonitorService.class);
+
+    @Value("${spring.kafka.bootstrap-servers}")
+    private String bootstrapServers;
+
+    @Value("${kafka.topics.test-topic}")
+    private String testTopic;
+
+    private AdminClient adminClient;
+    private KafkaConsumer<String, String> consumer;
+
+    @PostConstruct
+    public void init() {
+        // 初始化AdminClient
+        Properties adminProps = new Properties();
+        adminProps.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
+        adminClient = AdminClient.create(adminProps);
+
+        // 初始化KafkaConsumer用于获取消费组信息
+        Properties consumerProps = new Properties();
+        consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
+        consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "monitor-group");
+        consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
+        consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
+        consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+        consumer = new KafkaConsumer<>(consumerProps);
+    }
+
+    @PreDestroy
+    public void close() {
+        if (adminClient != null) {
+            adminClient.close();
+        }
+        if (consumer != null) {
+            consumer.close();
+        }
+    }
+
+    /**
+     * 获取指定topic的分区信息
+     */
+    public Map<String, Object> getTopicPartitions(String topic) {
+        Map<String, Object> result = new HashMap<>();
+        try {
+            DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Collections.singleton(topic));
+            TopicDescription topicDescription = describeTopicsResult.values().get(topic).get();
+            
+            result.put("topic", topic);
+            result.put("partitionCount", topicDescription.partitions().size());
+            
+            List<Map<String, Object>> partitions = new ArrayList<>();
+            for (org.apache.kafka.common.TopicPartitionInfo partitionInfo : topicDescription.partitions()) {
+                Map<String, Object> partitionData = new HashMap<>();
+                partitionData.put("partition", partitionInfo.partition());
+                partitionData.put("leader", partitionInfo.leader().id());
+                partitionData.put("replicas", partitionInfo.replicas().size());
+                partitionData.put("inSyncReplicas", partitionInfo.isr().size());
+                partitions.add(partitionData);
+            }
+            result.put("partitions", partitions);
+        } catch (InterruptedException | ExecutionException e) {
+            log.error("Error getting topic partitions: {}", e.getMessage());
+            result.put("error", e.getMessage());
+        }
+        return result;
+    }
+
+    /**
+     * 获取指定topic各个分区的任务数量执行情况
+     */
+    public Map<String, Object> getPartitionTaskStatus(String topic) {
+        Map<String, Object> result = new HashMap<>();
+        try {
+            // 订阅topic
+            consumer.subscribe(Collections.singleton(topic));
+            // 拉取消息以确保消费者加入组
+            consumer.poll(Duration.ofMillis(100));
+            
+            // 获取消费组信息
+            ConsumerGroupMetadata groupMetadata = consumer.groupMetadata();
+            String groupId = groupMetadata.groupId();
+            
+            // 获取所有分区
+            Set<TopicPartition> partitions = new HashSet<>();
+            for (TopicPartition partition : consumer.assignment()) {
+                if (partition.topic().equals(topic)) {
+                    partitions.add(partition);
+                }
+            }
+            
+            // 如果没有分区分配,手动创建分区列表
+            if (partitions.isEmpty()) {
+                DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Collections.singleton(topic));
+                TopicDescription topicDescription = describeTopicsResult.values().get(topic).get();
+                for (org.apache.kafka.common.TopicPartitionInfo partitionInfo : topicDescription.partitions()) {
+                    partitions.add(new TopicPartition(topic, partitionInfo.partition()));
+                }
+            }
+            
+            // 获取每个分区的最新偏移量
+            Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
+            // 获取每个分区的消费偏移量
+            Map<TopicPartition, OffsetAndMetadata> committedOffsets = consumer.committed(partitions);
+            
+            List<Map<String, Object>> partitionStatus = new ArrayList<>();
+            for (TopicPartition partition : partitions) {
+                Map<String, Object> status = new HashMap<>();
+                status.put("partition", partition.partition());
+                
+                long endOffset = endOffsets.get(partition);
+                status.put("totalTasks", endOffset);
+                
+                OffsetAndMetadata committedOffset = committedOffsets.get(partition);
+                long consumedOffset = committedOffset != null ? committedOffset.offset() : 0;
+                status.put("completedTasks", consumedOffset);
+                status.put("pendingTasks", endOffset - consumedOffset);
+                
+                partitionStatus.add(status);
+            }
+            
+            result.put("topic", topic);
+            result.put("groupId", groupId);
+            result.put("partitionStatus", partitionStatus);
+        } catch (Exception e) {
+            log.error("Error getting partition task status: {}", e.getMessage());
+            result.put("error", e.getMessage());
+        }
+        return result;
+    }
+
+    /**
+     * 获取默认测试topic的分区情况
+     */
+    public Map<String, Object> getTestTopicPartitions() {
+        return getTopicPartitions(testTopic);
+    }
+
+    /**
+     * 获取默认测试topic的任务执行情况
+     */
+    public Map<String, Object> getTestTopicTaskStatus() {
+        return getPartitionTaskStatus(testTopic);
+    }
+
+    /**
+     * 获取所有Kafka topic及其消息数和副本数(带分页)
+     */
+    public Map<String, Object> getAllTopics(int page, int pageSize) {
+        Map<String, Object> result = new HashMap<>();
+        List<Map<String, Object>> topicsWithCount = new ArrayList<>();
+        try {
+            ListTopicsResult listTopicsResult = adminClient.listTopics();
+            Set<String> topicSet = listTopicsResult.names().get();
+            
+            List<String> topicList = new ArrayList<>(topicSet);
+            int totalTopics = topicList.size();
+            int totalPages = (int) Math.ceil((double) totalTopics / pageSize);
+            
+            int fromIndex = (page - 1) * pageSize;
+            int toIndex = Math.min(fromIndex + pageSize, totalTopics);
+            
+            List<String> pagedTopics = topicList.subList(fromIndex, toIndex);
+            
+            for (String topic : pagedTopics) {
+                Map<String, Object> topicData = new HashMap<>();
+                topicData.put("topic", topic);
+                
+                // 获取分区信息
+                DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Collections.singleton(topic));
+                TopicDescription topicDescription = describeTopicsResult.values().get(topic).get();
+                
+                // 获取每个分区的消息数
+                List<TopicPartition> partitions = new ArrayList<>();
+                for (org.apache.kafka.common.TopicPartitionInfo partitionInfo : topicDescription.partitions()) {
+                    partitions.add(new TopicPartition(topic, partitionInfo.partition()));
+                }
+                
+                // 获取分区末尾偏移量(消息总数)
+                Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
+                long totalMessages = endOffsets.values().stream().mapToLong(Long::longValue).sum();
+                
+                // 获取副本数(取第一个分区的副本数,所有分区副本数相同)
+                int replicationFactor = topicDescription.partitions().get(0).replicas().size();
+                
+                topicData.put("messageCount", totalMessages);
+                topicData.put("partitionCount", topicDescription.partitions().size());
+                topicData.put("replicationFactor", replicationFactor);
+                topicsWithCount.add(topicData);
+            }
+            
+            result.put("data", topicsWithCount);
+            result.put("total", totalTopics);
+            result.put("page", page);
+            result.put("pageSize", pageSize);
+            result.put("totalPages", totalPages);
+        } catch (Exception e) {
+            log.error("Error getting topics with message count: {}", e.getMessage());
+            result.put("error", e.getMessage());
+        }
+        return result;
+    }
+
+    /**
+     * 根据topic名称查询详情
+     */
+    public Map<String, Object> getTopicDetail(String topic) {
+        Map<String, Object> topicDetail = new HashMap<>();
+        try {
+            // 获取分区信息
+            DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Collections.singleton(topic));
+            TopicDescription topicDescription = describeTopicsResult.values().get(topic).get();
+            
+            topicDetail.put("topic", topic);
+            topicDetail.put("partitionCount", topicDescription.partitions().size());
+            
+            // 获取副本数(取第一个分区的副本数,所有分区副本数相同)
+            int replicationFactor = topicDescription.partitions().get(0).replicas().size();
+            topicDetail.put("replicationFactor", replicationFactor);
+            
+            // 获取每个分区的消息数
+            List<TopicPartition> partitions = new ArrayList<>();
+            for (org.apache.kafka.common.TopicPartitionInfo partitionInfo : topicDescription.partitions()) {
+                partitions.add(new TopicPartition(topic, partitionInfo.partition()));
+            }
+            
+            // 获取分区末尾偏移量(消息总数)
+            Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
+            long totalMessages = endOffsets.values().stream().mapToLong(Long::longValue).sum();
+            topicDetail.put("messageCount", totalMessages);
+            
+            // 获取分区详细信息
+            List<Map<String, Object>> partitionsDetail = new ArrayList<>();
+            for (org.apache.kafka.common.TopicPartitionInfo partitionInfo : topicDescription.partitions()) {
+                Map<String, Object> partitionData = new HashMap<>();
+                partitionData.put("partition", partitionInfo.partition());
+                partitionData.put("leader", partitionInfo.leader().host() + ":" + partitionInfo.leader().port());
+                
+                List<String> replicas = new ArrayList<>();
+                for (org.apache.kafka.common.Node replica : partitionInfo.replicas()) {
+                    replicas.add(replica.host() + ":" + replica.port());
+                }
+                partitionData.put("replicas", replicas);
+                
+                List<String> isr = new ArrayList<>();
+                for (org.apache.kafka.common.Node node : partitionInfo.isr()) {
+                    isr.add(node.host() + ":" + node.port());
+                }
+                partitionData.put("isr", isr);
+                
+                // 获取该分区的消息数
+                TopicPartition topicPartition = new TopicPartition(topic, partitionInfo.partition());
+                Long partitionMessageCount = endOffsets.get(topicPartition);
+                partitionData.put("messageCount", partitionMessageCount);
+                partitionData.put("endOffset", partitionMessageCount);
+                
+                // 获取当前消费者组的offset
+                OffsetAndMetadata currentOffset = consumer.committed(topicPartition);
+                if (currentOffset != null) {
+                    partitionData.put("currentOffset", currentOffset.offset());
+                    partitionData.put("lag", partitionMessageCount - currentOffset.offset());
+                } else {
+                    partitionData.put("currentOffset", 0);
+                    partitionData.put("lag", partitionMessageCount);
+                }
+                
+                partitionsDetail.add(partitionData);
+            }
+            topicDetail.put("partitionsDetail", partitionsDetail);
+            
+        } catch (Exception e) {
+            log.error("Error getting topic detail: {}", e.getMessage());
+            topicDetail.put("status", "error");
+            topicDetail.put("message", "Topic not found or error occurred: " + e.getMessage());
+        }
+        return topicDetail;
+    }
+
+    /**
+     * 获取分区消息列表
+     */
+    public List<Map<String, Object>> getPartitionMessages(String topic, int partition, long startOffset, int maxRecords) {
+        List<Map<String, Object>> messages = new ArrayList<>();
+        KafkaConsumer<String, String> partitionConsumer = null;
+        
+        try {
+            // 创建临时消费者
+            Properties props = new Properties();
+            props.put("bootstrap.servers", "10.192.72.13:9092");
+            props.put("group.id", "monitor-group-temp");
+            props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+            props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+            props.put("auto.offset.reset", "earliest");
+            props.put("enable.auto.commit", "false");
+            
+            partitionConsumer = new KafkaConsumer<>(props);
+            
+            // 订阅指定分区
+            TopicPartition topicPartition = new TopicPartition(topic, partition);
+            partitionConsumer.assign(Collections.singletonList(topicPartition));
+            
+            // 定位到指定的起始offset
+            partitionConsumer.seek(topicPartition, startOffset);
+            
+            // 消费消息
+            ConsumerRecords<String, String> records = partitionConsumer.poll(Duration.ofMillis(10000));
+            int count = 0;
+            
+            for (ConsumerRecord<String, String> record : records) {
+                if (count >= maxRecords) {
+                    break;
+                }
+                
+                Map<String, Object> message = new HashMap<>();
+                message.put("offset", record.offset());
+                message.put("timestamp", new Date(record.timestamp()).toString());
+                message.put("key", record.key());
+                message.put("value", record.value());
+                message.put("partition", record.partition());
+                
+                messages.add(message);
+                count++;
+            }
+            
+        } catch (Exception e) {
+            log.error("Error getting partition messages: {}", e.getMessage());
+        } finally {
+            if (partitionConsumer != null) {
+                partitionConsumer.close();
+            }
+        }
+        
+        return messages;
+    }
+
+    /**
+     * 获取消费组列表
+     */
+    /**
+     * List all consumer groups known to the cluster, each with its current
+     * member assignments (consumerId, clientId, host, assigned topic-partitions).
+     *
+     * @return one map per group; empty list if the listing itself fails
+     *         (the error is logged, not rethrown).
+     */
+    public List<Map<String, Object>> getConsumerGroups() {
+        List<Map<String, Object>> consumerGroups = new ArrayList<>();
+        try {
+            Collection<ConsumerGroupListing> consumerGroupListings = adminClient.listConsumerGroups().all().get();
+
+            // Batch-describe every group in a single AdminClient round trip
+            // instead of issuing one describeConsumerGroups() call per group (N+1).
+            List<String> groupIds = new ArrayList<>();
+            for (ConsumerGroupListing group : consumerGroupListings) {
+                groupIds.add(group.groupId());
+            }
+            Map<String, ConsumerGroupDescription> groupDescriptions = new HashMap<>();
+            if (!groupIds.isEmpty()) {
+                try {
+                    groupDescriptions = adminClient.describeConsumerGroups(groupIds).all().get();
+                } catch (Exception e) {
+                    // Membership info is best-effort: groups are still listed without members.
+                    log.error("Error getting consumer group members: {}", e.getMessage());
+                }
+            }
+
+            for (ConsumerGroupListing group : consumerGroupListings) {
+                Map<String, Object> groupData = new HashMap<>();
+                groupData.put("groupId", group.groupId());
+
+                List<Map<String, Object>> members = new ArrayList<>();
+                ConsumerGroupDescription description = groupDescriptions.get(group.groupId());
+                if (description != null) {
+                    for (MemberDescription member : description.members()) {
+                        Map<String, Object> memberData = new HashMap<>();
+                        memberData.put("consumerId", member.consumerId());
+                        memberData.put("clientId", member.clientId());
+                        memberData.put("host", member.host());
+
+                        // Topic-partitions currently assigned to this member.
+                        List<Map<String, Object>> topicPartitions = new ArrayList<>();
+                        for (TopicPartition tp : member.assignment().topicPartitions()) {
+                            Map<String, Object> tpData = new HashMap<>();
+                            tpData.put("topic", tp.topic());
+                            tpData.put("partition", tp.partition());
+                            topicPartitions.add(tpData);
+                        }
+                        memberData.put("topicPartitions", topicPartitions);
+                        members.add(memberData);
+                    }
+                }
+
+                groupData.put("members", members);
+                consumerGroups.add(groupData);
+            }
+        } catch (Exception e) {
+            log.error("Error getting consumer groups: {}", e.getMessage());
+        }
+        return consumerGroups;
+    }
+
+    /**
+     * 获取指定分区的日志末尾偏移量
+     */
+    /**
+     * Return the log-end offset of a single partition, or 0 on any failure
+     * (the error is logged).
+     *
+     * NOTE(review): the broker address is hard-coded here and duplicated from
+     * application.yml — TODO inject it from spring.kafka.bootstrap-servers.
+     */
+    private long getEndOffset(String topic, int partition) {
+        Properties props = new Properties();
+        props.put("bootstrap.servers", "10.192.72.13:9092");
+        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
+
+        TopicPartition tp = new TopicPartition(topic, partition);
+        // try-with-resources guarantees the consumer is closed even on failure.
+        // No group.id is needed: endOffsets() queries the broker directly
+        // without joining a group or seeking, replacing the original
+        // assign()/seekToEnd()/position() sequence.
+        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
+            Long endOffset = consumer.endOffsets(Collections.singletonList(tp)).get(tp);
+            return endOffset != null ? endOffset : 0L;
+        } catch (Exception e) {
+            log.error("Error getting end offset: {}", e.getMessage());
+            return 0;
+        }
+    }
+
+    /**
+     * 获取消费组详情
+     */
+    /**
+     * Describe one consumer group: state, members with their assignments, and
+     * per-partition committed offset / log-end offset / lag.
+     *
+     * @param groupId consumer group id to describe
+     * @return detail map; on failure or unknown group, a map containing
+     *         {@code status=error} and a {@code message}.
+     */
+    public Map<String, Object> getConsumerGroupDetail(String groupId) {
+        Map<String, Object> groupDetail = new HashMap<>();
+        try {
+            DescribeConsumerGroupsResult describeConsumerGroupsResult = adminClient.describeConsumerGroups(Collections.singletonList(groupId));
+            Map<String, ConsumerGroupDescription> groupDescriptions = describeConsumerGroupsResult.all().get();
+
+            ConsumerGroupDescription description = groupDescriptions.get(groupId);
+            if (description != null) {
+                groupDetail.put("groupId", description.groupId());
+                groupDetail.put("state", description.state());
+                groupDetail.put("isSimpleConsumerGroup", description.isSimpleConsumerGroup());
+
+                // Members and the partitions currently assigned to each.
+                List<Map<String, Object>> members = new ArrayList<>();
+                for (MemberDescription member : description.members()) {
+                    Map<String, Object> memberData = new HashMap<>();
+                    memberData.put("consumerId", member.consumerId());
+                    memberData.put("clientId", member.clientId());
+                    memberData.put("host", member.host());
+
+                    // (Removed an unused stream-collected "assignments" map that
+                    // was built here but never read.)
+                    List<Map<String, Object>> topicPartitions = new ArrayList<>();
+                    for (TopicPartition tp : member.assignment().topicPartitions()) {
+                        Map<String, Object> tpData = new HashMap<>();
+                        tpData.put("topic", tp.topic());
+                        tpData.put("partition", tp.partition());
+                        topicPartitions.add(tpData);
+                    }
+                    memberData.put("topicPartitions", topicPartitions);
+                    members.add(memberData);
+                }
+                groupDetail.put("members", members);
+
+                // Committed offsets plus lag (end offset - committed offset) per partition.
+                ListConsumerGroupOffsetsResult listConsumerGroupOffsetsResult = adminClient.listConsumerGroupOffsets(groupId);
+                Map<TopicPartition, OffsetAndMetadata> offsets = listConsumerGroupOffsetsResult.partitionsToOffsetAndMetadata().get();
+                List<Map<String, Object>> offsetDetails = new ArrayList<>();
+                for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) {
+                    TopicPartition tp = entry.getKey();
+                    long currentOffset = entry.getValue().offset();
+                    long endOffset = getEndOffset(tp.topic(), tp.partition());
+                    long lag = endOffset - currentOffset;
+
+                    Map<String, Object> offsetData = new HashMap<>();
+                    offsetData.put("topic", tp.topic());
+                    offsetData.put("partition", tp.partition());
+                    offsetData.put("currentOffset", currentOffset);
+                    offsetData.put("endOffset", endOffset);
+                    offsetData.put("lag", lag);
+                    offsetDetails.add(offsetData);
+                }
+                groupDetail.put("offsets", offsetDetails);
+            } else {
+                groupDetail.put("status", "error");
+                groupDetail.put("message", "Consumer group not found");
+            }
+        } catch (Exception e) {
+            log.error("Error getting consumer group detail: {}", e.getMessage());
+            groupDetail.put("status", "error");
+            groupDetail.put("message", "Error getting consumer group detail: " + e.getMessage());
+        }
+        return groupDetail;
+    }
+
+    /**
+     * 删除消费组
+     */
+    public Map<String, Object> deleteConsumerGroup(String groupId) {
+        Map<String, Object> result = new HashMap<>();
+        try {
+            DeleteConsumerGroupsResult deleteConsumerGroupsResult = adminClient.deleteConsumerGroups(Collections.singletonList(groupId));
+            deleteConsumerGroupsResult.all().get();
+            result.put("status", "success");
+            result.put("message", "Consumer group deleted successfully");
+            result.put("groupId", groupId);
+        } catch (Exception e) {
+            log.error("Error deleting consumer group: {}", e.getMessage());
+            result.put("status", "error");
+            result.put("message", "Error deleting consumer group: " + e.getMessage());
+            result.put("groupId", groupId);
+        }
+        return result;
+    }
+
+    /**
+     * 获取所有Kafka topic及其分区情况
+     */
+    /**
+     * List every topic with its partition layout (leader broker id, replica
+     * count, in-sync replica count).
+     *
+     * @return map with "topics" (list of per-topic maps) and "totalTopics";
+     *         on failure, a map containing "error" with the message.
+     */
+    public Map<String, Object> getAllTopicsWithPartitions() {
+        Map<String, Object> result = new HashMap<>();
+        try {
+            Set<String> topicSet = adminClient.listTopics().names().get();
+
+            // Describe all topics in a single AdminClient request instead of
+            // one describeTopics() call per topic inside the loop (N+1).
+            Map<String, TopicDescription> descriptions = topicSet.isEmpty()
+                    ? Collections.emptyMap()
+                    : adminClient.describeTopics(topicSet).all().get();
+
+            List<Map<String, Object>> topicList = new ArrayList<>();
+            for (String topic : topicSet) {
+                TopicDescription topicDescription = descriptions.get(topic);
+                if (topicDescription == null) {
+                    continue; // topic disappeared between list and describe
+                }
+                Map<String, Object> topicData = new HashMap<>();
+                topicData.put("topic", topic);
+                topicData.put("partitionCount", topicDescription.partitions().size());
+
+                List<Map<String, Object>> partitions = new ArrayList<>();
+                for (org.apache.kafka.common.TopicPartitionInfo partitionInfo : topicDescription.partitions()) {
+                    Map<String, Object> partitionData = new HashMap<>();
+                    partitionData.put("partition", partitionInfo.partition());
+                    // leader() is null while a partition is leaderless (e.g. the
+                    // leader broker is down); guard against the NPE and report -1.
+                    partitionData.put("leader", partitionInfo.leader() != null ? partitionInfo.leader().id() : -1);
+                    partitionData.put("replicas", partitionInfo.replicas().size());
+                    partitionData.put("inSyncReplicas", partitionInfo.isr().size());
+                    partitions.add(partitionData);
+                }
+                topicData.put("partitions", partitions);
+                topicList.add(topicData);
+            }
+
+            result.put("topics", topicList);
+            result.put("totalTopics", topicSet.size());
+        } catch (InterruptedException e) {
+            // Restore the interrupt flag so callers up the stack can observe it.
+            Thread.currentThread().interrupt();
+            log.error("Error getting all topics with partitions: {}", e.getMessage());
+            result.put("error", e.getMessage());
+        } catch (ExecutionException e) {
+            log.error("Error getting all topics with partitions: {}", e.getMessage());
+            result.put("error", e.getMessage());
+        }
+        return result;
+    }
+
+    /**
+     * 创建新的topic
+     */
+    public Map<String, Object> createTopic(String topicName, int partitions, short replicationFactor) {
+        Map<String, Object> result = new HashMap<>();
+        try {
+            NewTopic newTopic = new NewTopic(topicName, partitions, replicationFactor);
+            CreateTopicsResult createTopicsResult = adminClient.createTopics(Collections.singleton(newTopic));
+            createTopicsResult.all().get();
+            
+            result.put("status", "success");
+            result.put("message", "Topic created successfully");
+            result.put("topic", topicName);
+            result.put("partitions", partitions);
+            result.put("replicationFactor", replicationFactor);
+        } catch (Exception e) {
+            log.error("Error creating topic: {}", e.getMessage());
+            result.put("status", "error");
+            result.put("message", e.getMessage());
+        }
+        return result;
+    }
+
+    /**
+     * 删除topic
+     */
+    public Map<String, Object> deleteTopic(String topicName) {
+        Map<String, Object> result = new HashMap<>();
+        try {
+            DeleteTopicsResult deleteTopicsResult = adminClient.deleteTopics(Collections.singleton(topicName));
+            deleteTopicsResult.all().get();
+            
+            result.put("status", "success");
+            result.put("message", "Topic deleted successfully");
+            result.put("topic", topicName);
+        } catch (Exception e) {
+            log.error("Error deleting topic: {}", e.getMessage());
+            result.put("status", "error");
+            result.put("message", e.getMessage());
+        }
+        return result;
+    }
+
+    /**
+     * 增加topic的分区数量(只能增加,不能减少)
+     */
+    public Map<String, Object> increasePartitions(String topicName, int newPartitionCount) {
+        Map<String, Object> result = new HashMap<>();
+        try {
+            Map<String, NewPartitions> partitionsMap = new HashMap<>();
+            NewPartitions newPartitions = NewPartitions.increaseTo(newPartitionCount);
+            partitionsMap.put(topicName, newPartitions);
+            
+            CreatePartitionsResult createPartitionsResult = adminClient.createPartitions(partitionsMap);
+            createPartitionsResult.all().get();
+            
+            result.put("status", "success");
+            result.put("message", "Topic partitions increased successfully");
+            result.put("topic", topicName);
+            result.put("newPartitionCount", newPartitionCount);
+        } catch (Exception e) {
+            log.error("Error increasing partitions: {}", e.getMessage());
+            result.put("status", "error");
+            result.put("message", e.getMessage());
+        }
+        return result;
+    }
+}

+ 13 - 0
schedule-monitor/src/main/resources/application.yml

@@ -0,0 +1,13 @@
+server:
+  port: 8094
+
+spring:
+  application:
+    name: schedule-monitor
+  kafka:
+    bootstrap-servers: 10.192.72.13:9092
+
+# Kafka监控配置
+kafka:
+  topics:
+    test-topic: test-topic