Merge pull request didi#479 from didi/dev
Add integration tests & unit tests
ZQKC authored Mar 15, 2022
2 parents 734a020 + bd62212 commit 7c0e9df
Showing 171 changed files with 21,563 additions and 124 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -111,3 +111,4 @@ dist/
 dist/*
 kafka-manager-web/src/main/resources/templates/
 .DS_Store
+kafka-manager-console/package-lock.json
47 changes: 47 additions & 0 deletions docs/dev_guide/LogiKM单元测试和集成测试.md
@@ -0,0 +1,47 @@

---

![kafka-manager-logo](../assets/images/common/logo_name.png)

**A one-stop platform for `Apache Kafka` cluster metrics monitoring and operations management**

---


# LogiKM Unit Testing and Integration Testing

## 1. Unit Testing
### 1.1 What unit testing is
Unit testing, also known as module testing, verifies the correctness of the smallest unit of software design: the program module.
Its purpose is to check that each program unit correctly implements the functionality, performance, interfaces, and design
constraints specified in the detailed design, and to uncover the errors that may exist inside each module. Unit test cases
are designed from the program's internal structure, and multiple modules can be unit-tested independently and in parallel.

### 1.2 LogiKM unit testing approach
LogiKM's unit tests target the Service-layer methods: each test enumerates the various parameters of a method
and checks that the returned result matches expectations. The unit-test base class carries the @SpringBootTest
annotation, so every run of the unit tests starts the Spring container. A sketch of such a test follows below.
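
As a rough sketch of what such a Service-layer test can look like under this setup. The test class name is illustrative, the imports of LogiKM's own `RegionService` and `ResultStatus` are omitted, and the expected `PARAM_ILLEGAL` status for a null regionId comes from this commit's `RegionServiceImpl` change:

```java
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.testng.AbstractTestNGSpringContextTests;
import org.testng.Assert;
import org.testng.annotations.Test;

// RegionService and ResultStatus come from LogiKM's own packages (imports omitted).

@SpringBootTest
public class RegionServiceTest extends AbstractTestNGSpringContextTests {

    @Autowired
    private RegionService regionService; // the Service-layer bean under test

    @Test
    public void updateRegion_nullRegionId_returnsParamIllegal() {
        // Enumerate an invalid parameter combination and check the returned status:
        // a null regionId must be rejected as PARAM_ILLEGAL.
        Assert.assertEquals(regionService.updateRegion(null, "1,2,3"),
                ResultStatus.PARAM_ILLEGAL);
    }
}
```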

### 1.3 LogiKM unit testing notes
1. The unit test cases live in the test packages of kafka-manager-core and kafka-manager-extends
2. Configuration lives in resources/application.yml, including the database settings used when running the unit test cases
3. When compiling and packaging the project, add the -DskipTests parameter to skip the test cases, e.g. package with mvn -DskipTests




## 2. Integration Testing
### 2.1 What integration testing is
Integration testing, also known as assembly testing, is a form of black-box testing. Building on the unit tests, it tests all the program modules together in an ordered, incremental way.
Integration testing verifies the interface relationships between program units or components, assembling them step by step into components, and eventually a whole system, that meet the high-level design.

### 2.2 LogiKM integration testing approach
The main idea of LogiKM's integration tests is to send HTTP requests to the Controller-layer APIs.
Each test case simulates a user operation by issuing an HTTP request to an API and checking that the result matches expectations.
When running the integration test cases locally, the @SpringBootTest annotation is not needed (i.e. the container does not have to be started for every test run). A sketch of such a test follows below.
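
A hedged sketch of such a test: the base URL, endpoint path, and bypass-login header names and values below are placeholders, with the real values coming from `integrationTest-settings.properties`, `TrickLoginConstant`, and the call_api_bypass_login guide:

```java
import org.springframework.http.HttpEntity;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpMethod;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.client.RestTemplate;
import org.testng.Assert;
import org.testng.annotations.Test;

public class TopicControllerIntegrationTest {

    // Placeholder: points at the locally started LogiKM instance.
    private static final String BASE_URL = "http://localhost:8080";

    @Test
    public void listTopics_returnsHttp200() {
        HttpHeaders headers = new HttpHeaders();
        // Placeholder bypass-login headers; the real header names are defined in
        // TrickLoginConstant and documented in call_api_bypass_login.
        headers.add("Trick-Login-Switch", "on");
        headers.add("Trick-Login-User", "admin");

        ResponseEntity<String> response = new RestTemplate().exchange(
                BASE_URL + "/api/v1/topics",   // placeholder endpoint path
                HttpMethod.GET,
                new HttpEntity<>(headers),
                String.class);

        // Simulate the user operation and check the result matches expectations.
        Assert.assertEquals(response.getStatusCode(), HttpStatus.OK);
    }
}
```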

### 2.3 LogiKM integration testing notes
1. The integration test cases live in the test package of kafka-manager-web
2. Some APIs require logging in before they accept HTTP requests, which is cumbersome; login can be bypassed, see docs -> user_guide -> call_api_bypass_login
3. The integration-test configuration lives in the resources/integrationTest-settings.properties file, including the cluster address, the ZooKeeper address, and so on
4. To run the integration test cases, start the LogiKM project locally first
5. When compiling and packaging the project, add the -DskipTests parameter to skip the test cases, e.g. package with mvn -DskipTests
@@ -82,46 +82,46 @@ public String toString()
         return JSON.toJSONString(this);
     }

-    public static Result buildSuc() {
-        Result result = new Result();
+    public static <T> Result<T> buildSuc() {
+        Result<T> result = new Result<>();
         result.setCode(ResultStatus.SUCCESS.getCode());
         result.setMessage(ResultStatus.SUCCESS.getMessage());
         return result;
     }

     public static <T> Result<T> buildSuc(T data) {
-        Result<T> result = new Result<T>();
+        Result<T> result = new Result<>();
         result.setCode(ResultStatus.SUCCESS.getCode());
         result.setMessage(ResultStatus.SUCCESS.getMessage());
         result.setData(data);
         return result;
     }

     public static <T> Result<T> buildGatewayFailure(String message) {
-        Result<T> result = new Result<T>();
+        Result<T> result = new Result<>();
         result.setCode(ResultStatus.GATEWAY_INVALID_REQUEST.getCode());
         result.setMessage(message);
         result.setData(null);
         return result;
     }

     public static <T> Result<T> buildFailure(String message) {
-        Result<T> result = new Result<T>();
+        Result<T> result = new Result<>();
         result.setCode(ResultStatus.FAIL.getCode());
         result.setMessage(message);
         result.setData(null);
         return result;
     }

-    public static Result buildFrom(ResultStatus resultStatus) {
-        Result result = new Result();
+    public static <T> Result<T> buildFrom(ResultStatus resultStatus) {
+        Result<T> result = new Result<>();
         result.setCode(resultStatus.getCode());
         result.setMessage(resultStatus.getMessage());
         return result;
     }

-    public static Result buildFrom(ResultStatus resultStatus, Object data) {
-        Result result = new Result();
+    public static <T> Result<T> buildFrom(ResultStatus resultStatus, T data) {
+        Result<T> result = new Result<>();
         result.setCode(resultStatus.getCode());
         result.setMessage(resultStatus.getMessage());
         result.setData(data);
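
What the generic factory methods buy callers, as a hedged sketch: previously `buildSuc()` returned a raw `Result`, forcing an unchecked cast at the call site, while `Result<T>` preserves the data type. This demo assumes `Result<T>` exposes a `getData()` accessor, which the `setData(...)` calls above suggest but do not show:

```java
import java.util.Arrays;
import java.util.List;

// Hypothetical demo class; Result comes from LogiKM's packages (import omitted).
public class ResultGenericsDemo {
    public static void main(String[] args) {
        Result<List<String>> result = Result.buildSuc(Arrays.asList("topicA", "topicB"));
        List<String> topics = result.getData(); // type-safe: no unchecked cast needed
        System.out.println(topics);
        // Before this change, buildSuc() returned a raw Result, so the same call
        // site needed an unchecked cast: (List<String>) rawResult.getData().
    }
}
```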
@@ -118,10 +118,7 @@ public String toString() {
     }

     public boolean legal() {
-        if (ValidateUtils.isNull(clusterId)
-                || ValidateUtils.isNull(clusterId)
-                || ValidateUtils.isEmptyList(regionIdList)
-                || ValidateUtils.isNull(mode)) {
+        if (ValidateUtils.isNull(clusterId) || ValidateUtils.isEmptyList(regionIdList) || ValidateUtils.isNull(mode)) {
             return false;
         }
         if (!ClusterModeEnum.SHARED_MODE.getCode().equals(mode) && ValidateUtils.isNull(appId)) {
@@ -94,10 +94,7 @@ public String toString() {
     }

     public boolean legal() {
-        if (ValidateUtils.isNull(clusterId)
-                || ValidateUtils.isNull(clusterId)
-                || ValidateUtils.isEmptyList(brokerIdList)
-                || ValidateUtils.isNull(status)) {
+        if (ValidateUtils.isNull(clusterId) || ValidateUtils.isEmptyList(brokerIdList) || ValidateUtils.isNull(status)) {
             return false;
         }
         description = ValidateUtils.isNull(description)? "": description;
@@ -13,6 +13,7 @@
 import org.springframework.context.annotation.Lazy;
 import org.springframework.core.annotation.Order;
 import org.springframework.stereotype.Service;
+import org.springframework.web.context.request.RequestAttributes;
 import org.springframework.web.context.request.RequestContextHolder;
 import org.springframework.web.context.request.ServletRequestAttributes;

@@ -81,16 +82,19 @@ public void destroy() throws Exception {
     }

     public static String getUserName(){
-        HttpServletRequest request = ((ServletRequestAttributes) RequestContextHolder.getRequestAttributes()).getRequest();
-
         String username = null;
-        if (TrickLoginConstant.TRICK_LOGIN_SWITCH_ON.equals(request.getHeader(TrickLoginConstant.TRICK_LOGIN_SWITCH))) {
-            // trick login: read the user from the request header
-            username = request.getHeader(TrickLoginConstant.TRICK_LOGIN_USER);
-        } else {
-            // page login: read the user from the session
-            HttpSession session = request.getSession();
-            username = (String) session.getAttribute(LoginConstant.SESSION_USERNAME_KEY);
-        }
+        RequestAttributes requestAttributes = RequestContextHolder.getRequestAttributes();
+        if (!ValidateUtils.isNull(requestAttributes)) {
+            HttpServletRequest request = ((ServletRequestAttributes) requestAttributes).getRequest();
+
+            if (TrickLoginConstant.TRICK_LOGIN_SWITCH_ON.equals(request.getHeader(TrickLoginConstant.TRICK_LOGIN_SWITCH))) {
+                // trick login: read the user from the request header
+                username = request.getHeader(TrickLoginConstant.TRICK_LOGIN_USER);
+            } else {
+                // page login: read the user from the session
+                HttpSession session = request.getSession();
+                username = (String) session.getAttribute(LoginConstant.SESSION_USERNAME_KEY);
+            }
+        }

         if (ValidateUtils.isNull(username)) {
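
The reason for the new null guard: `RequestContextHolder.getRequestAttributes()` returns null on threads that have no HTTP request bound, which is exactly the situation when `getUserName()` is reached from the new unit tests rather than from a web request. A minimal standalone illustration (the demo class is hypothetical; only spring-web is needed on the classpath):

```java
import org.springframework.web.context.request.RequestAttributes;
import org.springframework.web.context.request.RequestContextHolder;

public class GetUserNameGuardDemo {
    public static void main(String[] args) {
        // On a plain (non-web) thread no request is bound, so this returns null.
        RequestAttributes attrs = RequestContextHolder.getRequestAttributes();
        System.out.println(attrs == null); // prints "true"
        // The previous getUserName() cast and dereferenced this value unconditionally,
        // so calling it outside an HTTP request threw a NullPointerException; the
        // guarded version now falls through to the null-username handling instead.
    }
}
```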
@@ -29,10 +29,10 @@ public void setProducer_byte_rate(String producer_byte_rate) {

     public static TopicQuotaData getClientData(Long producerByteRate, Long consumerByteRate) {
         TopicQuotaData clientData = new TopicQuotaData();
-        if (!ValidateUtils.isNull(producerByteRate) && consumerByteRate != -1) {
+        if (!ValidateUtils.isNull(consumerByteRate) && consumerByteRate != -1) {
             clientData.setConsumer_byte_rate(consumerByteRate.toString());
         }
-        if (!ValidateUtils.isNull(consumerByteRate) && producerByteRate != -1) {
+        if (!ValidateUtils.isNull(producerByteRate) && producerByteRate != -1) {
             clientData.setProducer_byte_rate(producerByteRate.toString());
         }
         return clientData;
18 changes: 18 additions & 0 deletions kafka-manager-core/pom.xml
@@ -95,5 +95,23 @@
             <groupId>com.fasterxml.jackson.core</groupId>
             <artifactId>jackson-databind</artifactId>
         </dependency>
+
+        <!-- testng -->
+        <dependency>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot-starter-test</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot-starter-web</artifactId>
+        </dependency>
+        <!-- https://mvnrepository.com/artifact/org.testng/testng -->
+        <dependency>
+            <groupId>org.testng</groupId>
+            <artifactId>testng</artifactId>
+            <version>6.9.10</version>
+        </dependency>
+
     </dependencies>
 </project>
@@ -42,6 +42,13 @@ public interface ConsumerService {
      */
     List<String> getConsumerGroupConsumedTopicList(Long clusterId, String consumerGroup, String location);

+    /**
+     * Get the consumer offsets
+     * @param clusterDO cluster
+     * @param topicName topic
+     * @param consumerGroup consumer group
+     * @return Map<partitionId, offset>
+     */
     Map<Integer, Long> getConsumerOffset(ClusterDO clusterDO, String topicName, ConsumerGroup consumerGroup);

     /**
@@ -52,7 +59,20 @@ List<Result> resetConsumerOffset(ClusterDO clusterDO,
                                      ConsumerGroup consumerGroup,
                                      List<PartitionOffsetDTO> partitionOffsetDTOList);

+    /**
+     * Get the number of consumer groups for each cluster
+     * @param clusterDOList list of physical clusters
+     * @return Map<clusterId, consumerGroupNums>
+     */
     Map<Long, Integer> getConsumerGroupNumMap(List<ClusterDO> clusterDOList);

+    /**
+     * Check whether a consumer group exists
+     * @param offsetLocation where the offsets are stored
+     * @param id cluster id
+     * @param topicName topic
+     * @param consumerGroup consumer group
+     * @return true if it exists, false otherwise
+     */
     boolean checkConsumerGroupExist(OffsetLocationEnum offsetLocation, Long id, String topicName, String consumerGroup);
 }
@@ -54,12 +54,12 @@ public interface RegionService {
     Map<Integer, RegionDO> convert2BrokerIdRegionMap(List<RegionDO> regionDOList);

     /**
-     * Update the logical cluster capacity
-     * @param clusterId cluster id
+     * Update a Region by its regionId
+     * @param regionId id of the region
+     * @param newBrokerList new broker list
      * @return ResultStatus
      */
-    ResultStatus updateRegion(Long clusterId, String newBrokerList);
+    ResultStatus updateRegion(Long regionId, String newBrokerList);

     /**
      * Get the broker list of idle regions
@@ -104,6 +104,13 @@ List<String> fetchTopicData(KafkaConsumer kafkaConsumer,
      */
     List<TopicBrokerDTO> getTopicBrokerList(Long clusterId, String topicName);

+    /**
+     * Check whether data is being written to the topic, i.e. whether the topic's partition offsets are changing
+     * @param physicalClusterId physical cluster id
+     * @param topicName topic name
+     * @param latestTime how far back from now the check starts
+     * @return
+     */
     Result<TopicOffsetChangedEnum> checkTopicOffsetChanged(Long physicalClusterId, String topicName, Long latestTime);

 }
@@ -159,7 +159,7 @@ public List<ConsumeDetailDTO> getConsumeDetail(ClusterDO clusterDO, String topic
         if (topicMetadata == null) {
             logger.warn("class=ConsumerServiceImpl||method=getConsumeDetail||clusterId={}||topicName={}||msg=topicMetadata is null!",
                     clusterDO.getId(), topicName);
-            return null;
+            return Collections.emptyList();
         }

         List<ConsumeDetailDTO> consumerGroupDetailDTOList = null;
@@ -170,7 +170,7 @@
         }
         if (consumerGroupDetailDTOList == null) {
             logger.info("class=ConsumerServiceImpl||method=getConsumeDetail||msg=consumerGroupDetailDTOList is null!");
-            return null;
+            return Collections.emptyList();
         }

         Map<TopicPartition, Long> topicPartitionLongMap = topicService.getPartitionOffset(clusterDO, topicName, OffsetPosEnum.END);
@@ -317,9 +317,6 @@ private Map<Integer, Long> getConsumerOffsetFromBK(ClusterDO clusterDO,
                                                        String consumerGroup) {
         Map<Integer, String> stringOffsetMap =
                 getOffsetByGroupAndTopicFromBroker(clusterDO, consumerGroup, topicName);
-        if (ValidateUtils.isNull(stringOffsetMap)) {
-            return new HashMap<>(0);
-        }

         Map<Integer, Long> offsetMap = new HashMap<>(stringOffsetMap.size());
         for (Map.Entry<Integer, String> entry: stringOffsetMap.entrySet()) {
@@ -167,9 +167,11 @@ public TopicMetrics getTopicMetrics(Long clusterId, Integer brokerId, String top
         if (ValidateUtils.isNull(jmxConnectorWrap)|| !jmxConnectorWrap.checkJmxConnectionAndInitIfNeed()) {
             return null;
         }
+
+        KafkaVersion kafkaVersion = physicalClusterMetadataManager.getKafkaVersion(clusterId, brokerId);
+
         TopicMetrics metrics = new TopicMetrics(clusterId, topicName);
         for (MbeanV2 mbeanV2: mbeanV2List) {
-            KafkaVersion kafkaVersion = physicalClusterMetadataManager.getKafkaVersion(clusterId, brokerId);
             try {
                 getAndSupplyAttributes2BaseMetrics(
                         metrics,
@@ -138,11 +138,11 @@ public ResultStatus updateRegion(RegionDO newRegionDO) {


     @Override
-    public ResultStatus updateRegion(Long clusterId, String newBrokerList) {
-        if (ValidateUtils.isNull(clusterId) || ValidateUtils.isExistBlank(newBrokerList)) {
+    public ResultStatus updateRegion(Long regionId, String newBrokerList) {
+        if (ValidateUtils.isNull(regionId) || ValidateUtils.isExistBlank(newBrokerList)) {
             return ResultStatus.PARAM_ILLEGAL;
         }
-        RegionDO regionDO = getById(clusterId);
+        RegionDO regionDO = getById(regionId);
         if (ValidateUtils.isNull(regionDO)) {
             return ResultStatus.CLUSTER_NOT_EXIST;
         }
@@ -419,6 +419,7 @@ public ResultStatus modifyTopicByOp(Long clusterId, String topicName, String app
             authorityDO.setTopicName(topicName);
             authorityDO.setAccess(TopicAuthorityEnum.READ_WRITE.getCode());
             authorityService.addAuthority(authorityDO);
+            return ResultStatus.SUCCESS;
         } catch (Exception e) {
             LOGGER.error("modify topic failed, clusterId:{} topicName:{} description:{} operator:{} ",
                     clusterId, topicName, description, operator, e);
@@ -631,7 +632,7 @@ public ResultStatus addAuthority(AuthorityDO authorityDO) {
             // this user has no app and needs to apply for one first
             return ResultStatus.APP_NOT_EXIST;
         }
-        List<Long> appIds = appDOs.stream().map(AppDO::getId).collect(Collectors.toList());
+        List<String> appIds = appDOs.stream().map(AppDO::getAppId).collect(Collectors.toList());
         if (!appIds.contains(authorityDO.getAppId())) {
             // the appId in the request is not owned by this user
             return ResultStatus.APP_NOT_EXIST;
@@ -250,11 +250,11 @@ public TopicBasicDTO getTopicBasicDTO(Long clusterId, String topicName) {
     @Override
     public List<TopicPartitionDTO> getTopicPartitionDTO(ClusterDO clusterDO, String topicName, Boolean needDetail) {
         if (ValidateUtils.isNull(clusterDO) || ValidateUtils.isNull(topicName)) {
-            return null;
+            return new ArrayList<>();
         }
         TopicMetadata topicMetadata = PhysicalClusterMetadataManager.getTopicMetadata(clusterDO.getId(), topicName);
         if (ValidateUtils.isNull(topicMetadata)) {
-            return null;
+            return new ArrayList<>();
         }

         List<PartitionState> partitionStateList = KafkaZookeeperUtils.getTopicPartitionState(
@@ -419,9 +419,6 @@ public List<TopicOverview> getTopicOverviewList(Long clusterId, List<String> top
                     topicDO,
                     appDO
             );
-            if (ValidateUtils.isNull(overview)) {
-                continue;
-            }
             dtoList.add(overview);

@@ -531,7 +528,7 @@ private List<PartitionState> getTopicPartitionState(Long clusterId, TopicMetadat
     public List<PartitionOffsetDTO> getPartitionOffsetList(ClusterDO clusterDO, String topicName, Long timestamp) {
         TopicMetadata topicMetadata = PhysicalClusterMetadataManager.getTopicMetadata(clusterDO.getId(), topicName);
         if (topicMetadata == null) {
-            return null;
+            return new ArrayList<>();
         }
         Map<TopicPartition, Long> timestampsToSearch = new HashMap<>();
         for (Integer partitionId : topicMetadata.getPartitionMap().getPartitions().keySet()) {
@@ -575,7 +572,7 @@ public List<String> fetchTopicData(ClusterDO clusterDO, String topicName, TopicD
                 kafkaConsumer.close();
             }
         }
-        return null;
+        return new ArrayList<>();
     }

     private List<String> fetchTopicData(KafkaConsumer kafkaConsumer, ClusterDO clusterDO, String topicName, TopicDataSampleDTO reqObj) {
@@ -588,7 +585,7 @@ private List<String> fetchTopicData(KafkaConsumer kafkaConsumer, ClusterDO clust
             tpList.add(new TopicPartition(topicName, partitionId));
         }
         if (ValidateUtils.isEmptyList(tpList)) {
-            return null;
+            return new ArrayList<>();
         }

         kafkaConsumer.assign(tpList);