Skip to content

Commit

Permalink
Merge pull request didi#440 from didi/dev
Browse files Browse the repository at this point in the history
1.安装部署脚本LogiKM可配置; 2.增加网关接口及第三方接口可直接调用的开关;
  • Loading branch information
ZQKC authored Jan 17, 2022
2 parents 426ba2d + f3f0432 commit a7f8c3c
Show file tree
Hide file tree
Showing 7 changed files with 58 additions and 41 deletions.
22 changes: 13 additions & 9 deletions distribution/conf/application.yml.example
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,9 @@ client-pool:
borrow-timeout-unit-ms: 3000 # 租借超时时间,单位毫秒

account:
jump-login:
gateway-api: false # 网关接口
third-part-api: false # 第三方接口
ldap:
enabled: false
url: ldap://127.0.0.1:389/
Expand All @@ -98,19 +101,20 @@ account:
auth-user-registration: true
auth-user-registration-role: normal

kcm:
enabled: false
s3:
kcm: # 集群安装部署,仅安装broker
enabled: false # 是否开启
s3: # s3 存储服务
endpoint: s3.didiyunapi.com
access-key: 1234567890
secret-key: 0987654321
bucket: logi-kafka
n9e:
base-url: http://127.0.0.1:8004
user-token: 12345678
timeout: 300
account: root
script-file: kcm_script.sh
n9e: # 夜莺
base-url: http://127.0.0.1:8004 # 夜莺job服务地址
user-token: 12345678 # 用户的token
timeout: 300 # 单台操作的超时时间
account: root # 操作时使用的账号
script-file: kcm_script.sh # 脚本,已内置好,在源码的kcm模块内,此处配置无需修改
logikm-url: http://127.0.0.1:8080 # logikm部署地址,部署时kcm_script.sh会调用logikm检查部署中的一些状态

monitor:
enabled: false
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,12 +20,6 @@ public class ApiPrefix {
// open
public static final String API_V1_THIRD_PART_PREFIX = API_V1_PREFIX + "third-part/";

// 开放给OP的接口, 后续对应的接口的集群都需要是物理集群
public static final String API_V1_THIRD_PART_OP_PREFIX = API_V1_THIRD_PART_PREFIX + "op/";

// 开放给Normal的接口, 后续对应的接口的集群,都需要是逻辑集群
public static final String API_V1_THIRD_PART_NORMAL_PREFIX = API_V1_THIRD_PART_PREFIX + "normal/";

// gateway
public static final String GATEWAY_API_V1_PREFIX = "/gateway" + API_V1_PREFIX;

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;

import javax.servlet.http.Cookie;
Expand All @@ -27,7 +28,13 @@
*/
@Service("loginService")
public class LoginServiceImpl implements LoginService {
private final static Logger LOGGER = LoggerFactory.getLogger(LoginServiceImpl.class);
private static final Logger LOGGER = LoggerFactory.getLogger(LoginServiceImpl.class);

@Value(value = "${account.jump-login.gateway-api:false}")
private Boolean jumpLoginGatewayApi;

@Value(value = "${account.jump-login.third-part-api:false}")
private Boolean jumpLoginThirdPartApi;

@Autowired
private AccountService accountService;
Expand Down Expand Up @@ -75,8 +82,10 @@ public boolean checkLogin(HttpServletRequest request, HttpServletResponse respon
return false;
}

if (classRequestMappingValue.equals(ApiPrefix.API_V1_SSO_PREFIX)) {
// 白名单接口直接true
if (classRequestMappingValue.equals(ApiPrefix.API_V1_SSO_PREFIX) ||
(jumpLoginGatewayApi != null && jumpLoginGatewayApi && classRequestMappingValue.equals(ApiPrefix.GATEWAY_API_V1_PREFIX)) ||
(jumpLoginThirdPartApi != null && jumpLoginThirdPartApi && classRequestMappingValue.equals(ApiPrefix.API_V1_THIRD_PART_PREFIX))) {
// 登录接口 or 允许跳过且是跳过类型的接口,则直接跳过登录
return true;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,21 +37,24 @@
public class N9e extends AbstractAgent {
private static final Logger LOGGER = LoggerFactory.getLogger(N9e.class);

@Value("${kcm.n9e.base-url}")
@Value("${kcm.n9e.base-url:}")
private String baseUrl;

@Value("${kcm.n9e.user-token}")
@Value("${kcm.n9e.user-token:12345678}")
private String userToken;

@Value("${kcm.n9e.account}")
@Value("${kcm.n9e.account:root}")
private String account;

@Value("${kcm.n9e.timeout}")
@Value("${kcm.n9e.timeout:300}")
private Integer timeout;

@Value("${kcm.n9e.script-file}")
@Value("${kcm.n9e.script-file:kcm_script.sh}")
private String scriptFile;

@Value("${kcm.n9e.logikm-url:}")
private String logiKMUrl;

private String script;

private static final String CREATE_TASK_URI = "/api/job-ce/tasks";
Expand Down Expand Up @@ -219,7 +222,8 @@ private N9eCreationTask buildCreateTaskParam(CreationTaskData creationTaskData)
sb.append(creationTaskData.getKafkaPackageUrl()).append(",,");
sb.append(creationTaskData.getServerPropertiesName().replace(KafkaFileEnum.SERVER_CONFIG.getSuffix(), "")).append(",,");
sb.append(creationTaskData.getServerPropertiesMd5()).append(",,");
sb.append(creationTaskData.getServerPropertiesUrl());
sb.append(creationTaskData.getServerPropertiesUrl()).append(",,");
sb.append(this.logiKMUrl);

N9eCreationTask n9eCreationTask = new N9eCreationTask();
n9eCreationTask.setTitle(Constant.TASK_TITLE_PREFIX + "-集群ID:" + creationTaskData.getClusterId());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,12 +18,13 @@ p_kafka_server_properties_name=${7} #server配置名
p_kafka_server_properties_md5=${8} #server配置MD5
p_kafka_server_properties_url=${9} #server配置文件下载地址

p_kafka_manager_url=${10} #LogiKM地址

#----------------------------------------配置信息------------------------------------------------------#
g_base_dir='/home'
g_cluster_task_dir=${g_base_dir}"/kafka_cluster_task/task_${p_task_id}" #部署升级路径
g_rollback_version=${g_cluster_task_dir}"/rollback_version" #回滚版本
g_new_kafka_package_name='' #最终的包名
g_kafka_manager_addr='' #kafka-manager地址
g_local_ip=`ifconfig -a|grep inet|grep -v 127.0.0.1|grep -v inet6|awk '{print $2}'|tr -d "addr:"`
g_hostname=${g_local_ip}

Expand All @@ -47,7 +48,7 @@ function dchat_alarm() {

# 检查并初始化环境
function check_and_init_env() {
if [ -z "${p_task_id}" -o -z "${p_cluster_task_type}" -o -z "${p_kafka_package_url}" -o -z "${p_cluster_id}" -o -z "${p_kafka_package_name}" -o -z "${p_kafka_package_md5}" -o -z "${p_kafka_server_properties_name}" -o -z "${p_kafka_server_properties_md5}" ]; then
if [ -z "${p_task_id}" -o -z "${p_cluster_task_type}" -o -z "${p_kafka_package_url}" -o -z "${p_cluster_id}" -o -z "${p_kafka_package_name}" -o -z "${p_kafka_package_md5}" -o -z "${p_kafka_server_properties_name}" -o -z "${p_kafka_server_properties_md5}" -o -z "${p_kafka_manager_url}" ]; then
ECHO_LOG "存在为空的参数不合法, 退出集群任务"
dchat_alarm "存在为空的参数不合法, 退出集群任务"
exit 1
Expand All @@ -72,11 +73,11 @@ function check_and_init_env() {

# Poll the LogiKM third-part API until this broker's replicas are all reported
# in-sync (the endpoint returns a single "true" flag), sleeping 10s between
# polls, so the cluster upgrade only proceeds once the broker is stable.
function check_and_wait_broker_stabled() {
    local stabled_flag_count
    stabled_flag_count=$(curl -s -G -d "hostname="${g_hostname} ${p_kafka_manager_url}/api/v1/third-part/${p_cluster_id}/broker-stabled | python -m json.tool | grep true |wc -l)
    while [ "$stabled_flag_count" -ne 1 ]; do
        ECHO_LOG "存在${stabled_flag_count}个副本未同步, sleep 10s"
        sleep 10
        stabled_flag_count=$(curl -s -G -d "hostname="${g_hostname} ${p_kafka_manager_url}/api/v1/third-part/${p_cluster_id}/broker-stabled | python -m json.tool | grep true |wc -l)
    done
    ECHO_LOG "集群副本都已经处于同步的状态, 可以进行集群升级"
}
Expand Down Expand Up @@ -324,6 +325,7 @@ ECHO_LOG " p_kafka_package_name=${p_kafka_package_name}"
ECHO_LOG " p_kafka_package_md5=${p_kafka_package_md5}"
ECHO_LOG " p_kafka_server_properties_name=${p_kafka_server_properties_name}"
ECHO_LOG " p_kafka_server_properties_md5=${p_kafka_server_properties_md5}"
ECHO_LOG " p_kafka_manager_url=${p_kafka_manager_url}"



Expand All @@ -342,7 +344,7 @@ fi
ECHO_LOG "停kafka服务"
stop_kafka_server

ECHO_LOG "停5秒, 确保"
ECHO_LOG "再停5秒, 确保端口已释放"
sleep 5

if [ "${p_cluster_task_type}" == "0" ];then
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@
*/
@Api(tags = "开放接口-Broker相关接口(REST)")
@RestController
@RequestMapping(ApiPrefix.API_V1_THIRD_PART_OP_PREFIX)
@RequestMapping(ApiPrefix.API_V1_THIRD_PART_PREFIX)
public class ThirdPartBrokerController {
@Autowired
private BrokerService brokerService;
Expand All @@ -44,7 +44,7 @@ public class ThirdPartBrokerController {
private ClusterService clusterService;

@ApiOperation(value = "Broker信息概览", notes = "")
@RequestMapping(value = "{clusterId}/brokers/{brokerId}/overview", method = RequestMethod.GET)
@GetMapping(value = "{clusterId}/brokers/{brokerId}/overview")
@ResponseBody
public Result<ThirdPartBrokerOverviewVO> getBrokerOverview(@PathVariable Long clusterId,
@PathVariable Integer brokerId) {
Expand All @@ -70,7 +70,7 @@ public Result<ThirdPartBrokerOverviewVO> getBrokerOverview(@PathVariable Long cl
}

@ApiOperation(value = "BrokerRegion信息", notes = "所有集群的")
@RequestMapping(value = "broker-regions", method = RequestMethod.GET)
@GetMapping(value = "broker-regions")
@ResponseBody
public Result<List<BrokerRegionVO>> getBrokerRegions() {
List<ClusterDO> clusterDOList = clusterService.list();
Expand Down
22 changes: 13 additions & 9 deletions kafka-manager-web/src/main/resources/application.yml
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,9 @@ client-pool:
borrow-timeout-unit-ms: 3000 # 租借超时时间,单位毫秒

account:
jump-login:
gateway-api: false # 网关接口
third-part-api: false # 第三方接口
ldap:
enabled: false
url: ldap://127.0.0.1:389/
Expand All @@ -92,19 +95,20 @@ account:
auth-user-registration: true
auth-user-registration-role: normal

kcm:
enabled: false
s3:
kcm: # 集群安装部署,仅安装broker
enabled: false # 是否开启
s3: # s3 存储服务
endpoint: s3.didiyunapi.com
access-key: 1234567890
secret-key: 0987654321
bucket: logi-kafka
n9e:
base-url: http://127.0.0.1:8004
user-token: 12345678
timeout: 300
account: root
script-file: kcm_script.sh
n9e: # 夜莺
base-url: http://127.0.0.1:8004 # 夜莺job服务地址
user-token: 12345678 # 用户的token
timeout: 300 # 单台操作的超时时间
account: root # 操作时使用的账号
script-file: kcm_script.sh # 脚本,已内置好,在源码的kcm模块内,此处配置无需修改
logikm-url: http://127.0.0.1:8080 # logikm部署地址,部署时kcm_script.sh会调用logikm检查部署中的一些状态

monitor:
enabled: false
Expand Down

0 comments on commit a7f8c3c

Please sign in to comment.