JoyLau's Blog

JoyLau 的技术学习与思考

SpringCloud Gateway 中想要获取请求体数据,这里介绍一种优雅的处理方法,就是使用 框架自带的 ModifyRequestBodyGatewayFilterFactory

使用

新建类 RequestLogFilter

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
@Slf4j
@Component
@AllArgsConstructor
public class RequestLogFilter implements GlobalFilter, Ordered {

    /** Runs early so the request body is captured before the routing filters. */
    private static final int FILTER_ORDER = -100;

    private final ModifyRequestBodyGatewayFilterFactory modifyRequestBodyGatewayFilterFactory;

    private final AsyncRequestHandler asyncRequestHandle;

    /**
     * Captures the raw request body via the framework's ModifyRequestBody filter
     * and kicks off asynchronous access-log assembly, then continues the chain.
     * Multipart uploads are logged without a body payload.
     */
    @Override
    public Mono<Void> filter(ServerWebExchange exchange, GatewayFilterChain chain) {
        // Do not parse the body of file uploads.
        if (Utils.isUploadRequest(exchange)) {
            process(exchange, null);
            return chain.filter(exchange);
        }
        ModifyRequestBodyGatewayFilterFactory.Config bodyConfig =
                new ModifyRequestBodyGatewayFilterFactory.Config()
                        .setRewriteFunction(byte[].class, byte[].class, (webExchange, body) -> {
                            process(webExchange, body);
                            // Pass the body through unchanged; an absent body yields an empty Mono.
                            return Mono.justOrEmpty(body);
                        });
        return modifyRequestBodyGatewayFilterFactory.apply(bodyConfig).filter(exchange, chain);
    }

    @Override
    public int getOrder() {
        return FILTER_ORDER;
    }

    /**
     * Stamps the request start time on the exchange and stores the Future of the
     * asynchronously-built access log for later retrieval (when the response is seen).
     *
     * @param exchange current exchange
     * @param bytes    raw request body, or null when the body was not captured
     */
    private void process(ServerWebExchange exchange, byte[] bytes) {
        exchange.getAttributes().put(Constant.REQUEST_START_TIME_ATTR, System.currentTimeMillis());
        exchange.getAttributes().put(Constant.ACCESS_LOG_REQUEST_FUTURE_ATTR,
                asyncRequestHandle.handle(exchange, bytes));
    }
}
1
2
3
4
5
6
7
8
9
  /**
   * Tells whether the current request is a multipart file upload.
   *
   * @param exchange ServerWebExchange
   * @return true when the request content type is compatible with multipart/form-data
   */
  public static boolean isUploadRequest(ServerWebExchange exchange) {
      MediaType contentType = requestContentType(exchange);
      return MediaType.MULTIPART_FORM_DATA.isCompatibleWith(contentType);
  }

新建 AsyncRequestHandler 类

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
@Component
@Slf4j
@AllArgsConstructor
public class AsyncRequestHandler {

    /**
     * Builds the access-log entry on the "logTaskPool" executor so request
     * handling is not blocked by log assembly.
     *
     * @param exchange ServerWebExchange
     * @param bytes    raw request body, or null when the body was not captured
     * @return a completed Future holding the log entry
     */
    @Async("logTaskPool")
    public Future<AccessLog> handle(ServerWebExchange exchange, byte[] bytes) {
        return new AsyncResult<>(wrapperAccessLog(exchange, bytes));
    }

    /**
     * Assembles an {@link AccessLog} from the request and the captured body.
     * Response-side fields are filled in later by the caller.
     *
     * @param exchange ServerWebExchange
     * @param bytes data to send
     * @return log
     */
    private AccessLog wrapperAccessLog(ServerWebExchange exchange, byte[] bytes) {
        ServerHttpRequest request = exchange.getRequest();
        AccessLog accessLog = new AccessLog();
        accessLog.setToken(Utils.getToken(exchange));
        accessLog.setTime(LocalDateTime.now());
        accessLog.setApplication(getApplicationName(exchange));
        accessLog.setIp(Utils.getIp(exchange));
        accessLog.setUri(request.getURI().toString());
        accessLog.setHttpMethod(HttpMethod.valueOf(request.getMethodValue()));
        // Temporarily store the request start timestamp; it is replaced with the
        // real elapsed time once the response has been observed.
        accessLog.setTakenTime((long) exchange.getAttributes().get(Constant.REQUEST_START_TIME_ATTR));
        accessLog.setRequestHeaders(request.getHeaders().toSingleValueMap().toString());
        if (Utils.isUploadRequest(exchange)) {
            accessLog.setRequest("二进制文件");
        }
        Optional.ofNullable(bytes).ifPresent(bs -> {
            // Use the lambda parameter `bs` (previously the outer `bytes` was
            // captured, which worked but defeated the null-safe wrapper).
            if (bs.length <= DataSize.ofKilobytes(256).toBytes()) {
                // Only bodies up to 256 KB are stored verbatim (larger payloads,
                // e.g. file uploads, would bloat the log).
                accessLog.setRequest(new String(bs, StandardCharsets.UTF_8));
            } else {
                accessLog.setRequest("报文过长");
            }
        });
        return accessLog;
    }

    /**
     * Derives the target service name from the matched route id.
     *
     * @param exchange ServerWebExchange
     * @return name of the application, or "unknown" when no route matched
     */
    private String getApplicationName(ServerWebExchange exchange) {
        String routingId =
                (String) exchange.getAttributes().get(ServerWebExchangeUtils.GATEWAY_PREDICATE_MATCHED_PATH_ROUTE_ID_ATTR);
        // Guard: the attribute is absent when no route predicate matched
        // (previously this dereferenced null and threw an NPE).
        if (routingId == null) {
            return "unknown";
        }
        // Auto-registered services carry the discovery-client prefix.
        if (routingId.startsWith(Constant.MODULE_SUB_PREFIX)) {
            return routingId.substring(Constant.MODULE_SUB_PREFIX.length());
        }
        return routingId;
    }

}

AccessLog

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
/**
 * Access log entry for one gateway request/response pair.
 * Request-side fields are populated when the request is captured; the
 * response-side fields (httpCode, response, responseHeaders) and the real
 * takenTime are filled in later, once the response has been observed.
 */
public class AccessLog extends AbstractLog {

/**
 * Name of the target service (derived from the matched route id)
 */
private String application;

/**
 * Client IP address
 */
private String ip;

/**
 * Request URI
 */
private String uri;

/**
 * HTTP method of the request
 */
private HttpMethod httpMethod;

/**
 * Elapsed time in milliseconds (initially holds the request start timestamp;
 * replaced with the real duration when the response arrives)
 */
private Long takenTime;

/**
 * HTTP status code of the response
 */
private Integer httpCode;

/**
 * Request body text ("二进制文件" for uploads, "报文过长" for oversized bodies)
 */
private String request;

/**
 * Response body text
 */
private String response;

/**
 * Request headers (single-value map rendered as a string)
 */
private String requestHeaders;

/**
 * Response headers (single-value map rendered as a string)
 */
private String responseHeaders;


public String getApplication() {
return application;
}

public void setApplication(String application) {
this.application = application;
}

public String getIp() {
return ip;
}

public void setIp(String ip) {
this.ip = ip;
}

public String getUri() {
return uri;
}

public void setUri(String uri) {
this.uri = uri;
}

public HttpMethod getHttpMethod() {
return httpMethod;
}

public void setHttpMethod(HttpMethod httpMethod) {
this.httpMethod = httpMethod;
}

public Long getTakenTime() {
return takenTime;
}

public void setTakenTime(Long takenTime) {
this.takenTime = takenTime;
}

public Integer getHttpCode() {
return httpCode;
}

public void setHttpCode(Integer httpCode) {
this.httpCode = httpCode;
}

public String getRequest() {
return request;
}

public void setRequest(String request) {
this.request = request;
}

public String getResponse() {
return response;
}

public void setResponse(String response) {
this.response = response;
}

public String getRequestHeaders() {
return requestHeaders;
}

public void setRequestHeaders(String requestHeaders) {
this.requestHeaders = requestHeaders;
}

public String getResponseHeaders() {
return responseHeaders;
}

public void setResponseHeaders(String responseHeaders) {
this.responseHeaders = responseHeaders;
}
}

AbstractLog

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
/**
 * Common fields shared by all log entry types (who did it, when, from where).
 */
public abstract class AbstractLog {

/**
 * Log id
 */
private String id;

/**
 * Access token of the caller
 */
private String token;

/**
 * User id
 */
private String userid;

/**
 * Username (login account)
 */
private String username;

/**
 * User's display name
 */
private String name;

/**
 * Time of the operation
 */
private LocalDateTime time;

/**
 * Origin of the operation
 */
private OriginType originType;


public String getId() {
return id;
}

public void setId(String id) {
this.id = id;
}

public String getToken() {
return token;
}

public void setToken(String token) {
this.token = token;
}

public String getUserid() {
return userid;
}

public void setUserid(String userid) {
this.userid = userid;
}

public String getUsername() {
return username;
}

public void setUsername(String username) {
this.username = username;
}

public String getName() {
return name;
}

public void setName(String name) {
this.name = name;
}

public LocalDateTime getTime() {
return time;
}

public void setTime(LocalDateTime time) {
this.time = time;
}

public OriginType getOriginType() {
return originType;
}

public void setOriginType(OriginType originType) {
this.originType = originType;
}
}

注意:
这里对请求的数据进行了拦截并包装成日志对象存储在 exchange 的一个 ACCESS_LOG_REQUEST_FUTURE_ATTR 属性中,后面再拿到响应时,再从 exchange 拿回请求数据和响应数据一并使用

插件引入

1
2
3
4
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
</plugin>

打包可执行 jar 重命名

1
2
3
4
5
6
7
8
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
<configuration>
<!--可执行 jar 重命名-->
<classifier>exec</classifier>
</configuration>
</plugin>

定义环境变量

1
2
3
4
5
6
7
8
9
10
11
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
<configuration>
<environmentVariables>
<ENV1>5000</ENV1>
<ENV2>Some Text</ENV2>
<ENV3/>
</environmentVariables>
</configuration>
</plugin>

定义系统变量

1
2
3
4
5
6
7
8
9
10
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
<configuration>
<systemPropertyVariables>
<property1>test</property1>
<property2>${my.value}</property2>
</systemPropertyVariables>
</configuration>
</plugin>

更多使用方法参考 文档

记录下 OpenApi3 + SpringCloud Gateway 聚合文档的过程

组件选型

  1. SpringDoc
  2. Knife4j
  3. SpringCloud Gateway

项目配置

在所有的 spring boot 项目中引入 SpringDoc

1
2
3
4
5
<dependency>
<groupId>org.springdoc</groupId>
<artifactId>springdoc-openapi-ui</artifactId>
<version>${springdoc.version}</version>
</dependency>

在 gateway 项目中引入 SpringDoc

1
2
3
4
5
<dependency>
<groupId>org.springdoc</groupId>
<artifactId>springdoc-openapi-webflux-ui</artifactId>
<version>${springdoc.version}</version>
</dependency>

并且需要排除 springdoc-openapi-ui 的依赖

OpenAPI 配置

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
@Configuration
@AllArgsConstructor
public class SwaggerConfiguration {

    private final Environment environment;

    /**
     * Root OpenAPI definition exposed by this service; the description is
     * derived from the configured application name.
     */
    @Bean
    public OpenAPI openAPI() {
        Info apiInfo = new Info()
                .title("xxxx")
                .description(environment.getProperty("spring.application.name") + " 服务 API 文档")
                .version("xx")
                .contact(new Contact().name("xxx").url("xxx").email("xxxxx"))
                .summary("OpenAPI 文档");
        return new OpenAPI().info(apiInfo);
    }
}

文档聚合

聚合 swagger 添加分组

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
@Component
@AllArgsConstructor
public class SwaggerConfig {

    /** Prefix carried by route ids created by the reactive discovery client. */
    public static final String MODULE_SUB_PREFIX = "ReactiveCompositeDiscoveryClient_";

    private final SwaggerUiConfigParameters swaggerUiConfigParameters;

    private final RouteLocator routeLocator;

    /**
     * Periodically rebuilds the swagger-ui group list from the current gateway
     * routes so newly registered services show up in the aggregated docs.
     */
    @Scheduled(fixedDelay = 20000)
    public void apis() {
        swaggerUiConfigParameters.getUrls().clear();
        routeLocator.getRoutes().subscribe(route -> {
            // startsWith, not contains: substring(MODULE_SUB_PREFIX.length()) is
            // only correct when the prefix sits at position 0; a route id merely
            // containing the prefix elsewhere would have produced a mangled name.
            if (route.getId().startsWith(MODULE_SUB_PREFIX)) {
                String name = route.getId().substring(MODULE_SUB_PREFIX.length());
                swaggerUiConfigParameters.addGroup(name);
            }
        });
    }
}

修改 /v3/api-docs/ 报文添加 basePath 使得 Knife4j 在聚合文档下能正常调试

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
/**
 * Gateway filter factory ("AddBasePath") that rewrites the JSON response of
 * /v3/api-docs requests, adding a "basePath" field so that Knife4j can issue
 * debug requests through the gateway when documentation is aggregated.
 */
@Component
public class AddBasePathFilterFactory extends AbstractGatewayFilterFactory<AddBasePathFilterFactory.Config> {

private final ModifyResponseBodyGatewayFilterFactory modifyResponseBodyGatewayFilterFactory;

public AddBasePathFilterFactory(ModifyResponseBodyGatewayFilterFactory modifyResponseBodyGatewayFilterFactory) {
super(Config.class);
this.modifyResponseBodyGatewayFilterFactory = modifyResponseBodyGatewayFilterFactory;
}

/**
 * Delegates to the framework's ModifyResponseBody filter: the response body is
 * parsed into a JsonNode, enriched via {@link #addBasePath}, and written back.
 */
@Override
public GatewayFilter apply(Config config) {
ModifyResponseBodyGatewayFilterFactory.Config cf = new ModifyResponseBodyGatewayFilterFactory.Config()
.setRewriteFunction(JsonNode.class, JsonNode.class,
(e, jsonNode) -> Mono.justOrEmpty(addBasePath(e, jsonNode)));
return modifyResponseBodyGatewayFilterFactory.apply(cf);
}

// Filter name referenced from route definitions ("- AddBasePath").
@Override
public String name() {
return "AddBasePath";
}

// No configurable properties; the type is required by AbstractGatewayFilterFactory.
@Setter
public static class Config {
}

/**
 * Adds a "basePath" attribute to the OpenAPI document (non-object bodies are
 * returned unchanged).
 *
 * @param exchange current exchange
 * @param jsonNode parsed response body
 * @return the (possibly modified) node
 */
private JsonNode addBasePath(ServerWebExchange exchange, JsonNode jsonNode) {
if (jsonNode.isObject()) {
ObjectNode node = (ObjectNode) jsonNode;
// subPath(4) drops the leading path segments. NOTE(review): this assumes the
// path shape produced by the RewritePath route (e.g. "/{group}/v3/api-docs");
// verify the segment count against the actual route configuration.
String basePath = exchange.getRequest().getPath().subPath(4).value();
node.put("basePath", basePath);
return node;
}
return jsonNode;
}
}

网关路由配置

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
spring:
  cloud:
    gateway:
      routes:
        # openapi: rewrite /v3/api-docs/{group} to /{group}/v3/api-docs, then add basePath
        - id: openapi
          uri: http://localhost:${server.port}
          predicates:
            - Path=/v3/api-docs/**
          filters:
            - RewritePath=/v3/api-docs/(?<path>.*), /$\{path}/v3/api-docs
            - AddBasePath
        # redirect the root page to the aggregated documentation page
        - id: doc
          uri: http://localhost:${server.port}
          predicates:
            - Path=/
          filters:
            - RedirectTo=302, /doc.html

说明

记录下 EFK(Elasticsearch8 + FileBeat + Kibana) 日志分析平台搭建
并加以用户名密码保护

证书生成

先启动一个 ES 节点,进入节点后使用下面的命令生成证书

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
# Guard: both passwords must be provided via the .env file.
# (POSIX -z replaces the fragile unquoted `[ x$VAR == x ]` comparison.)
if [ -z "${ELASTIC_PASSWORD}" ]; then
  echo "Set the ELASTIC_PASSWORD environment variable in the .env file";
  exit 1;
elif [ -z "${KIBANA_PASSWORD}" ]; then
  echo "Set the KIBANA_PASSWORD environment variable in the .env file";
  exit 1;
fi;
# Create the CA once; the zip unpacks to config/certs/ca/.
if [ ! -f config/certs/ca.zip ]; then
  echo "Creating CA";
  bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip;
  unzip config/certs/ca.zip -d config/certs;
fi;
# Create per-node certificates from a generated instances.yml.
if [ ! -f config/certs/certs.zip ]; then
  echo "Creating certs";
  # The backslash continuations concatenate adjacent quoted strings into one
  # argument, so the spaces INSIDE the quotes are the YAML indentation.
  echo -ne \
"instances:\n"\
"  - name: es01\n"\
"    dns:\n"\
"      - es01\n"\
"      - localhost\n"\
"    ip:\n"\
"      - 127.0.0.1\n"\
"  - name: es02\n"\
"    dns:\n"\
"      - es02\n"\
"      - localhost\n"\
"    ip:\n"\
"      - 127.0.0.1\n"\
"  - name: es03\n"\
"    dns:\n"\
"      - es03\n"\
"      - localhost\n"\
"    ip:\n"\
"      - 127.0.0.1\n"\
> config/certs/instances.yml;
  bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key;
  unzip config/certs/certs.zip -d config/certs;
fi;
echo "Setting file permissions"
chown -R root:root config/certs;
find . -type d -exec chmod 750 \{\} \;;
find . -type f -exec chmod 640 \{\} \;;
# Wait until ES answers (an auth error proves the HTTP layer is up).
echo "Waiting for Elasticsearch availability";
until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done;
# Set the kibana_system password via the security API; "{}" means success.
echo "Setting kibana_system password";
until curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done;
echo "All done!";

详细查看 官方文档

服务搭建

docker-compose.yml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
version: "2.2"
services:
  es01:
    image: elasticsearch:${STACK_VERSION}
    volumes:
      - ./certs:/usr/share/elasticsearch/config/certs
      - ./es01-data:/usr/share/elasticsearch/data
    container_name: elasticsearch-01
    restart: always
    ports:
      - ${ES_PORT}:9200
    networks:
      - elastic
    environment:
      - node.name=es01
      - cluster.name=${CLUSTER_NAME}
      - discovery.type=single-node
      #- cluster.initial_master_nodes=es01,es02,es03
      #- discovery.seed_hosts=es02,es03
      - ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
      - "ES_JAVA_OPTS=-Xms1g -Xmx1g"
      - bootstrap.memory_lock=true
      - xpack.security.enabled=true
      #- xpack.security.http.ssl.enabled=true
      #- xpack.security.http.ssl.key=certs/es01/es01.key
      #- xpack.security.http.ssl.certificate=certs/es01/es01.crt
      #- xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
      #- xpack.security.http.ssl.verification_mode=certificate
      - xpack.security.transport.ssl.enabled=true
      - xpack.security.transport.ssl.key=certs/es01/es01.key
      - xpack.security.transport.ssl.certificate=certs/es01/es01.crt
      - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
      - xpack.security.transport.ssl.verification_mode=certificate
      - xpack.license.self_generated.type=${LICENSE}
    ulimits:
      memlock:
        soft: -1
        hard: -1
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "curl -s --cacert config/certs/ca/ca.crt http://localhost:9200 | grep -q 'missing authentication credentials'",
        ]
      interval: 10s
      timeout: 10s
      retries: 120
  kibana:
    depends_on:
      es01:
        condition: service_healthy
    image: kibana:${STACK_VERSION}
    container_name: kibana
    restart: always
    volumes:
      - ./certs:/usr/share/kibana/config/certs
      - ./kibanadata:/usr/share/kibana/data
    ports:
      - ${KIBANA_PORT}:5601
    networks:
      - elastic
    environment:
      - SERVERNAME=192.168.1.21
      - SERVER_BASEPATH=/kibana
      - SERVER_REWRITEBASEPATH=true
      - ELASTICSEARCH_HOSTS=http://es01:9200
      - ELASTICSEARCH_USERNAME=kibana_system
      - ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD}
      - ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt
      - I18N_LOCALE=zh-CN
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "curl -s -I http://localhost:5601/kibana | grep -q 'HTTP/1.1 302 Found'",
        ]
      interval: 10s
      timeout: 10s
      retries: 120
  filebeat:
    depends_on:
      es01:
        condition: service_healthy
    image: elastic/filebeat:${STACK_VERSION}
    container_name: filebeat
    ports:
      - 6115:6115
    restart: always
    volumes:
      - ./filebeat-data/filebeat.yml:/usr/share/filebeat/filebeat.yml
      - ./filebeat-data/filebeat.template.json:/usr/share/filebeat/filebeat.template.json
    networks:
      - elastic
networks:
  elastic:

.env 文件

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
# Password for the 'elastic' user (at least 6 characters)
ELASTIC_PASSWORD=xxxxx

# Password for the 'kibana_system' user (at least 6 characters)
KIBANA_PASSWORD=Kaiyuan@2022

# Version of Elastic products
STACK_VERSION=8.4.0

# Set the cluster name
CLUSTER_NAME=docker-cluster

# Set to 'basic' or 'trial' to automatically start the 30-day trial
LICENSE=basic
#LICENSE=trial

# Port to expose Elasticsearch HTTP API to the host
ES_PORT=9200
#ES_PORT=127.0.0.1:9200

# Port to expose Kibana to the host
KIBANA_PORT=5601
#KIBANA_PORT=80

# Increase or decrease based on the available host memory (in bytes)
MEM_LIMIT=1073741824

# Project namespace (defaults to the current folder name if not set)
#COMPOSE_PROJECT_NAME=myproject

背景

记录一下 Atlassian 旗下的 Confluence 和 Jira 免费 License 申请

服务搭建

docker-compose.yml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
version: "3"
services:
  confluence:
    image: atlassian/confluence
    container_name: confluence
    restart: always
    ports:
      - 8090:8090
      - 8091:8091
    volumes:
      - ./confluence-data:/var/atlassian/application-data/confluence
  mysql:
    image: mysql:8.0.22
    container_name: mysql
    security_opt:
      - seccomp:unconfined
    ports:
      - 6101:3306
    restart: always
    volumes:
      - ./mysql-data:/var/lib/mysql
      - ./my.cnf:/etc/mysql/my.cnf
    environment:
      - MYSQL_ROOT_PASSWORD=Kaiyuan@2020
      - TZ=Asia/Shanghai
  jira:
    image: atlassian/jira-software
    container_name: jira
    restart: always
    ports:
      - 8080:8080
    volumes:
      - ./jira-data:/var/atlassian/application-data/jira

my.cnf

1
2
3
4
5
6
7
8
9
10
11
12
[mysqld]
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
datadir = /var/lib/mysql
secure-file-priv= NULL

# Custom config should go here
!includedir /etc/mysql/conf.d/
max_connections=1024

sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'
transaction-isolation=READ-COMMITTED

License 申请

Atlassian 的网站
点击 New Trial License 申请新的 License
选择 Confluence 再选择 Confluence (Data Center)
填入 Server ID 即可申请免费的一个月的 License
一个月到期后再次申请即可

背景

有时我们自建了 Jira 站点,又搭建了 Confluence 服务,想着不用再做一次用户的新增,可以使用 Jira 的用户到 Confluence 中使用

操作

第一步 在 Jira 中配置用户服务器

image-20221028183734308

其中 IP 地址指的是 confluence 服务所在的地址,相当于白名单地址的意思

第二步 配置 Confluence

配置 Confluence 的 用户目录 选项

image-20221028184043190

点击 “测试并保存” 完成数据的同步

背景

有时 Nacos 单机 MySQL 版重启服务器后无法提供服务, 是因为重启时均启动 nacos 服务和 MySQL 服务,而MySQL 服务启动的较慢, nacos 在启动的时候还连接不上数据库导致 Nacos 服务无法正常提供服务
这里我的解决方式是使用 Nacos 单机 Derby 版

部署

docker-compose.yml 文件内容如下:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
version: "2"
services:
  nacos:
    image: nacos/nacos-server:v2.1.0
    container_name: nacos
    restart: always
    environment:
      - PREFER_HOST_MODE=ip
      # standalone mode stores data in the embedded Derby database (no MySQL dependency)
      - MODE=standalone
      - NACOS_AUTH_ENABLE=true
    volumes:
      - ./standalone-logs/:/home/nacos/logs
      - ./data:/home/nacos/data
    ports:
      - "8848:8848"

在当前目录下新建文件夹 data 和 standalone-logs,启动服务即可

背景

有时服务器断电后再开机启动需要启动一些服务,除了正常的 rc 命令可以实现外,之前我常用的是 systemctl service, 今天发现 crontab 的 reboot 标签也可以实现

使用

crontab -e

1
2
3
4
# 启动后 120 秒启动 canal adapter
@reboot sleep 120; cd /data/msmp-service/canal/canal.adapter-1.1.5/bin && sh restart.sh

@reboot sleep 600; cd /data/gateway && sh handler.sh restart

保存后即可生效

一般情况下,我会 sleep 一段时间再启动服务,因为要等其他 systemd 服务启动完成,比如数据库服务

0%