JoyLau's Blog

JoyLau 的技术学习与思考

注意

上篇中,我们获取到了请求数据报文,这篇继续获取响应报文并发往持久化存储
这里获取响应报文需要排除掉文件下载的情况

使用

  1. 新建类 ModifyResponseBodyGatewayFilterFactoryCopy
    该类照抄自 Spring 源码 ModifyResponseBodyGatewayFilterFactory,并添加了判断:当请求返回的头信息非 JSON 响应时,将不再解析报文
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
/**
 * Adapted from the Spring Cloud Gateway source of
 * {@code ModifyResponseBodyGatewayFilterFactory}.
 *
 * Optimisation over the original: avoids the memory cost of buffering and
 * decoding response bodies for file-download requests. The following guard was
 * added at the top of writeWith(Publisher&lt;? extends DataBuffer&gt; body):
 *
 * if (!Utils.isJsonResponse(exchange)) {
 * config.getRewriteFunction().apply(exchange, null);
 * return getDelegate().writeWith(body);
 * }
 *
 * i.e. when the response Content-Type is not JSON, the body is streamed
 * through untouched and the rewrite function is merely notified with null.
 */
public class ModifyResponseBodyGatewayFilterFactoryCopy extends AbstractGatewayFilterFactory<ModifyResponseBodyGatewayFilterFactoryCopy.Config> {

// Decoders keyed by Content-Encoding (e.g. gzip) used to inflate the body before parsing.
private final Map<String, MessageBodyDecoder> messageBodyDecoders;

// Encoders keyed by Content-Encoding used to re-compress the rewritten body.
private final Map<String, MessageBodyEncoder> messageBodyEncoders;

// HTTP message readers used to deserialize the body into the configured in-class.
private final List<HttpMessageReader<?>> messageReaders;

public ModifyResponseBodyGatewayFilterFactoryCopy(List<HttpMessageReader<?>> messageReaders,
Set<MessageBodyDecoder> messageBodyDecoders, Set<MessageBodyEncoder> messageBodyEncoders) {
super(Config.class);
this.messageReaders = messageReaders;
this.messageBodyDecoders = messageBodyDecoders.stream()
.collect(Collectors.toMap(MessageBodyDecoder::encodingType, identity()));
this.messageBodyEncoders = messageBodyEncoders.stream()
.collect(Collectors.toMap(MessageBodyEncoder::encodingType, identity()));
}

@Override
public GatewayFilter apply(Config config) {
ModifyResponseGatewayFilter gatewayFilter = new ModifyResponseGatewayFilter(config);
gatewayFilter.setFactory(this);
return gatewayFilter;
}

/**
 * Filter configuration: body in/out types, codec hints, optional replacement
 * content type, and the rewrite function applied to the decoded body.
 */
public static class Config {

private Class inClass;

private Class outClass;

private Map<String, Object> inHints;

private Map<String, Object> outHints;

private String newContentType;

private RewriteFunction rewriteFunction;

public Class getInClass() {
return inClass;
}

public Config setInClass(Class inClass) {
this.inClass = inClass;
return this;
}

public Class getOutClass() {
return outClass;
}

public Config setOutClass(Class outClass) {
this.outClass = outClass;
return this;
}

public Map<String, Object> getInHints() {
return inHints;
}

public Config setInHints(Map<String, Object> inHints) {
this.inHints = inHints;
return this;
}

public Map<String, Object> getOutHints() {
return outHints;
}

public Config setOutHints(Map<String, Object> outHints) {
this.outHints = outHints;
return this;
}

public String getNewContentType() {
return newContentType;
}

public Config setNewContentType(String newContentType) {
this.newContentType = newContentType;
return this;
}

public RewriteFunction getRewriteFunction() {
return rewriteFunction;
}

public Config setRewriteFunction(RewriteFunction rewriteFunction) {
this.rewriteFunction = rewriteFunction;
return this;
}

// Convenience setter that configures in/out classes together with the function.
public <T, R> Config setRewriteFunction(Class<T> inClass, Class<R> outClass,
RewriteFunction<T, R> rewriteFunction) {
setInClass(inClass);
setOutClass(outClass);
setRewriteFunction(rewriteFunction);
return this;
}

}

/**
 * The actual GatewayFilter: decorates the response so that the body passes
 * through ModifiedServerHttpResponse before being written to the client.
 */
public class ModifyResponseGatewayFilter implements GatewayFilter, Ordered {

private final Config config;

private GatewayFilterFactory<Config> gatewayFilterFactory;

public ModifyResponseGatewayFilter(Config config) {
this.config = config;
}

@Override
public Mono<Void> filter(ServerWebExchange exchange, GatewayFilterChain chain) {
return chain.filter(exchange.mutate().response(new ModifiedServerHttpResponse(exchange, config)).build());
}

@Override
public int getOrder() {
// Must run just before NettyWriteResponseFilter writes the response.
return NettyWriteResponseFilter.WRITE_RESPONSE_FILTER_ORDER - 1;
}

@Override
public String toString() {
Object obj = (this.gatewayFilterFactory != null) ? this.gatewayFilterFactory : this;
return filterToStringCreator(obj).append("New content type", config.getNewContentType())
.append("In class", config.getInClass()).append("Out class", config.getOutClass()).toString();
}

public void setFactory(GatewayFilterFactory<Config> gatewayFilterFactory) {
this.gatewayFilterFactory = gatewayFilterFactory;
}

}

/**
 * Response decorator that intercepts writeWith, decodes the body, applies the
 * configured rewrite function, and writes the (possibly re-encoded) result.
 */
protected class ModifiedServerHttpResponse extends ServerHttpResponseDecorator {

private final ServerWebExchange exchange;

private final Config config;

public ModifiedServerHttpResponse(ServerWebExchange exchange, Config config) {
super(exchange.getResponse());
this.exchange = exchange;
this.config = config;
}

@SuppressWarnings("unchecked")
@Override
public Mono<Void> writeWith(Publisher<? extends DataBuffer> body) {
// Added guard (difference from the Spring original): non-JSON responses
// (e.g. file downloads) are streamed through unmodified; the rewrite
// function is still notified, with a null body.
if (!Utils.isJsonResponse(exchange)) {
config.getRewriteFunction().apply(exchange, null);
return getDelegate().writeWith(body);
}

Class inClass = config.getInClass();
Class outClass = config.getOutClass();

String originalResponseContentType = exchange.getAttribute(ORIGINAL_RESPONSE_CONTENT_TYPE_ATTR);

HttpHeaders httpHeaders = new HttpHeaders();
// explicitly add it in this way instead of
// 'httpHeaders.setContentType(originalResponseContentType)'
// this will prevent exception in case of using non-standard media
// types like "Content-Type: image"
httpHeaders.add(HttpHeaders.CONTENT_TYPE, originalResponseContentType);

ClientResponse clientResponse = prepareClientResponse(body, httpHeaders);

// TODO: flux or mono
// Decode the body, run the rewrite function, and fall back to calling the
// function with null when the upstream body is empty.
Mono modifiedBody = extractBody(exchange, clientResponse, inClass)
.flatMap(originalBody -> config.getRewriteFunction().apply(exchange, originalBody))
.switchIfEmpty(Mono.defer(() -> (Mono) config.getRewriteFunction().apply(exchange, null)));

BodyInserter bodyInserter = BodyInserters.fromPublisher(modifiedBody, outClass);
CachedBodyOutputMessage outputMessage = new CachedBodyOutputMessage(exchange,
exchange.getResponse().getHeaders());
return bodyInserter.insert(outputMessage, new BodyInserterContext()).then(Mono.defer(() -> {
Mono<DataBuffer> messageBody = writeBody(getDelegate(), outputMessage, outClass);
HttpHeaders headers = getDelegate().getHeaders();
// Keep Content-Length in sync with the rewritten body unless chunked.
if (!headers.containsKey(HttpHeaders.TRANSFER_ENCODING)
|| headers.containsKey(HttpHeaders.CONTENT_LENGTH)) {
messageBody = messageBody.doOnNext(data -> headers.setContentLength(data.readableByteCount()));
}
// TODO: fail if isStreamingMediaType?
return getDelegate().writeWith(messageBody);
}));
}

@Override
public Mono<Void> writeAndFlushWith(Publisher<? extends Publisher<? extends DataBuffer>> body) {
return writeWith(Flux.from(body).flatMapSequential(p -> p));
}

// Wraps the raw body in a ClientResponse so the messageReaders can decode it.
private ClientResponse prepareClientResponse(Publisher<? extends DataBuffer> body, HttpHeaders httpHeaders) {
ClientResponse.Builder builder;
builder = ClientResponse.create(exchange.getResponse().getStatusCode(), messageReaders);
return builder.headers(headers -> headers.putAll(httpHeaders)).body(Flux.from(body)).build();
}

/**
 * Decodes the response body to inClass, first decompressing it when a known
 * Content-Encoding (see messageBodyDecoders) is present.
 */
private <T> Mono<T> extractBody(ServerWebExchange exchange, ClientResponse clientResponse, Class<T> inClass) {
// if inClass is byte[] then just return body, otherwise check if
// decoding required
if (byte[].class.isAssignableFrom(inClass)) {
return clientResponse.bodyToMono(inClass);
}

List<String> encodingHeaders = exchange.getResponse().getHeaders().getOrEmpty(HttpHeaders.CONTENT_ENCODING);
for (String encoding : encodingHeaders) {
MessageBodyDecoder decoder = messageBodyDecoders.get(encoding);
if (decoder != null) {
return clientResponse.bodyToMono(byte[].class).publishOn(Schedulers.parallel()).map(decoder::decode)
.map(bytes -> exchange.getResponse().bufferFactory().wrap(bytes))
.map(buffer -> prepareClientResponse(Mono.just(buffer),
exchange.getResponse().getHeaders()))
.flatMap(response -> response.bodyToMono(inClass));
}
}

return clientResponse.bodyToMono(inClass);
}

/**
 * Joins the cached rewritten body into a single buffer, re-compressing it
 * when the response declares a known Content-Encoding.
 */
private Mono<DataBuffer> writeBody(ServerHttpResponse httpResponse, CachedBodyOutputMessage message,
Class<?> outClass) {
Mono<DataBuffer> response = DataBufferUtils.join(message.getBody());
if (byte[].class.isAssignableFrom(outClass)) {
return response;
}

List<String> encodingHeaders = httpResponse.getHeaders().getOrEmpty(HttpHeaders.CONTENT_ENCODING);
for (String encoding : encodingHeaders) {
MessageBodyEncoder encoder = messageBodyEncoders.get(encoding);
if (encoder != null) {
DataBufferFactory dataBufferFactory = httpResponse.bufferFactory();
response = response.publishOn(Schedulers.parallel()).map(buffer -> {
byte[] encodedResponse = encoder.encode(buffer);
// Release the joined buffer to avoid a direct-memory leak.
DataBufferUtils.release(buffer);
return encodedResponse;
}).map(dataBufferFactory::wrap);
break;
}
}

return response;
}

}

}
  1. 新建自动导入类
1
2
3
4
5
6
7
8
9
10
11
/**
 * Auto-configuration that exposes the customised response-body modification
 * filter factory as a Spring bean.
 */
@Configuration
public class ModifyResponseBodyGatewayFilterAutoConfiguration {

    /**
     * Builds the filter factory from the configured codec readers together
     * with all registered body decoders/encoders.
     */
    @Bean
    @ConditionalOnEnabledFilter
    public ModifyResponseBodyGatewayFilterFactoryCopy modifyResponseBodyGatewayFilterFactoryCopy(
            ServerCodecConfigurer codecConfigurer, Set<MessageBodyDecoder> bodyDecoders,
            Set<MessageBodyEncoder> bodyEncoders) {
        List<HttpMessageReader<?>> readers = codecConfigurer.getReaders();
        return new ModifyResponseBodyGatewayFilterFactoryCopy(readers, bodyDecoders, bodyEncoders);
    }
}
  1. 新建响应日志全局拦截类
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
/**
 * Global filter that captures the response body (via the copied
 * modify-response-body factory) and forwards it to the async log handler.
 */
@Slf4j
@Component
@AllArgsConstructor
public class ResponseLogFilter implements GlobalFilter, Ordered {

    private final ModifyResponseBodyGatewayFilterFactoryCopy modifyResponseBodyGatewayFilterFactoryCopy;

    private final AsyncResponseHandler asyncResponseHandler;

    @Override
    public Mono<Void> filter(ServerWebExchange exchange, GatewayFilterChain chain) {
        // The rewrite function leaves the body untouched: it only hands the
        // raw bytes to the asynchronous handler and re-emits them unchanged.
        ModifyResponseBodyGatewayFilterFactoryCopy.Config config =
                new ModifyResponseBodyGatewayFilterFactoryCopy.Config()
                        .setRewriteFunction(byte[].class, byte[].class, (e, bytes) -> {
                            asyncResponseHandler.handle(e, bytes);
                            return Mono.justOrEmpty(bytes);
                        });
        GatewayFilter delegate = modifyResponseBodyGatewayFilterFactoryCopy.apply(config);
        return delegate.filter(exchange, chain);
    }

    @Override
    public int getOrder() {
        // Runs shortly before Netty writes the response back to the client.
        return NettyWriteResponseFilter.WRITE_RESPONSE_FILTER_ORDER - 98;
    }
}

  1. 新建日志记录处理类
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
/**
 * Asynchronously completes the access-log entry (request half prepared by the
 * request filter, response half added here) and persists it to a Redis stream.
 */
@Slf4j
@Component
@AllArgsConstructor
public class AsyncResponseHandler {

// Flattens the log object into a Redis hash for stream storage.
private final Jackson2HashMapper jackson2HashMapper;

private final RedisTemplate<String, AbstractLog> logRedisTemplate;

private final ObjectMapper objectMapper;

/**
 * Finishes and stores the access log for the current exchange.
 *
 * @param exchange current ServerWebExchange
 * @param bytes    raw response body; null when the body was not parsed
 *                 (non-JSON / download responses)
 */
@Async("logTaskPool")
@SuppressWarnings("unchecked")
public void handle(ServerWebExchange exchange, byte[] bytes) {
ServerHttpResponse response = exchange.getResponse();
try {
// The request half of the log was assembled asynchronously by the
// request filter and stashed on the exchange.
Future<AccessLog> future =
(Future<AccessLog>) exchange.getAttributes().get(Constant.ACCESS_LOG_REQUEST_FUTURE_ATTR);
AccessLog accessLog = future.get(10, TimeUnit.SECONDS);
accessLog.setResponseHeaders(response.getHeaders().toSingleValueMap().toString());
try {
// Derive the origin type from the JWT subject; fall back to OTHER on any failure.
String aud = JwtUtils.verifyTokenSubject(accessLog.getToken());
accessLog.setOriginType(OriginType.getByValue(aud));
} catch (Exception e) {
accessLog.setOriginType(OriginType.OTHER);
}
// takenTime temporarily held the request start timestamp; replace it
// with the elapsed time in milliseconds.
accessLog.setTakenTime(System.currentTimeMillis() - accessLog.getTakenTime());
accessLog.setHttpCode(response.getRawStatusCode());
if (!Utils.isJsonResponse(exchange)) {
accessLog.setResponse("非 json 报文");
}
if (Utils.isDownloadResponse(exchange)) {
accessLog.setResponse("二进制文件");
}
// When a body was captured it overrides the placeholders set above.
Optional.ofNullable(bytes).ifPresent(bs -> {
if (bytes.length <= DataSize.ofKilobytes(256).toBytes()) {
// Only bodies below the size cap are stored as text
// (larger ones are typically file downloads).
accessLog.setResponse(new String(bytes, StandardCharsets.UTF_8));
} else {
accessLog.setResponse("报文过长");
}
});
logRedisTemplate.opsForStream().add(LogConstant.ACCESS_LOG_KEY_NAME, jackson2HashMapper.toHash(accessLog));
// Trim the stream to a maximum length to keep Redis memory bounded.
logRedisTemplate.opsForStream().trim(LogConstant.ACCESS_LOG_KEY_NAME, LogConstant.ACCESS_LOG_MAX_LENGTH,
true);
if (log.isDebugEnabled()) {
String logger = objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString(accessLog);
log.debug("log: \n{}", logger);
}
} catch (Exception e) {
// Logging must never break request processing: swallow and warn.
log.warn("access log save error: ", e);
}
}
}

SpringCloud Gateway 中想要获取请求体数据,这里介绍一种优雅的处理方法,就是使用 框架自带的 ModifyRequestBodyGatewayFilterFactory

使用

新建类 RequestLogFilter

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
/**
 * Global filter that captures the request body through the framework's
 * ModifyRequestBodyGatewayFilterFactory and starts async log assembly.
 */
@Slf4j
@Component
@AllArgsConstructor
public class RequestLogFilter implements GlobalFilter, Ordered {

    private final ModifyRequestBodyGatewayFilterFactory modifyRequestBodyGatewayFilterFactory;

    private final AsyncRequestHandler asyncRequestHandle;

    @Override
    public Mono<Void> filter(ServerWebExchange exchange, GatewayFilterChain chain) {
        // File uploads are never parsed: log without a body and continue.
        if (Utils.isUploadRequest(exchange)) {
            process(exchange, null);
            return chain.filter(exchange);
        }
        // Pass-through rewrite: the body is re-emitted unchanged, a copy of
        // the raw bytes goes to the log pipeline.
        ModifyRequestBodyGatewayFilterFactory.Config cfg =
                new ModifyRequestBodyGatewayFilterFactory.Config()
                        .setRewriteFunction(byte[].class, byte[].class, (e, bytes) -> {
                            process(e, bytes);
                            return Mono.justOrEmpty(bytes);
                        });
        return modifyRequestBodyGatewayFilterFactory.apply(cfg).filter(exchange, chain);
    }

    @Override
    public int getOrder() {
        return -100;
    }

    /**
     * Records the request start time and kicks off asynchronous log assembly,
     * stashing the resulting Future on the exchange for the response filter.
     */
    private void process(ServerWebExchange exchange, byte[] bytes) {
        exchange.getAttributes().put(Constant.REQUEST_START_TIME_ATTR, System.currentTimeMillis());
        exchange.getAttributes().put(Constant.ACCESS_LOG_REQUEST_FUTURE_ATTR,
                asyncRequestHandle.handle(exchange, bytes));
    }
}
1
2
3
4
5
6
7
8
9
  /**
   * Checks whether the current request is a multipart file upload.
   *
   * @param exchange ServerWebExchange
   * @return true when the request Content-Type is compatible with multipart/form-data
   */
  public static boolean isUploadRequest(ServerWebExchange exchange) {
      MediaType contentType = requestContentType(exchange);
      return MediaType.MULTIPART_FORM_DATA.isCompatibleWith(contentType);
  }

新建 AsyncRequestHandler 类

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
/**
 * Builds the request half of the access log on the dedicated log thread pool,
 * so request processing is never blocked by log assembly.
 */
@Component
@Slf4j
@AllArgsConstructor
public class AsyncRequestHandler {

    /**
     * Assembles the access-log entry for the current request asynchronously.
     *
     * @param exchange ServerWebExchange
     * @param bytes    raw request body; null for file uploads (body not parsed)
     * @return future resolving to the assembled log entry
     */
    @Async("logTaskPool")
    public Future<AccessLog> handle(ServerWebExchange exchange, byte[] bytes) {
        return new AsyncResult<>(wrapperAccessLog(exchange, bytes));
    }

    /**
     * Wraps the request data into an access-log object.
     *
     * @param exchange ServerWebExchange
     * @param bytes    raw request body, may be null
     * @return log entry populated with the request-side fields
     */
    private AccessLog wrapperAccessLog(ServerWebExchange exchange, byte[] bytes) {
        ServerHttpRequest request = exchange.getRequest();
        AccessLog accessLog = new AccessLog();
        accessLog.setToken(Utils.getToken(exchange));
        accessLog.setTime(LocalDateTime.now());
        accessLog.setApplication(getApplicationName(exchange));
        accessLog.setIp(Utils.getIp(exchange));
        accessLog.setUri(request.getURI().toString());
        accessLog.setHttpMethod(HttpMethod.valueOf(request.getMethodValue()));
        // Temporarily store the start timestamp; the response handler replaces
        // it with the real elapsed time.
        accessLog.setTakenTime((long) exchange.getAttributes().get(Constant.REQUEST_START_TIME_ATTR));
        accessLog.setRequestHeaders(request.getHeaders().toSingleValueMap().toString());
        if (Utils.isUploadRequest(exchange)) {
            accessLog.setRequest("二进制文件");
        }
        Optional.ofNullable(bytes).ifPresent(bs -> {
            // Fix: use the lambda parameter instead of reaching for the outer
            // variable — same value, but consistent and self-contained.
            if (bs.length <= DataSize.ofKilobytes(256).toBytes()) {
                // Only bodies below the size cap are stored as text
                // (larger ones are typically file uploads).
                accessLog.setRequest(new String(bs, StandardCharsets.UTF_8));
            } else {
                accessLog.setRequest("报文过长");
            }
        });
        return accessLog;
    }

    /**
     * Resolves the target application name from the matched route id.
     *
     * @param exchange ServerWebExchange
     * @return name of the application, or "" when no route id is present
     */
    private String getApplicationName(ServerWebExchange exchange) {
        String routingId =
                (String) exchange.getAttributes().get(ServerWebExchangeUtils.GATEWAY_PREDICATE_MATCHED_PATH_ROUTE_ID_ATTR);
        // Fix: the attribute can be absent (e.g. no route matched); previously
        // this threw a NullPointerException inside the async task and the log
        // entry was silently lost.
        if (routingId == null) {
            return "";
        }
        // Auto-registered micro-services carry a discovery-client prefix.
        if (routingId.startsWith(Constant.MODULE_SUB_PREFIX)) {
            return routingId.substring(Constant.MODULE_SUB_PREFIX.length());
        }
        return routingId;
    }

}

AccessLog

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
/**
 * Access-log record for a single gateway call: request/response details on
 * top of the common user/token fields in AbstractLog.
 * NOTE: field names are serialized via Jackson2HashMapper into Redis — do not
 * rename them without migrating stored data.
 */
public class AccessLog extends AbstractLog {

/**
 * Owning service (application name)
 */
private String application;

/**
 * Client IP address
 */
private String ip;

/**
 * Request URI
 */
private String uri;

/**
 * HTTP request method
 */
private HttpMethod httpMethod;

/**
 * Elapsed time in milliseconds
 */
private Long takenTime;

/**
 * HTTP status code
 */
private Integer httpCode;

/**
 * Request body
 */
private String request;

/**
 * Response body
 */
private String response;

/**
 * Request headers
 */
private String requestHeaders;

/**
 * Response headers
 */
private String responseHeaders;


public String getApplication() {
return application;
}

public void setApplication(String application) {
this.application = application;
}

public String getIp() {
return ip;
}

public void setIp(String ip) {
this.ip = ip;
}

public String getUri() {
return uri;
}

public void setUri(String uri) {
this.uri = uri;
}

public HttpMethod getHttpMethod() {
return httpMethod;
}

public void setHttpMethod(HttpMethod httpMethod) {
this.httpMethod = httpMethod;
}

public Long getTakenTime() {
return takenTime;
}

public void setTakenTime(Long takenTime) {
this.takenTime = takenTime;
}

public Integer getHttpCode() {
return httpCode;
}

public void setHttpCode(Integer httpCode) {
this.httpCode = httpCode;
}

public String getRequest() {
return request;
}

public void setRequest(String request) {
this.request = request;
}

public String getResponse() {
return response;
}

public void setResponse(String response) {
this.response = response;
}

public String getRequestHeaders() {
return requestHeaders;
}

public void setRequestHeaders(String requestHeaders) {
this.requestHeaders = requestHeaders;
}

public String getResponseHeaders() {
return responseHeaders;
}

public void setResponseHeaders(String responseHeaders) {
this.responseHeaders = responseHeaders;
}
}

AbstractLog

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
/**
 * Common base for all log records: identity, user and origin metadata.
 * NOTE: field names are serialized via Jackson2HashMapper into Redis — do not
 * rename them without migrating stored data.
 */
public abstract class AbstractLog {

/**
 * Log id
 */
private String id;

/**
 * Access token
 */
private String token;

/**
 * User id
 */
private String userid;

/**
 * Username (account name)
 */
private String username;

/**
 * User's display name
 */
private String name;

/**
 * Operation time
 */
private LocalDateTime time;

/**
 * Operation origin
 */
private OriginType originType;


public String getId() {
return id;
}

public void setId(String id) {
this.id = id;
}

public String getToken() {
return token;
}

public void setToken(String token) {
this.token = token;
}

public String getUserid() {
return userid;
}

public void setUserid(String userid) {
this.userid = userid;
}

public String getUsername() {
return username;
}

public void setUsername(String username) {
this.username = username;
}

public String getName() {
return name;
}

public void setName(String name) {
this.name = name;
}

public LocalDateTime getTime() {
return time;
}

public void setTime(LocalDateTime time) {
this.time = time;
}

public OriginType getOriginType() {
return originType;
}

public void setOriginType(OriginType originType) {
this.originType = originType;
}
}

注意:
这里对请求的数据进行了拦截并包装成日志对象,存储在 exchange 的一个 ACCESS_LOG_REQUEST_FUTURE_ATTR 属性中,后面再拿到响应时,再从 exchange 拿回请求数据,和响应数据一并使用

插件引入

1
2
3
4
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
</plugin>

打包可执行 jar 重命名

1
2
3
4
5
6
7
8
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
<configuration>
<!--可执行 jar 重命名-->
<classifier>exec</classifier>
</configuration>
</plugin>

定义环境变量

1
2
3
4
5
6
7
8
9
10
11
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
<configuration>
<environmentVariables>
<ENV1>5000</ENV1>
<ENV2>Some Text</ENV2>
<ENV3/>
</environmentVariables>
</configuration>
</plugin>

定义系统变量

1
2
3
4
5
6
7
8
9
10
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
<configuration>
<systemPropertyVariables>
<property1>test</property1>
<property2>${my.value}</property2>
</systemPropertyVariables>
</configuration>
</plugin>

更多使用方法参考 文档

记录下 OpenApi3 + SpringCloud Gateway 聚合文档的过程

组件选型

  1. SpringDoc
  2. Knife4j
  3. SpringCloud Gateway

项目配置

在所有的 spring boot 项目中引入 SpringDoc

1
2
3
4
5
<dependency>
<groupId>org.springdoc</groupId>
<artifactId>springdoc-openapi-ui</artifactId>
<version>${springdoc.version}</version>
</dependency>

在 gateway 项目中引入 SpringDoc

1
2
3
4
5
<dependency>
<groupId>org.springdoc</groupId>
<artifactId>springdoc-openapi-webflux-ui</artifactId>
<version>${springdoc.version}</version>
</dependency>

并且需要排除 springdoc-openapi-ui 的依赖

OpenAPI 配置

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
/**
 * OpenAPI metadata configuration for SpringDoc.
 */
@Configuration
@AllArgsConstructor
public class SwaggerConfiguration {

    private final Environment environment;

    /** Top-level OpenAPI descriptor picked up by SpringDoc. */
    @Bean
    public OpenAPI openAPI() {
        return new OpenAPI().info(info());
    }

    /** Document metadata: title, description, version and contact. */
    private Info info() {
        Contact contact = new Contact().name("xxx").url("xxx").email("xxxxx");
        String description = environment.getProperty("spring.application.name") + " 服务 API 文档";
        return new Info()
                .title("xxxx")
                .description(description)
                .version("xx")
                .contact(contact)
                .summary("OpenAPI 文档");
    }
}

文档聚合

聚合 swagger 添加分组

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
/**
 * Periodically rebuilds the swagger-ui group list from the gateway routes so
 * the aggregated documentation always reflects currently registered services.
 */
@Component
@AllArgsConstructor
public class SwaggerConfig {

    public static final String MODULE_SUB_PREFIX = "ReactiveCompositeDiscoveryClient_";

    private final SwaggerUiConfigParameters swaggerUiConfigParameters;

    private final RouteLocator routeLocator;

    /** Refreshes the group urls every 20 seconds. */
    @Scheduled(fixedDelay = 20000)
    public void apis() {
        swaggerUiConfigParameters.getUrls().clear();
        routeLocator.getRoutes().subscribe(route -> {
            String id = route.getId();
            // Only discovery-client routes become documentation groups.
            if (id.contains(MODULE_SUB_PREFIX)) {
                swaggerUiConfigParameters.addGroup(id.substring(MODULE_SUB_PREFIX.length()));
            }
        });
    }
}

修改 /v3/api-docs/ 报文添加 basePath 使得 Knife4j 在聚合文档下能正常调试

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
/**
 * Gateway filter that injects a "basePath" property into /v3/api-docs JSON
 * responses so Knife4j can issue debug calls through the gateway correctly.
 */
@Component
public class AddBasePathFilterFactory extends AbstractGatewayFilterFactory<AddBasePathFilterFactory.Config> {

    private final ModifyResponseBodyGatewayFilterFactory modifyResponseBodyGatewayFilterFactory;

    public AddBasePathFilterFactory(ModifyResponseBodyGatewayFilterFactory modifyResponseBodyGatewayFilterFactory) {
        super(Config.class);
        this.modifyResponseBodyGatewayFilterFactory = modifyResponseBodyGatewayFilterFactory;
    }

    @Override
    public GatewayFilter apply(Config config) {
        // Delegate body rewriting to the framework factory, parsing the
        // api-docs body as a JsonNode and adding the basePath property.
        ModifyResponseBodyGatewayFilterFactory.Config cf = new ModifyResponseBodyGatewayFilterFactory.Config()
                .setRewriteFunction(JsonNode.class, JsonNode.class,
                        (e, jsonNode) -> Mono.justOrEmpty(addBasePath(e, jsonNode)));
        return modifyResponseBodyGatewayFilterFactory.apply(cf);
    }

    @Override
    public String name() {
        // Filter name used in route configuration: "- AddBasePath".
        return "AddBasePath";
    }

    @Setter
    public static class Config {
    }

    /**
     * Adds a "basePath" property derived from the request path to the parsed
     * api-docs document.
     *
     * @param exchange current ServerWebExchange
     * @param jsonNode parsed api-docs body; null when the upstream body is empty
     * @return the (possibly modified) node, or null when the input was null
     */
    private JsonNode addBasePath(ServerWebExchange exchange, JsonNode jsonNode) {
        // Fix: ModifyResponseBodyGatewayFilterFactory invokes the rewrite
        // function with null when the response body is empty; previously this
        // threw a NullPointerException on jsonNode.isObject().
        if (jsonNode != null && jsonNode.isObject()) {
            ObjectNode node = (ObjectNode) jsonNode;
            // NOTE(review): subPath(4) presumably strips the leading
            // /{group}/v3/api-docs segments — confirm against the RewritePath
            // route configuration before changing.
            String basePath = exchange.getRequest().getPath().subPath(4).value();
            node.put("basePath", basePath);
            return node;
        }
        return jsonNode;
    }
}

网关路由配置

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
spring:
cloud:
gateway:
routes:
# openapi /v3/api-docs/组名 转 /组名/v3/api-docs; 再加 basePath 属性
- id: openapi
uri: http://localhost:${server.port}
predicates:
- Path=/v3/api-docs/**
filters:
- RewritePath=/v3/api-docs/(?<path>.*), /$\{path}/v3/api-docs
- AddBasePath
# 主页面重定向到文档聚合页面
- id: doc
uri: http://localhost:${server.port}
predicates:
- Path=/
filters:
- RedirectTo=302, /doc.html

说明

记录下 EFK(Elasticsearch8 + FileBeat + Kibana) 日志分析平台搭建
并加以用户名密码保护

证书生成

先启动一个 ES 节点,进入节点后使用下面的命令生成证书

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
if [ x${ELASTIC_PASSWORD} == x ]; then
echo "Set the ELASTIC_PASSWORD environment variable in the .env file";
exit 1;
elif [ x${KIBANA_PASSWORD} == x ]; then
echo "Set the KIBANA_PASSWORD environment variable in the .env file";
exit 1;
fi;
if [ ! -f config/certs/ca.zip ]; then
echo "Creating CA";
bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip;
unzip config/certs/ca.zip -d config/certs;
fi;
if [ ! -f config/certs/certs.zip ]; then
echo "Creating certs";
echo -ne \
"instances:\n"\
" - name: es01\n"\
" dns:\n"\
" - es01\n"\
" - localhost\n"\
" ip:\n"\
" - 127.0.0.1\n"\
" - name: es02\n"\
" dns:\n"\
" - es02\n"\
" - localhost\n"\
" ip:\n"\
" - 127.0.0.1\n"\
" - name: es03\n"\
" dns:\n"\
" - es03\n"\
" - localhost\n"\
" ip:\n"\
" - 127.0.0.1\n"\
> config/certs/instances.yml;
bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key;
unzip config/certs/certs.zip -d config/certs;
fi;
echo "Setting file permissions"
chown -R root:root config/certs;
find . -type d -exec chmod 750 \{\} \;;
find . -type f -exec chmod 640 \{\} \;;
echo "Waiting for Elasticsearch availability";
until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done;
echo "Setting kibana_system password";
until curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done;
echo "All done!";

详细查看 官方文档

服务搭建

docker-compose.yml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
version: "2.2"
services:
es01:
image: elasticsearch:${STACK_VERSION}
volumes:
- ./certs:/usr/share/elasticsearch/config/certs
- ./es01-data:/usr/share/elasticsearch/data
container_name: elasticsearch-01
restart: always
ports:
- ${ES_PORT}:9200
networks:
- elastic
environment:
- node.name=es01
- cluster.name=${CLUSTER_NAME}
- discovery.type=single-node
#- cluster.initial_master_nodes=es01,es02,es03
#- discovery.seed_hosts=es02,es03
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
- "ES_JAVA_OPTS=-Xms1g -Xmx1g"
- bootstrap.memory_lock=true
- xpack.security.enabled=true
#- xpack.security.http.ssl.enabled=true
#- xpack.security.http.ssl.key=certs/es01/es01.key
#- xpack.security.http.ssl.certificate=certs/es01/es01.crt
#- xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
#- xpack.security.http.ssl.verification_mode=certificate
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.key=certs/es01/es01.key
- xpack.security.transport.ssl.certificate=certs/es01/es01.crt
- xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.license.self_generated.type=${LICENSE}
ulimits:
memlock:
soft: -1
hard: -1
healthcheck:
test:
[
"CMD-SHELL",
"curl -s --cacert config/certs/ca/ca.crt http://localhost:9200 | grep -q 'missing authentication credentials'",
]
interval: 10s
timeout: 10s
retries: 120
kibana:
depends_on:
es01:
condition: service_healthy
image: kibana:${STACK_VERSION}
container_name: kibana
restart: always
volumes:
- ./certs:/usr/share/kibana/config/certs
- ./kibanadata:/usr/share/kibana/data
ports:
- ${KIBANA_PORT}:5601
networks:
- elastic
environment:
- SERVERNAME=192.168.1.21
- SERVER_BASEPATH=/kibana
- SERVER_REWRITEBASEPATH=true
- ELASTICSEARCH_HOSTS=http://es01:9200
- ELASTICSEARCH_USERNAME=kibana_system
- ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD}
- ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt
- I18N_LOCALE=zh-CN
healthcheck:
test:
[
"CMD-SHELL",
"curl -s -I http://localhost:5601/kibana | grep -q 'HTTP/1.1 302 Found'",
]
interval: 10s
timeout: 10s
retries: 120
filebeat:
depends_on:
es01:
condition: service_healthy
image: elastic/filebeat:${STACK_VERSION}
container_name: filebeat
ports:
- 6115:6115
restart: always
volumes:
- ./filebeat-data/filebeat.yml:/usr/share/filebeat/filebeat.yml
- ./filebeat-data/filebeat.template.json:/usr/share/filebeat/filebeat.template.json
networks:
- elastic
networks:
elastic:

.env 文件

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
# Password for the 'elastic' user (at least 6 characters)
ELASTIC_PASSWORD=xxxxx

# Password for the 'kibana_system' user (at least 6 characters)
KIBANA_PASSWORD=Kaiyuan@2022

# Version of Elastic products
STACK_VERSION=8.4.0

# Set the cluster name
CLUSTER_NAME=docker-cluster

# Set to 'basic' or 'trial' to automatically start the 30-day trial
LICENSE=basic
#LICENSE=trial

# Port to expose Elasticsearch HTTP API to the host
ES_PORT=9200
#ES_PORT=127.0.0.1:9200

# Port to expose Kibana to the host
KIBANA_PORT=5601
#KIBANA_PORT=80

# Increase or decrease based on the available host memory (in bytes)
MEM_LIMIT=1073741824

# Project namespace (defaults to the current folder name if not set)
#COMPOSE_PROJECT_NAME=myproject

背景

记录一下 Atlassian 旗下的 Confluence 和 Jira 免费 License 申请

服务搭建

docker-compose.yml

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
version: "3"
services:
confluence:
image: atlassian/confluence
container_name: confluence
restart: always
ports:
- 8090:8090
- 8091:8091
volumes:
- ./confluence-data:/var/atlassian/application-data/confluence
mysql:
image: mysql:8.0.22
container_name: mysql
security_opt:
- seccomp:unconfined
ports:
- 6101:3306
restart: always
volumes:
- ./mysql-data:/var/lib/mysql
- ./my.cnf:/etc/mysql/my.cnf
environment:
- MYSQL_ROOT_PASSWORD=Kaiyuan@2020
- TZ=Asia/Shanghai
jira:
image: atlassian/jira-software
container_name: jira
restart: always
ports:
- 8080:8080
volumes:
- ./jira-data:/var/atlassian/application-data/jira

my.cnf

1
2
3
4
5
6
7
8
9
10
11
12
[mysqld]
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
datadir = /var/lib/mysql
secure-file-priv= NULL

# Custom config should go here
!includedir /etc/mysql/conf.d/
max_connections=1024

sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'
transaction-isolation=READ-COMMITTED

License 申请

Atlassian 的网站
点击 New Trial License 申请新的 License
选择 Confluence 再选择 Confluence (Data Center)
填入 Server ID 即可申请免费的一个月的 License
一个月到期后再次申请即可

背景

有时我们自建了 Jira 站点,又搭建了 Confluence 服务,想着不用再做一次用户的新增,可以使用 Jira 的用户到 Confluence 中使用

操作

第一步 在 Jira 中配置用户服务器

image-20221028183734308

其中 IP 地址指的是 confluence 服务所在的地址,相当于白名单地址的意思

第二步 配置 Confluence

配置 Confluence 的 用户目录 选项

image-20221028184043190

点击 “测试并保存” 完成数据的同步

背景

有时 Nacos 单机 MySQL 版重启服务器后无法提供服务,是因为重启时会同时启动 nacos 服务和 MySQL 服务,而 MySQL 服务启动得较慢,nacos 在启动的时候还连接不上数据库,导致 Nacos 服务无法正常提供服务
这里我的解决方式是使用 Nacos 单机 Derby 版

部署

docker-compose.yml 文件内容如下:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
version: "2"
services:
nacos:
image: nacos/nacos-server:v2.1.0
container_name: nacos
restart: always
environment:
- PREFER_HOST_MODE=ip
- MODE=standalone
- NACOS_AUTH_ENABLE=true
volumes:
- ./standalone-logs/:/home/nacos/logs
- ./data:/home/nacos/data
ports:
- "8848:8848"

在当前目录下新建文件夹 data 和 standalone-logs,启动服务即可

0%