redis哨兵模式场景测试.md
... ...
@@ -0,0 +1,388 @@
1
+### 集群搭建
2
+包含1个主节点和2个从节点以及3个哨兵节点的Redis集群,配置如下:
3
+
4
+主节点 redis-master 192.168.184.122 6379,由replication信息可知该节点角色为master,并有两个已经连接的从节点。
5
+
6
+```bash
7
+> redis-cli -p 6379 -a snest123 info replication
8
+
9
+Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
10
+# Replication
11
+role:master # 角色为master
12
+connected_slaves:2 # 有2个从节点
13
+slave0:ip=192.168.184.122,port=6381,state=online,offset=60270,lag=1 # 第一个从节点信息
14
+slave1:ip=192.168.184.122,port=6380,state=online,offset=60270,lag=1 # 第二个从节点信息
15
+master_failover_state:no-failover # 主节点没有故障转移状态
16
+master_replid:ebd8e552d0ac04310950e95263d63961acf1a51c
17
+master_replid2:0000000000000000000000000000000000000000
18
+master_repl_offset:60270
19
+second_repl_offset:-1
20
+repl_backlog_active:1
21
+repl_backlog_size:1048576
22
+repl_backlog_first_byte_offset:1
23
+repl_backlog_histlen:60270
24
+```
25
+2个从节点 redis-slave-1 和 redis-slave-2,ip都是192.168.184.122,端口分别是6381和6380
26
+
27
+```bash
28
+> redis-cli -p 6380 -a snest123 info replication
29
+Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
30
+# Replication
31
+role:slave # 角色为slave
32
+master_host:192.168.184.122 # 主节点IP
33
+master_port:6379
34
+master_link_status:up
35
+master_last_io_seconds_ago:1
36
+master_sync_in_progress:0
37
+slave_read_repl_offset:72389
38
+slave_repl_offset:72389
39
+slave_priority:100
40
+slave_read_only:1
41
+replica_announced:1
42
+connected_slaves:0
43
+master_failover_state:no-failover
44
+master_replid:ebd8e552d0ac04310950e95263d63961acf1a51c
45
+master_replid2:0000000000000000000000000000000000000000
46
+master_repl_offset:72389
47
+second_repl_offset:-1
48
+repl_backlog_active:1
49
+repl_backlog_size:1048576
50
+repl_backlog_first_byte_offset:12494
51
+repl_backlog_histlen:59896
52
+```
53
+
54
+3个哨兵节点 redis-sentinel-1、redis-sentinel-2、redis-sentinel-3,ip都是192.168.184.122,端口分别是26379、26380和26381
55
+
56
+```bash
57
+> redis-cli -p 26379 sentinel masters
58
+1) 1) "name"
59
+ 2) "mymaster"
60
+ 3) "ip"
61
+ 4) "192.168.184.122" # 主节点IP
62
+ 5) "port"
63
+ 6) "6379"
64
+ 7) "runid"
65
+ 8) "804b4d520bc6d6bb4673f10084fd59ef9bdd518e"
66
+ 9) "flags"
67
+ 10) "master" # 主节点标志
68
+ 11) "link-pending-commands"
69
+ 12) "0"
70
+ 13) "link-refcount"
71
+ 14) "1"
72
+ 15) "last-ping-sent"
73
+ 16) "0"
74
+ 17) "last-ok-ping-reply"
75
+ 18) "352"
76
+ 19) "last-ping-reply"
77
+ 20) "352"
78
+ 21) "down-after-milliseconds"
79
+ 22) "60000"
80
+ 23) "info-refresh"
81
+ 24) "2495"
82
+ 25) "role-reported"
83
+ 26) "master"
84
+ 27) "role-reported-time"
85
+ 28) "351514574"
86
+ 29) "config-epoch"
87
+ 30) "0"
88
+ 31) "num-slaves" # 从节点数量
89
+ 32) "2"
90
+ 33) "num-other-sentinels" # 其他哨兵数量
91
+ 34) "2"
92
+ 35) "quorum" # 哨兵选举的法定人数
93
+ 36) "2"
94
+ 37) "failover-timeout"
95
+ 38) "180000"
96
+ 39) "parallel-syncs"
97
+ 40) "1"
98
+```
99
+
100
+到这里整个集群已经搭建完成,接下来我们将进行一些redis哨兵模式下的异常测试。
101
+
102
+### 停止主节点 Redis 进程:
103
+
104
+观察sentinel输出日志:
105
+
106
+```bash
107
+1:X 12 Aug 2025 03:23:12.998 # +sdown master mymaster 192.168.184.122 6379 ---> Sentinel 检测到主节点不可用
108
+1:X 12 Aug 2025 03:23:13.082 # +odown master mymaster 192.168.184.122 6379 #quorum 2/2 ---> Sentinel 确认主节点不可用
109
+1:X 12 Aug 2025 03:23:13.082 # +new-epoch 3
110
+1:X 12 Aug 2025 03:23:13.082 # +try-failover master mymaster 192.168.184.122 6379 ---> Sentinel 尝试进行故障转移
111
+1:X 12 Aug 2025 03:23:13.093 # Could not rename tmp config file (Device or resource busy)
112
+1:X 12 Aug 2025 03:23:13.093 # WARNING: Sentinel was not able to save the new configuration on disk!!!: Device or resource busy
113
+1:X 12 Aug 2025 03:23:13.093 # +vote-for-leader 247379459a8d917fdf61931a5111f921dfa47408 3 ---> 各哨兵投票选举执行本次故障转移的领导者哨兵(注意:选的是领导者哨兵,不是新主节点)
114
+1:X 12 Aug 2025 03:23:13.116 # e39828966d5ae85ef8292ce9aa085c84ef5d6203 voted for 247379459a8d917fdf61931a5111f921dfa47408 3
115
+1:X 12 Aug 2025 03:23:13.116 # ef17ddf007aa36aa2d91f5a7896b8141adca2e07 voted for 247379459a8d917fdf61931a5111f921dfa47408 3
116
+1:X 12 Aug 2025 03:23:13.156 # +elected-leader master mymaster 192.168.184.122 6379 ---> Sentinel 247379459a8d917fdf61931a5111f921dfa47408 当选为本次故障转移的领导者,由它负责挑选并提升新主节点
117
+1:X 12 Aug 2025 03:23:13.156 # +failover-state-select-slave master mymaster 192.168.184.122 6379
118
+1:X 12 Aug 2025 03:23:13.247 # +selected-slave slave 192.168.184.122:6380 192.168.184.122 6380 @ mymaster 192.168.184.122 6379
119
+1:X 12 Aug 2025 03:23:13.247 * +failover-state-send-slaveof-noone slave 192.168.184.122:6380 192.168.184.122 6380 @ mymaster 192.168.184.122 6379 ---> Sentinel 向选中的从节点 6380 发送 slaveof no one,将其提升为新的主节点
120
+1:X 12 Aug 2025 03:23:13.309 * +failover-state-wait-promotion slave 192.168.184.122:6380 192.168.184.122 6380 @ mymaster 192.168.184.122 6379
121
+1:X 12 Aug 2025 03:23:13.339 # Could not rename tmp config file (Device or resource busy)
122
+1:X 12 Aug 2025 03:23:13.339 # WARNING: Sentinel was not able to save the new configuration on disk!!!: Device or resource busy
123
+1:X 12 Aug 2025 03:23:13.339 # +promoted-slave slave 192.168.184.122:6380 192.168.184.122 6380 @ mymaster 192.168.184.122 6379
124
+1:X 12 Aug 2025 03:23:13.339 # +failover-state-reconf-slaves master mymaster 192.168.184.122 6379
125
+1:X 12 Aug 2025 03:23:13.410 * +slave-reconf-sent slave 192.168.184.122:6381 192.168.184.122 6381 @ mymaster 192.168.184.122 6379
126
+1:X 12 Aug 2025 03:23:14.216 # -odown master mymaster 192.168.184.122 6379
127
+1:X 12 Aug 2025 03:23:14.270 * +slave-reconf-inprog slave 192.168.184.122:6381 192.168.184.122 6381 @ mymaster 192.168.184.122 6379
128
+1:X 12 Aug 2025 03:23:14.270 * +slave-reconf-done slave 192.168.184.122:6381 192.168.184.122 6381 @ mymaster 192.168.184.122 6379
129
+1:X 12 Aug 2025 03:23:14.322 # +failover-end master mymaster 192.168.184.122 6379
130
+1:X 12 Aug 2025 03:23:14.322 # +switch-master mymaster 192.168.184.122 6379 192.168.184.122 6380
131
+1:X 12 Aug 2025 03:23:14.323 * +slave slave 192.168.184.122:6381 192.168.184.122 6381 @ mymaster 192.168.184.122 6380
132
+1:X 12 Aug 2025 03:23:14.323 * +slave slave 192.168.184.122:6379 192.168.184.122 6379 @ mymaster 192.168.184.122 6380
133
+1:X 12 Aug 2025 03:23:14.336 # Could not rename tmp config file (Device or resource busy)
134
+1:X 12 Aug 2025 03:23:14.337 # WARNING: Sentinel was not able to save the new configuration on disk!!!: Device or resource busy
135
+1:X 12 Aug 2025 03:24:14.350 # +sdown slave 192.168.184.122:6379 192.168.184.122 6379 @ mymaster 192.168.184.122 6380
136
+
137
+```
138
+查看sentinel 状态:
139
+```bash
140
+> redis-cli -p 26379 sentinel masters
141
+1) 1) "name"
142
+ 2) "mymaster"
143
+ 3) "ip"
144
+ 4) "192.168.184.122" # 主节点IP
145
+ 5) "port"
146
+ 6) "6380" # 新的主节点端口, 之前的主节点6379已经停止, 现在6380成为新的主节点, 说明故障转移成功
147
+ 7) "runid"
148
+ 8) "539781d1f63dd363c26c6e37f632d3ced0a535f5"
149
+ 9) "flags"
150
+ 10) "master"
151
+ 11) "link-pending-commands"
152
+ 12) "0"
153
+ 13) "link-refcount"
154
+ 14) "1"
155
+ 15) "last-ping-sent"
156
+ 16) "0"
157
+ 17) "last-ok-ping-reply"
158
+ 18) "302"
159
+ 19) "last-ping-reply"
160
+ 20) "302"
161
+ 21) "down-after-milliseconds"
162
+ 22) "60000"
163
+ 23) "info-refresh"
164
+ 24) "7943"
165
+ 25) "role-reported"
166
+ 26) "master"
167
+ 27) "role-reported-time"
168
+ 28) "149174"
169
+ 29) "config-epoch"
170
+ 30) "3"
171
+ 31) "num-slaves"
172
+ 32) "2"
173
+ 33) "num-other-sentinels"
174
+ 34) "2"
175
+ 35) "quorum"
176
+ 36) "2"
177
+ 37) "failover-timeout"
178
+ 38) "180000"
179
+ 39) "parallel-syncs"
180
+ 40) "1"
181
+```
182
+
183
+进一步验证 192.168.184.122:6380 是否真的成为了新的主节点:
184
+```bash
185
+> redis-cli -p 6380 -a snest123 info replication
186
+Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
187
+# Replication
188
+role:master # 确实角色为master
189
+connected_slaves:1 # 现在只有一个从节点
190
+slave0:ip=192.168.184.122,port=6381,state=online,offset=201890,lag=0 # 之前的从节点6381现在仍然在线,依然作为一个从节点存在
191
+master_failover_state:no-failover
192
+master_replid:ad6a01d6a123ea956ec35f0d80e3d1e83191bb7b
193
+master_replid2:ebd8e552d0ac04310950e95263d63961acf1a51c
194
+master_repl_offset:202035
195
+second_repl_offset:154594
196
+repl_backlog_active:1
197
+repl_backlog_size:1048576
198
+repl_backlog_first_byte_offset:12494
199
+repl_backlog_histlen:189542
200
+```
201
+
202
+获取之前master写入的数据测试数据是否还存在:
203
+```bash
204
+redis-cli -p 6380 -a snest123 get foo
205
+Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
206
+"bar" # 数据仍然存在
207
+```
208
+由上述测试结果可以看出,Redis哨兵模式下的故障转移功能正常工作,新的主节点成功接管了之前主节点的角色,并且数据保持一致性。
209
+
210
+重新启动之前的主节点 Redis 进程,作为6380的slave而存在:
211
+
212
+```bash
213
+1:X 12 Aug 2025 03:24:14.350 # +sdown slave 192.168.184.122:6379 192.168.184.122 6379 @ mymaster 192.168.184.122 6380
214
+1:X 12 Aug 2025 03:30:50.866 # -sdown slave 192.168.184.122:6379 192.168.184.122 6379 @ mymaster 192.168.184.122 6380
215
+1:X 12 Aug 2025 03:31:00.799 * +convert-to-slave slave 192.168.184.122:6379 192.168.184.122 6379 @ mymaster 192.168.184.122 6380
216
+```
217
+### 模拟哨兵节点故障
218
+
219
+停止其中一个哨兵节点 redis-sentinel-1 进程,观察另外两个哨兵节点的日志输出,发现没输出任何关于故障的日志信息。
220
+
221
+
222
+
223
+停掉2个哨兵节点 redis-sentinel-1 和 redis-sentinel-2 进程,观察剩余一个哨兵节点 redis-sentinel-3 的日志输出:
224
+
225
+```bash
226
+
227
+1:X 12 Aug 2025 03:38:08.991 # +sdown sentinel e39828966d5ae85ef8292ce9aa085c84ef5d6203 192.168.184.122 26381 @ mymaster 192.168.184.122 6380
228
+1:X 12 Aug 2025 03:38:44.972 # +sdown sentinel ef17ddf007aa36aa2d91f5a7896b8141adca2e07 192.168.184.122 26380 @ mymaster 192.168.184.122 6380
229
+1:X 12 Aug 2025 03:39:05.514 # +sdown master mymaster 192.168.184.122 6380
230
+
231
+```
232
+
233
+此时在程序中尝试连接redis集群,观察连接情况:
234
+
235
+```bash
236
+redis: 2025/08/12 11:43:54 sentinel.go:770: sentinel: selected addr=192.168.184.122:26379 masterAddr=192.168.184.122:6380
237
+redis: 2025/08/12 11:43:54 sentinel.go:759: sentinel: GetMasterAddrByName addr=192.168.184.122:26381, master="mymaster" failed: context canceled
238
+redis: 2025/08/12 11:43:54 sentinel.go:759: sentinel: GetMasterAddrByName addr=192.168.184.122:26380, master="mymaster" failed: context canceled
239
+redis: 2025/08/12 11:43:54 sentinel.go:920: sentinel: new master="mymaster" addr="192.168.184.122:6380"
240
+panic: dial tcp 192.168.184.122:6380: connectex: No connection could be made because the target machine actively refused it.
241
+
242
+```
243
+发现在半数以上哨兵节点故障时,程序无法连接到Redis集群的主节点,导致panic错误。由日志可知,剩余的哨兵仍能返回已记录的主节点地址(192.168.184.122:6380),但由于存活的哨兵数量不足法定人数(quorum=2),无法发起故障转移;主节点宕机后选不出新主节点,客户端按旧地址连接因此被拒绝。
244
+
245
+### 恢复哨兵节点
246
+
247
+重新启动之前停止的哨兵节点 redis-sentinel-1 和 redis-sentinel-2 进程,观察日志输出:
248
+
249
+```bash
250
+1:X 12 Aug 2025 03:47:09.266 # -sdown sentinel ef17ddf007aa36aa2d91f5a7896b8141adca2e07 192.168.184.122 26380 @ mymaster 192.168.184.122 6380
251
+1:X 12 Aug 2025 03:47:11.234 * +sentinel-invalid-addr sentinel ef17ddf007aa36aa2d91f5a7896b8141adca2e07 192.168.184.122 26380 @ mymaster 192.168.184.122 6380
252
+1:X 12 Aug 2025 03:47:11.234 * +sentinel sentinel 96ac7bcab8868b94de33ab08a10761545b6d42c6 192.168.184.122 26380 @ mymaster 192.168.184.122 6380
253
+1:X 12 Aug 2025 03:47:11.242 # Could not rename tmp config file (Device or resource busy)
254
+1:X 12 Aug 2025 03:47:11.242 # WARNING: Sentinel was not able to save the new configuration on disk!!!: Device or resource busy
255
+1:X 12 Aug 2025 03:48:09.284 # Could not rename tmp config file (Device or resource busy)
256
+1:X 12 Aug 2025 03:48:09.284 # WARNING: Sentinel was not able to save the new configuration on disk!!!: Device or resource busy
257
+1:X 12 Aug 2025 03:48:09.284 # +new-epoch 4
258
+1:X 12 Aug 2025 03:48:09.291 # Could not rename tmp config file (Device or resource busy)
259
+1:X 12 Aug 2025 03:48:09.292 # WARNING: Sentinel was not able to save the new configuration on disk!!!: Device or resource busy
260
+1:X 12 Aug 2025 03:48:09.292 # +vote-for-leader 96ac7bcab8868b94de33ab08a10761545b6d42c6 4
261
+1:X 12 Aug 2025 03:48:09.497 # +odown master mymaster 192.168.184.122 6380 #quorum 2/2
262
+1:X 12 Aug 2025 03:48:09.497 # Next failover delay: I will not start a failover before Tue Aug 12 03:54:09 2025
263
+1:X 12 Aug 2025 03:48:09.633 # +config-update-from sentinel 96ac7bcab8868b94de33ab08a10761545b6d42c6 192.168.184.122 26380 @ mymaster 192.168.184.122 6380
264
+1:X 12 Aug 2025 03:48:09.633 # +switch-master mymaster 192.168.184.122 6380 192.168.184.122 6379
265
+1:X 12 Aug 2025 03:48:09.633 * +slave slave 192.168.184.122:6381 192.168.184.122 6381 @ mymaster 192.168.184.122 6379
266
+1:X 12 Aug 2025 03:48:09.633 * +slave slave 192.168.184.122:6380 192.168.184.122 6380 @ mymaster 192.168.184.122 6379
267
+1:X 12 Aug 2025 03:48:09.640 # Could not rename tmp config file (Device or resource busy)
268
+1:X 12 Aug 2025 03:48:09.640 # WARNING: Sentinel was not able to save the new configuration on disk!!!: Device or resource busy
269
+```
270
+
271
+发现哨兵节点重新启动后,能够自动检测到主节点的变化,并进行相应的配置更新和故障转移操作。此时,Redis集群恢复了正常的工作状态。
272
+再次尝试在程序中连接Redis集群,都能正常访问。
273
+
274
+继续全部恢复所有redis节点,重新启动之前停止的主节点 redis-master 进程:
275
+
276
+```bash
277
+1:X 12 Aug 2025 03:50:14.541 # -sdown slave 192.168.184.122:6380 192.168.184.122 6380 @ mymaster 192.168.184.122 6379
278
+1:X 12 Aug 2025 03:50:20.838 * +reboot master mymaster 192.168.184.122 6379
279
+1:X 12 Aug 2025 03:50:36.213 # -sdown sentinel e39828966d5ae85ef8292ce9aa085c84ef5d6203 192.168.184.122 26381 @ mymaster 192.168.184.122 6379
280
+1:X 12 Aug 2025 03:50:37.284 * +sentinel-invalid-addr sentinel e39828966d5ae85ef8292ce9aa085c84ef5d6203 192.168.184.122 26381 @ mymaster 192.168.184.122 6379
281
+1:X 12 Aug 2025 03:50:37.284 * +sentinel sentinel e4213f7cc46cbac21727dd4ec930e65cd2c1dba2 192.168.184.122 26381 @ mymaster 192.168.184.122 6379
282
+1:X 12 Aug 2025 03:50:37.293 # Could not rename tmp config file (Device or resource busy)
283
+1:X 12 Aug 2025 03:50:37.293 # WARNING: Sentinel was not able to save the new configuration on disk!!!: Device or resource busy
284
+1:X 12 Aug 2025 03:51:16.272 * +fix-slave-config slave 192.168.184.122:6381 192.168.184.122 6381 @ mymaster 192.168.184.122 6379
285
+```
286
+可以发现一切都恢复正常,Redis集群的主节点、从节点和哨兵节点都能够正常工作。
287
+
288
+到此,Redis哨兵模式下的异常测试已经完成。通过这些测试,我们验证了Redis哨兵在主节点故障、从节点故障以及哨兵节点故障等情况下的自动恢复和故障转移能力。
289
+下面继续测试业务逻辑的异常情况。
290
+
291
+### 业务异常
292
+
293
+跑了大概一天,没有发现redisson连接异常的日志。
294
+
295
+### 场景1:主从切换
296
+步骤:
297
+哨兵Redis停止master节点,模拟主从切换。
298
+之后更新APP,查看元模型能否被消费。
299
+验证结果:
300
+使用Topic检查文档(我之前提供的文档),检查对应的客户端能否正常及时消费
301
+检查slowlog是否有超过200ms的执行命令
302
+
303
+
304
+### 场景2:初次安装
305
+**步骤**:
306
+清空Redis中 final_meta* 下的内容,重启所有后端容器,检查元模型能否被正常消费
307
+**验证结果**:
308
+使用Topic检查文档(我之前提供的文档),检查对应的客户端能否正常及时消费
309
+检查slowlog是否有超过200ms的执行命令
310
+
311
+**实际验证结果**:
312
+能够正常消费,没有发现消费延迟的情况。
313
+
314
+消费topic详情如下:
315
+
316
+```bash
317
+
318
+2025/08/13 15:57:34 收到消息 ID=1755071850670-0, 内容=map[key:OppmMonthTarget source:47d7894f-070b-47d9-8a75-94c242440260 type:update]
319
+2025/08/13 15:57:35 收到消息 ID=1755071850700-0, 内容=map[key:OppmProLine source:47d7894f-070b-47d9-8a75-94c242440260 type:update]
320
+2025/08/13 15:57:35 收到消息 ID=1755071852169-0, 内容=map[key:demo_role,data_source_test,crud_ds_test,tenant_ds_test,OppmCustomerArchive,a_app_model,buss_store,ops_trace_log,OppmOpportunity,test_order,sql_template_test,TestTest1,shard_year_test,ops_tracking_user_model,TestTest2,test_order_ref,OppmCompanyReport,TestUser,demo_product,service_node source:47d7894f-070b-47d9-8a75-94c242440260 type:update]
321
+2025/08/13 15:57:35 收到消息 ID=1755071852244-0, 内容=map[key:group_by_test,sie_order,OppmProLine,apiOrderService,OppmCrmOpportunityLog,OppmCrmOpportunity,test_eam_maintenance_task,OppmOpportunityThisWeek,wf_process_policy_user,test_data_auth_vo,install_app,demoUserSendVariables,wf_service_method_hook,TestOrderOrg,wf_process_model_view,worker_job_info,seed_ds_test,TestOrg,shard_list_test,seed_test source:47d7894f-070b-47d9-8a75-94c242440260 type:update]
322
+2025/08/13 15:57:35 收到消息 ID=1755071852292-0, 内容=map[key:er_a,test_eam_maintenance_document,worker_instance_info,demo_order,main_ds_test,host_node,demo_product_b,demo_product_c,TestDataSource,base_model_test,test_eam_maintenance_other_expenses,OppmIndustry,inherit_model_test,OppmRefProLineOrg,demoUserReceiverVariables,demo_supplier,TestRole,TestTest,MetaPropertyVm,wf_process_test source:47d7894f-070b-47d9-8a75-94c242440260 type:update]
323
+2025/08/13 15:57:36 收到消息 ID=1755071852382-0, 内容=map[key:test_eam_fault_maintenance_order,OppmMonthTarget,normal_scene_test,many_to_many_c,test_eam_maintenance_item,broadcast_test,buss_order,base_importexportexcel,crud_ds_test_ref,test_eam_maintenance_working_hours,worker_registration_info,sie_order_item,host_node_vm,UserVm,buss_ds_test,test_eam_maintenance_information,er_d,MetaAppVm,er_b,test_data_auth source:47d7894f-070b-47d9-8a75-94c242440260 type:update]
324
+2025/08/13 15:57:36 收到消息 ID=1755071852424-0, 内容=map[key:product_ds_test,OppmRefOrgAccount,OppmAreaSalesManager,buss_wallet,OppmOpportunityNextWeek,receiverVariables,shard_month_test,TestRule,ops_trace_handle_service,demo_user,senderVariables,OppmAreaReport,ops_tracking_service_model source:47d7894f-070b-47d9-8a75-94c242440260 type:update]
325
+
326
+```
327
+
328
+### 场景3:安装、更新、卸载
329
+**步骤**:
330
+测试正常的安装、更新、卸载在哨兵模式下的效果
331
+**验证方式**:
332
+使用Topic检查文档(我之前提供的文档),检查对应的客户端能否正常及时消费
333
+检查slowlog是否有超过200ms的执行命令
334
+
335
+**实际验证结果**:
336
+能够正常消费,没有发现消费延迟的情况。
337
+能够正常安装、更新、卸载。安装、更新、卸载后,元模型都能被正常消费。但是出现一些单个的stream事件,而且是不相干的元模型事件。
338
+
339
+
340
+安装app后消费详情如下:
341
+
342
+```bash
343
+
344
+2025/08/13 16:07:34 收到消息 ID=1755072452966-0, 内容=map[key:OppmMonthTarget source:97f97547-d38c-4f10-ab4e-4099f96a718f type:update]
345
+2025/08/13 16:07:34 收到消息 ID=1755072452994-0, 内容=map[key:OppmProLine source:97f97547-d38c-4f10-ab4e-4099f96a718f type:update]
346
+2025/08/13 16:07:37 收到消息 ID=1755072455636-0, 内容=map[key:demo_role,data_source_test,crud_ds_test,tenant_ds_test,OppmCustomerArchive,a_app_model,buss_store,ops_trace_log,OppmOpportunity,test_order,sql_template_test,TestTest1,shard_year_test,ops_tracking_user_model,TestTest2,test_order_ref,OppmCompanyReport,TestUser,demo_product,service_node source:97f97547-d38c-4f10-ab4e-4099f96a718f type:update]
347
+2025/08/13 16:07:37 收到消息 ID=1755072455733-0, 内容=map[key:group_by_test,sie_order,OppmProLine,apiOrderService,OppmCrmOpportunityLog,OppmCrmOpportunity,test_eam_maintenance_task,OppmOpportunityThisWeek,wf_process_policy_user,test_data_auth_vo,install_app,demoUserSendVariables,wf_service_method_hook,TestOrderOrg,wf_process_model_view,worker_job_info,seed_ds_test,TestOrg,shard_list_test,seed_test source:97f97547-d38c-4f10-ab4e-4099f96a718f type:update]
348
+2025/08/13 16:07:37 收到消息 ID=1755072455783-0, 内容=map[key:er_a,test_eam_maintenance_document,worker_instance_info,demo_order,main_ds_test,host_node,demo_product_b,demo_product_c,TestDataSource,base_model_test,test_eam_maintenance_other_expenses,OppmIndustry,inherit_model_test,OppmRefProLineOrg,demoUserReceiverVariables,demo_supplier,TestRole,TestTest,MetaPropertyVm,wf_process_test source:97f97547-d38c-4f10-ab4e-4099f96a718f type:update]
349
+2025/08/13 16:07:37 收到消息 ID=1755072455881-0, 内容=map[key:test_eam_fault_maintenance_order,OppmMonthTarget,normal_scene_test,many_to_many_c,test_eam_maintenance_item,broadcast_test,buss_order,base_importexportexcel,crud_ds_test_ref,test_eam_maintenance_working_hours,worker_registration_info,sie_order_item,host_node_vm,UserVm,buss_ds_test,test_eam_maintenance_information,er_d,MetaAppVm,er_b,test_data_auth source:97f97547-d38c-4f10-ab4e-4099f96a718f type:update]
350
+2025/08/13 16:07:37 收到消息 ID=1755072455909-0, 内容=map[key:product_ds_test,OppmRefOrgAccount,OppmAreaSalesManager,buss_wallet,OppmOpportunityNextWeek,receiverVariables,shard_month_test,TestRule,ops_trace_handle_service,demo_user,senderVariables,OppmAreaReport,ops_tracking_service_model source:97f97547-d38c-4f10-ab4e-4099f96a718f type:update]
351
+
352
+```
353
+卸载消费详情如下:
354
+
355
+```bash
356
+
357
+2025/08/13 16:02:14 收到消息 ID=1755072133153-0, 内容=map[key:test_eam_maintenance_document,TestTest,TestUser,TestDataSource,test_eam_maintenance_other_expenses,TestTest1,TestTest2,TestRole,test_order_ref,test_order,MetaPropertyVm,TestOrderOrg,test_eam_maintenance_information,test_eam_fault_maintenance_order,test_eam_maintenance_item,test_eam_maintenance_working_hours,UserVm,MetaAppVm,test_data_auth,base_importexportexcel,TestRule,TestOrg,test_data_auth_vo,test_eam_maintenance_task source:bb0ab662-2b19-4b9b-9ffe-0d8b5e9d9223 type:remove]
358
+
359
+```
360
+更新后消费详情如下:
361
+
362
+```bash
363
+
364
+2025/08/13 17:08:56 收到消息 ID=1755076134622-0, 内容=map[key:OppmMonthTarget source:9c408177-4c1f-45ea-90e7-3194c04f0aef type:update]
365
+2025/08/13 17:08:56 收到消息 ID=1755076134667-0, 内容=map[key:OppmProLine source:9c408177-4c1f-45ea-90e7-3194c04f0aef type:update]
366
+2025/08/13 17:08:59 收到消息 ID=1755076137980-0, 内容=map[key:demo_role,data_source_test,crud_ds_test,tenant_ds_test,OppmCustomerArchive,a_app_model,buss_store,ops_trace_log,OppmOpportunity,test_order,sql_template_test,TestTest1,shard_year_test,ops_tracking_user_model,TestTest2,test_order_ref,worker_job_info,TestUser,demo_product,service_node source:9c408177-4c1f-45ea-90e7-3194c04f0aef type:update]
367
+2025/08/13 17:08:59 收到消息 ID=1755076138058-0, 内容=map[key:group_by_test,sie_order,OppmProLine,apiOrderService,OppmCrmOpportunityLog,OppmCrmOpportunity,test_eam_maintenance_task,OppmOpportunityThisWeek,wf_process_policy_user,test_data_auth_vo,install_app,demoUserSendVariables,wf_service_method_hook,TestOrderOrg,wf_process_model_view,OppmCompanyReport,seed_ds_test,TestOrg,shard_list_test,seed_test source:9c408177-4c1f-45ea-90e7-3194c04f0aef type:update]
368
+2025/08/13 17:08:59 收到消息 ID=1755076138108-0, 内容=map[key:er_a,test_eam_maintenance_document,worker_instance_info,demo_order,main_ds_test,host_node,demo_product_b,demo_product_c,TestDataSource,base_model_test,test_eam_maintenance_other_expenses,OppmIndustry,inherit_model_test,OppmRefProLineOrg,demoUserReceiverVariables,demo_supplier,TestRole,TestTest,MetaPropertyVm,wf_process_test source:9c408177-4c1f-45ea-90e7-3194c04f0aef type:update]
369
+2025/08/13 17:08:59 收到消息 ID=1755076138218-0, 内容=map[key:test_eam_fault_maintenance_order,OppmMonthTarget,normal_scene_test,many_to_many_c,test_eam_maintenance_item,broadcast_test,buss_order,base_importexportexcel,crud_ds_test_ref,test_eam_maintenance_working_hours,worker_registration_info,sie_order_item,host_node_vm,UserVm,buss_ds_test,test_eam_maintenance_information,er_d,MetaAppVm,er_b,test_data_auth source:9c408177-4c1f-45ea-90e7-3194c04f0aef type:update]
370
+2025/08/13 17:08:59 收到消息 ID=1755076138267-0, 内容=map[key:product_ds_test,OppmRefOrgAccount,OppmAreaSalesManager,buss_wallet,OppmOpportunityNextWeek,receiverVariables,shard_month_test,TestRule,ops_trace_handle_service,demo_user,senderVariables,OppmAreaReport,ops_tracking_service_model source:9c408177-4c1f-45ea-90e7-3194c04f0aef type:update]
371
+2025/08/13 17:09:01 收到消息 ID=1755076140128-0, 内容=map[key:OppmMonthTarget source:4abda77c-aedb-4f48-9186-8f65fded6840 type:update]
372
+2025/08/13 17:09:01 收到消息 ID=1755076140163-0, 内容=map[key:OppmProLine source:4abda77c-aedb-4f48-9186-8f65fded6840 type:update]
373
+
374
+```
375
+
376
+### 场景4:模拟大量卸载安装
377
+**步骤**:
378
+用脚本进行一晚的安装卸载APP,测试是否有问题
379
+**验证方式**:
380
+使用Topic检查文档(我之前提供的文档),检查对应的客户端能否正常及时消费
381
+检查slowlog是否有超过200ms的执行命令
382
+
383
+### 场景5:模拟长时间运行
384
+**步骤**:
385
+不停止Redis和后端,测试运行一星期的效果
386
+**验证方式**:
387
+使用Topic检查文档(我之前提供的文档),检查对应的客户端能否正常及时消费
388
+检查slowlog是否有超过200ms的执行命令
... ...
\ No newline at end of file