---
#==============================================================#
# File : simu.yml
# Desc : Pigsty Simubox: a 36-node prod simulation env
# Ctime : 2023-07-20
# Mtime : 2025-01-11
# Docs : https://pigsty.io/docs/conf/simu/
# License : AGPLv3 @ https://pigsty.io/docs/about/license
# Copyright : 2018-2025 Ruohang Feng / Vonng (rh@vonng.com)
#==============================================================#
# this config template is intended to be used with the ent offline packages,
# which contain postgres 13-17 and all other required packages
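# usage sketch (assumed standard pigsty workflow, verify against your release):
#   ./configure -c simu          # render this template into pigsty.yml (flag assumed)
#   ./install.yml                # provision infra / node / etcd / minio / pgsql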
all:
children:
#==========================================================#
# infra: 2 nodes
#==========================================================#
# ./infra.yml -l infra
# ./docker.yml -l infra (optional)
infra:
hosts:
10.10.10.10: {}
10.10.10.11: {}
vars:
docker_enabled: true
#docker_registry_mirrors: [https://docker.m.daocloud.io, https://dockerproxy.com]
node_conf: oltp # use oltp template for infra nodes
pg_conf: oltp.yml # use oltp template for infra pgsql
pg_exporters: # bin/pgmon-add pg-meta2/pg-test2/pg-src2/pg-dst2
20001: {pg_cluster: pg-meta2 ,pg_seq: 1 ,pg_host: 10.10.10.10, pg_databases: [{ name: meta }]}
20002: {pg_cluster: pg-meta2 ,pg_seq: 2 ,pg_host: 10.10.10.11, pg_databases: [{ name: meta }]}
20003: {pg_cluster: pg-test2 ,pg_seq: 1 ,pg_host: 10.10.10.41, pg_databases: [{ name: test }]}
20004: {pg_cluster: pg-test2 ,pg_seq: 2 ,pg_host: 10.10.10.42, pg_databases: [{ name: test }]}
20005: {pg_cluster: pg-test2 ,pg_seq: 3 ,pg_host: 10.10.10.43, pg_databases: [{ name: test }]}
20006: {pg_cluster: pg-test2 ,pg_seq: 4 ,pg_host: 10.10.10.44, pg_databases: [{ name: test }]}
20007: {pg_cluster: pg-src2 ,pg_seq: 1 ,pg_host: 10.10.10.45, pg_databases: [{ name: src }]}
20008: {pg_cluster: pg-src2 ,pg_seq: 2 ,pg_host: 10.10.10.46, pg_databases: [{ name: src }]}
20009: {pg_cluster: pg-src2 ,pg_seq: 3 ,pg_host: 10.10.10.47, pg_databases: [{ name: src }]}
20010: {pg_cluster: pg-dst2 ,pg_seq: 3 ,pg_host: 10.10.10.48, pg_databases: [{ name: dst }]}
20011: {pg_cluster: pg-dst2 ,pg_seq: 4 ,pg_host: 10.10.10.49, pg_databases: [{ name: dst }]}
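# note: the pg_exporters above spawn extra postgres_exporter instances on the
# infra nodes (local listen ports 20001-20011) to scrape the pg-*2 targets
# remotely, simulating RDS-style monitoring of databases not managed by pigsty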
#==========================================================#
# nodes: 36 nodes
#==========================================================#
# ./node.yml
nodes:
hosts:
10.10.10.10 : { nodename: meta1 ,node_cluster: meta ,pg_cluster: pg-meta ,pg_seq: 1 ,pg_role: primary, infra_seq: 1 }
10.10.10.11 : { nodename: meta2 ,node_cluster: meta ,pg_cluster: pg-meta ,pg_seq: 2 ,pg_role: replica, infra_seq: 2 }
10.10.10.12 : { nodename: pg12 ,node_cluster: pg12 ,pg_cluster: pg-v12 ,pg_seq: 1 ,pg_role: primary }
10.10.10.13 : { nodename: pg13 ,node_cluster: pg13 ,pg_cluster: pg-v13 ,pg_seq: 1 ,pg_role: primary }
10.10.10.14 : { nodename: pg14 ,node_cluster: pg14 ,pg_cluster: pg-v14 ,pg_seq: 1 ,pg_role: primary }
10.10.10.15 : { nodename: pg15 ,node_cluster: pg15 ,pg_cluster: pg-v15 ,pg_seq: 1 ,pg_role: primary }
10.10.10.16 : { nodename: pg16 ,node_cluster: pg16 ,pg_cluster: pg-v16 ,pg_seq: 1 ,pg_role: primary }
10.10.10.17 : { nodename: pg17 ,node_cluster: pg17 ,pg_cluster: pg-v17 ,pg_seq: 1 ,pg_role: primary }
10.10.10.18 : { nodename: proxy1 ,node_cluster: proxy ,vip_address: 10.10.10.20 ,vip_vrid: 20 ,vip_interface: eth1 ,vip_role: master }
10.10.10.19 : { nodename: proxy2 ,node_cluster: proxy ,vip_address: 10.10.10.20 ,vip_vrid: 20 ,vip_interface: eth1 ,vip_role: backup }
10.10.10.21 : { nodename: minio1 ,node_cluster: minio ,minio_cluster: minio ,minio_seq: 1 ,etcd_cluster: etcd ,etcd_seq: 1}
10.10.10.22 : { nodename: minio2 ,node_cluster: minio ,minio_cluster: minio ,minio_seq: 2 ,etcd_cluster: etcd ,etcd_seq: 2}
10.10.10.23 : { nodename: minio3 ,node_cluster: minio ,minio_cluster: minio ,minio_seq: 3 ,etcd_cluster: etcd ,etcd_seq: 3}
10.10.10.24 : { nodename: minio4 ,node_cluster: minio ,minio_cluster: minio ,minio_seq: 4 ,etcd_cluster: etcd ,etcd_seq: 4}
10.10.10.25 : { nodename: minio5 ,node_cluster: minio ,minio_cluster: minio ,minio_seq: 5 ,etcd_cluster: etcd ,etcd_seq: 5}
10.10.10.40 : { nodename: node40 ,node_id_from_pg: true }
10.10.10.41 : { nodename: node41 ,node_id_from_pg: true }
10.10.10.42 : { nodename: node42 ,node_id_from_pg: true }
10.10.10.43 : { nodename: node43 ,node_id_from_pg: true }
10.10.10.44 : { nodename: node44 ,node_id_from_pg: true }
10.10.10.45 : { nodename: node45 ,node_id_from_pg: true }
10.10.10.46 : { nodename: node46 ,node_id_from_pg: true }
10.10.10.47 : { nodename: node47 ,node_id_from_pg: true }
10.10.10.48 : { nodename: node48 ,node_id_from_pg: true }
10.10.10.49 : { nodename: node49 ,node_id_from_pg: true }
10.10.10.50 : { nodename: node50 ,node_id_from_pg: true }
10.10.10.51 : { nodename: node51 ,node_id_from_pg: true }
10.10.10.52 : { nodename: node52 ,node_id_from_pg: true }
10.10.10.53 : { nodename: node53 ,node_id_from_pg: true }
10.10.10.54 : { nodename: node54 ,node_id_from_pg: true }
10.10.10.55 : { nodename: node55 ,node_id_from_pg: true }
10.10.10.56 : { nodename: node56 ,node_id_from_pg: true }
10.10.10.57 : { nodename: node57 ,node_id_from_pg: true }
10.10.10.58 : { nodename: node58 ,node_id_from_pg: true }
10.10.10.59 : { nodename: node59 ,node_id_from_pg: true }
10.10.10.88 : { nodename: test }
#==========================================================#
# etcd: 5 nodes colocated with the dedicated minio cluster
#==========================================================#
# ./etcd.yml -l etcd;
etcd:
hosts:
10.10.10.21: {}
10.10.10.22: {}
10.10.10.23: {}
10.10.10.24: {}
10.10.10.25: {}
vars: {}
#==========================================================#
# minio: 5 nodes used as a dedicated minio cluster
#==========================================================#
# ./minio.yml -l minio;
minio:
hosts:
10.10.10.21: {}
10.10.10.22: {}
10.10.10.23: {}
10.10.10.24: {}
10.10.10.25: {}
vars:
minio_data: '/data{1...4}' # 5 node x 4 disk
#==========================================================#
# proxy: 2 nodes used as dedicated haproxy servers
#==========================================================#
# ./node.yml -l proxy
proxy:
hosts:
10.10.10.18: {}
10.10.10.19: {}
vars:
vip_enabled: true
haproxy_services: # expose minio service : sss.pigsty:9000
- name: minio # [REQUIRED] service name, unique
port: 9000 # [REQUIRED] service port, unique
balance: leastconn # Use leastconn algorithm and minio health check
options: [ "option httpchk", "option http-keep-alive", "http-check send meth OPTIONS uri /minio/health/live", "http-check expect status 200" ]
servers: # reload service with ./node.yml -t haproxy_config,haproxy_reload
- { name: minio-1 ,ip: 10.10.10.21 ,port: 9000 ,options: 'check-ssl ca-file /etc/pki/ca.crt check port 9000' }
- { name: minio-2 ,ip: 10.10.10.22 ,port: 9000 ,options: 'check-ssl ca-file /etc/pki/ca.crt check port 9000' }
- { name: minio-3 ,ip: 10.10.10.23 ,port: 9000 ,options: 'check-ssl ca-file /etc/pki/ca.crt check port 9000' }
- { name: minio-4 ,ip: 10.10.10.24 ,port: 9000 ,options: 'check-ssl ca-file /etc/pki/ca.crt check port 9000' }
- { name: minio-5 ,ip: 10.10.10.25 ,port: 9000 ,options: 'check-ssl ca-file /etc/pki/ca.crt check port 9000' }
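# note: together with the keepalived VIP 10.10.10.20 and the sss.pigsty entry
# in node_etc_hosts below, this service gives clients (and pgbackrest) a single
# HA entry point to minio at https://sss.pigsty:9000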
#==========================================================#
# pg-meta: reuse the 2 infra nodes as meta cmdb
#==========================================================#
# ./pgsql.yml -l pg-meta
pg-meta:
hosts:
10.10.10.10: { pg_seq: 1 , pg_role: primary }
10.10.10.11: { pg_seq: 2 , pg_role: replica }
vars:
pg_cluster: pg-meta
pg_vip_enabled: true
pg_vip_address: 10.10.10.2/24
pg_vip_interface: eth1
pg_users:
- {name: dbuser_meta ,password: DBUser.Meta ,pgbouncer: true ,roles: [dbrole_admin] ,comment: pigsty admin user }
- {name: dbuser_view ,password: DBUser.Viewer ,pgbouncer: true ,roles: [dbrole_readonly] ,comment: read-only viewer for meta database }
- {name: dbuser_grafana ,password: DBUser.Grafana ,pgbouncer: true ,roles: [dbrole_admin] ,comment: admin user for grafana database }
- {name: dbuser_bytebase ,password: DBUser.Bytebase ,pgbouncer: true ,roles: [dbrole_admin] ,comment: admin user for bytebase database }
- {name: dbuser_kong ,password: DBUser.Kong ,pgbouncer: true ,roles: [dbrole_admin] ,comment: admin user for kong api gateway }
- {name: dbuser_gitea ,password: DBUser.Gitea ,pgbouncer: true ,roles: [dbrole_admin] ,comment: admin user for gitea service }
- {name: dbuser_wiki ,password: DBUser.Wiki ,pgbouncer: true ,roles: [dbrole_admin] ,comment: admin user for wiki.js service }
- {name: dbuser_noco ,password: DBUser.Noco ,pgbouncer: true ,roles: [dbrole_admin] ,comment: admin user for nocodb service }
pg_databases:
- { name: meta ,baseline: cmdb.sql ,comment: pigsty meta database ,schemas: [pigsty] ,extensions: [{name: vector}]}
- { name: grafana ,owner: dbuser_grafana ,revokeconn: true ,comment: grafana primary database }
- { name: bytebase ,owner: dbuser_bytebase ,revokeconn: true ,comment: bytebase primary database }
- { name: kong ,owner: dbuser_kong ,revokeconn: true ,comment: kong the api gateway database }
- { name: gitea ,owner: dbuser_gitea ,revokeconn: true ,comment: gitea meta database }
- { name: wiki ,owner: dbuser_wiki ,revokeconn: true ,comment: wiki meta database }
- { name: noco ,owner: dbuser_noco ,revokeconn: true ,comment: nocodb database }
pg_hba_rules:
- { user: dbuser_view , db: all ,addr: infra ,auth: pwd ,title: 'allow grafana dashboard access cmdb from infra nodes' }
pg_libs: 'pg_stat_statements, auto_explain' # preloaded shared libraries; add timescaledb here if needed
node_crontab: # full backup at 1am monday, incremental backups at 1am on the other days
- '00 01 * * 1 postgres /pg/bin/pg-backup full'
- '00 01 * * 2,3,4,5,6,7 postgres /pg/bin/pg-backup'
#==========================================================#
# pg-v12 - v17
#==========================================================#
# ./pgsql.yml -l pg-v*
pg-v12:
hosts: { 10.10.10.12: {}}
vars:
pg_version: 13
pg_service_provider: proxy # use load balancer on group `proxy` with port 10012
pg_default_services: [{ name: primary ,port: 10012 ,dest: postgres ,check: /primary ,selector: "[]" }]
pg-v13:
hosts: { 10.10.10.13: {}}
vars:
pg_version: 13
pg_service_provider: proxy # use load balancer on group `proxy` with port 10013
pg_default_services: [{ name: primary ,port: 10013 ,dest: postgres ,check: /primary ,selector: "[]" }]
pg-v14:
hosts: { 10.10.10.14: {}}
vars:
pg_version: 14
pg_service_provider: proxy # use load balancer on group `proxy` with port 10014
pg_default_services: [{ name: primary ,port: 10014 ,dest: postgres ,check: /primary ,selector: "[]" }]
pg-v15:
hosts: { 10.10.10.15: {}}
vars:
pg_version: 15
pg_service_provider: proxy # use load balancer on group `proxy` with port 10015
pg_default_services: [{ name: primary ,port: 10015 ,dest: postgres ,check: /primary ,selector: "[]" }]
pg-v16:
hosts: { 10.10.10.16: {}}
vars:
pg_version: 16
pg_service_provider: proxy # use load balancer on group `proxy` with port 10016
pg_default_services: [{ name: primary ,port: 10016 ,dest: postgres ,check: /primary ,selector: "[]" }]
pg-v17:
hosts: { 10.10.10.17: {}}
vars:
pg_version: 17
pg_service_provider: proxy # use load balancer on group `proxy` with port 10017
pg_default_services: [{ name: primary ,port: 10017 ,dest: postgres ,check: /primary ,selector: "[]" }]
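# note (sketch): with pg_service_provider: proxy, these single-instance
# clusters are exposed by the haproxy pair above instead of a local haproxy,
# so e.g. pg-v13 should be reachable through the proxy VIP at 10.10.10.20:10013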
#==========================================================#
# pg-pitr: single node
#==========================================================#
# ./pgsql.yml -l pg-pitr
pg-pitr:
hosts:
10.10.10.40: { pg_seq: 1 ,pg_role: primary }
vars:
pg_cluster: pg-pitr
pg_databases: [{ name: test }]
#==========================================================#
# pg-test: dedicated 4-node testing cluster
#==========================================================#
# ./pgsql.yml -l pg-test
pg-test:
hosts:
10.10.10.41: { pg_seq: 1 ,pg_role: primary }
10.10.10.42: { pg_seq: 2 ,pg_role: replica }
10.10.10.43: { pg_seq: 3 ,pg_role: replica }
10.10.10.44: { pg_seq: 4 ,pg_role: replica }
vars:
pg_cluster: pg-test
pg_vip_enabled: true
pg_vip_address: 10.10.10.3/24
pg_vip_interface: eth1
pg_users: [{ name: test , password: test , pgbouncer: true , roles: [ dbrole_admin ] }]
pg_databases: [{ name: test }]
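# connection sketch: the L2 VIP 10.10.10.3 follows the cluster primary, so
# (assuming the default port 5432) something like this should work:
#   psql postgres://test:test@10.10.10.3:5432/test -c 'SELECT 1'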
#==========================================================#
# pg-src: dedicated 3-node testing cluster
#==========================================================#
# ./pgsql.yml -l pg-src
pg-src:
hosts:
10.10.10.45: { pg_seq: 1 ,pg_role: primary }
10.10.10.46: { pg_seq: 2 ,pg_role: replica }
10.10.10.47: { pg_seq: 3 ,pg_role: replica }
vars:
pg_cluster: pg-src
#pg_version: 14
pg_vip_enabled: true
pg_vip_address: 10.10.10.4/24
pg_vip_interface: eth1
pg_users: [{ name: test , password: test , pgbouncer: true , roles: [ dbrole_admin ] }]
pg_databases: [{ name: src }]
#==========================================================#
# pg-dst: dedicated 2-node testing cluster
#==========================================================#
# ./pgsql.yml -l pg-dst
pg-dst:
hosts:
10.10.10.48: { pg_seq: 1 ,pg_role: primary } # 8C 8G
10.10.10.49: { pg_seq: 2 ,pg_role: replica } # 1C 2G
vars:
pg_cluster: pg-dst
pg_vip_enabled: true
pg_vip_address: 10.10.10.5/24
pg_vip_interface: eth1
node_hugepage_ratio: 0.3
pg_users: [ { name: test , password: test , pgbouncer: true , roles: [ dbrole_admin ] } ]
pg_databases: [ { name: dst } ]
#==========================================================#
# pg-citus: 10-node citus cluster (5 primary-replica pairs)
#==========================================================#
pg-citus: # citus group
hosts:
10.10.10.50: { pg_group: 0, pg_cluster: pg-citus0 ,pg_vip_address: 10.10.10.60/24 ,pg_seq: 0, pg_role: primary }
10.10.10.51: { pg_group: 0, pg_cluster: pg-citus0 ,pg_vip_address: 10.10.10.60/24 ,pg_seq: 1, pg_role: replica }
10.10.10.52: { pg_group: 1, pg_cluster: pg-citus1 ,pg_vip_address: 10.10.10.61/24 ,pg_seq: 0, pg_role: primary }
10.10.10.53: { pg_group: 1, pg_cluster: pg-citus1 ,pg_vip_address: 10.10.10.61/24 ,pg_seq: 1, pg_role: replica }
10.10.10.54: { pg_group: 2, pg_cluster: pg-citus2 ,pg_vip_address: 10.10.10.62/24 ,pg_seq: 0, pg_role: primary }
10.10.10.55: { pg_group: 2, pg_cluster: pg-citus2 ,pg_vip_address: 10.10.10.62/24 ,pg_seq: 1, pg_role: replica }
10.10.10.56: { pg_group: 3, pg_cluster: pg-citus3 ,pg_vip_address: 10.10.10.63/24 ,pg_seq: 0, pg_role: primary }
10.10.10.57: { pg_group: 3, pg_cluster: pg-citus3 ,pg_vip_address: 10.10.10.63/24 ,pg_seq: 1, pg_role: replica }
10.10.10.58: { pg_group: 4, pg_cluster: pg-citus4 ,pg_vip_address: 10.10.10.64/24 ,pg_seq: 0, pg_role: primary }
10.10.10.59: { pg_group: 4, pg_cluster: pg-citus4 ,pg_vip_address: 10.10.10.64/24 ,pg_seq: 1, pg_role: replica }
vars:
pg_mode: citus # pgsql cluster mode: citus
pg_version: 16 # citus does not have pg17 available yet
pg_shard: pg-citus # citus shard name: pg-citus
pg_primary_db: citus # primary database used by citus
pg_vip_enabled: true # enable vip for citus cluster
pg_vip_interface: eth1 # vip interface for all members
pg_dbsu_password: DBUser.Postgres # enable dbsu password access for citus
pg_extensions: [ citus, postgis, pgvector, topn, pg_cron, hll ] # install these extensions
pg_libs: 'citus, pg_cron, pg_stat_statements' # citus will be added by patroni automatically
pg_users: [{ name: dbuser_citus ,password: DBUser.Citus ,pgbouncer: true ,roles: [ dbrole_admin ] }]
pg_databases: [{ name: citus ,owner: dbuser_citus ,extensions: [ citus, vector, topn, pg_cron, hll ] }]
pg_parameters:
cron.database_name: citus
citus.node_conninfo: 'sslrootcert=/pg/cert/ca.crt sslmode=verify-full'
pg_hba_rules:
- { user: 'all' ,db: all ,addr: 127.0.0.1/32 ,auth: ssl ,title: 'all user ssl access from localhost' }
- { user: 'all' ,db: all ,addr: intra ,auth: ssl ,title: 'all user ssl access from intranet' }
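# usage sketch (coordinator is the pg-citus0 group, VIP 10.10.10.60; port 5432 assumed):
#   psql postgres://dbuser_citus:DBUser.Citus@10.10.10.60:5432/citus \
#     -c "CREATE TABLE demo(id bigint PRIMARY KEY); SELECT create_distributed_table('demo','id');"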
#==========================================================#
# redis-meta: reuse the 5 etcd nodes as redis sentinel
#==========================================================#
# ./redis.yml -l redis-meta
redis-meta:
hosts:
10.10.10.21: { redis_node: 1 , redis_instances: { 26379: {} } }
10.10.10.22: { redis_node: 2 , redis_instances: { 26379: {} } }
10.10.10.23: { redis_node: 3 , redis_instances: { 26379: {} } }
10.10.10.24: { redis_node: 4 , redis_instances: { 26379: {} } }
10.10.10.25: { redis_node: 5 , redis_instances: { 26379: {} } }
vars:
redis_cluster: redis-meta
redis_password: 'redis.meta'
redis_mode: sentinel
redis_max_memory: 256MB
redis_sentinel_monitor: # primary list for redis sentinel, use cls as name, primary ip:port
- { name: redis-src, host: 10.10.10.45, port: 6379 ,password: redis.src, quorum: 1 }
- { name: redis-dst, host: 10.10.10.48, port: 6379 ,password: redis.dst, quorum: 1 }
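# quick check (sketch): ask any sentinel which masters it is monitoring:
#   redis-cli -h 10.10.10.21 -p 26379 -a redis.meta SENTINEL masters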
#==========================================================#
# redis-test: redis native cluster in 4 nodes, 12 instances
#==========================================================#
# ./node.yml -l redis-test; ./redis.yml -l redis-test
redis-test:
hosts:
10.10.10.41: { redis_node: 1 ,redis_instances: { 6379: {} ,6380: {} ,6381: {} } }
10.10.10.42: { redis_node: 2 ,redis_instances: { 6379: {} ,6380: {} ,6381: {} } }
10.10.10.43: { redis_node: 3 ,redis_instances: { 6379: {} ,6380: {} ,6381: {} } }
10.10.10.44: { redis_node: 4 ,redis_instances: { 6379: {} ,6380: {} ,6381: {} } }
vars:
redis_cluster: redis-test
redis_password: 'redis.test'
redis_mode: cluster
redis_max_memory: 64MB
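# quick check (sketch): inspect the native cluster state from any member:
#   redis-cli -h 10.10.10.41 -p 6379 -a redis.test cluster info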
#==========================================================#
# redis-src: reuse pg-src 3 nodes for redis
#==========================================================#
# ./redis.yml -l redis-src
redis-src:
hosts:
10.10.10.45: { redis_node: 1 , redis_instances: {6379: { } }}
10.10.10.46: { redis_node: 2 , redis_instances: {6379: { replica_of: '10.10.10.45 6379' }, 6380: { replica_of: '10.10.10.46 6379' } }}
10.10.10.47: { redis_node: 3 , redis_instances: {6379: { replica_of: '10.10.10.45 6379' }, 6380: { replica_of: '10.10.10.47 6379' } }}
vars:
redis_cluster: redis-src
redis_password: 'redis.src'
redis_max_memory: 64MB
#==========================================================#
# redis-dst: reuse pg-dst 2 nodes for redis
#==========================================================#
# ./redis.yml -l redis-dst
redis-dst:
hosts:
10.10.10.48: { redis_node: 1 , redis_instances: {6379: { } }}
10.10.10.49: { redis_node: 2 , redis_instances: {6379: { replica_of: '10.10.10.48 6379' } }}
vars:
redis_cluster: redis-dst
redis_password: 'redis.dst'
redis_max_memory: 64MB
#==========================================================#
# ferret: reuse pg-src as mongo (ferretdb)
#==========================================================#
# ./mongo.yml -l ferret
ferret:
hosts:
10.10.10.45: { mongo_seq: 1 }
10.10.10.46: { mongo_seq: 2 }
10.10.10.47: { mongo_seq: 3 }
vars:
mongo_cluster: ferret
mongo_pgurl: 'postgres://test:test@10.10.10.45:5432/src'
#mongo_pgurl: 'postgres://test:test@10.10.10.3:5436/test'
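# connection sketch (assuming ferretdb listens on the default mongo port 27017):
#   mongosh 'mongodb://test:test@10.10.10.45:27017/src'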
#==========================================================#
# test: a node for running cli tools and miscellaneous tests
#==========================================================#
test:
hosts: { 10.10.10.88: { nodename: test } }
vars:
node_cluster: test
node_packages: [ 'etcd,logcli,mcli,redis' ]
#============================================================#
# Global Variables
#============================================================#
vars:
#==========================================================#
# INFRA
#==========================================================#
version: v3.2.2 # pigsty version string
admin_ip: 10.10.10.10 # admin node ip address
region: china # upstream mirror region: default|china|europe
infra_portal: # domain names and upstream servers
home : { domain: h.pigsty }
grafana : { domain: g.pigsty ,endpoint: "10.10.10.10:3000" , websocket: true }
prometheus : { domain: p.pigsty ,endpoint: "10.10.10.10:9090" }
alertmanager : { domain: a.pigsty ,endpoint: "10.10.10.10:9093" }
blackbox : { endpoint: "10.10.10.10:9115" }
loki : { endpoint: "10.10.10.10:3100" }
minio : { domain: m.pigsty ,endpoint: "10.10.10.21:9001" ,scheme: https ,websocket: true }
postgrest : { domain: api.pigsty ,endpoint: "127.0.0.1:8884" }
pgadmin : { domain: adm.pigsty ,endpoint: "127.0.0.1:8885" }
pgweb : { domain: cli.pigsty ,endpoint: "127.0.0.1:8886" }
bytebase : { domain: ddl.pigsty ,endpoint: "127.0.0.1:8887" }
jupyter : { domain: lab.pigsty ,endpoint: "127.0.0.1:8888" , websocket: true }
supa : { domain: supa.pigsty ,endpoint: "10.10.10.10:8000", websocket: true }
nginx_navbar: []
dns_records: # dynamic dns records resolved by dnsmasq
- 10.10.10.1 h.pigsty a.pigsty p.pigsty g.pigsty
#==========================================================#
# NODE
#==========================================================#
node_id_from_pg: false # use nodename rather than pg identity as hostname
node_conf: tiny # use small node template
node_timezone: Asia/Hong_Kong # use Asia/Hong_Kong Timezone
node_dns_servers: # DNS servers in /etc/resolv.conf
- 10.10.10.10
- 10.10.10.11
node_etc_hosts:
- 10.10.10.10 h.pigsty a.pigsty p.pigsty g.pigsty
- 10.10.10.20 sss.pigsty # point minio service domain to the L2 VIP of proxy cluster
node_ntp_servers: # NTP servers in /etc/chrony.conf
- pool cn.pool.ntp.org iburst
- pool 10.10.10.10 iburst
node_admin_ssh_exchange: false # do not exchange admin ssh key among node cluster
#==========================================================#
# PGSQL
#==========================================================#
pg_conf: tiny.yml
pgbackrest_method: minio # USE THE HA MINIO THROUGH A LOAD BALANCER
pg_dbsu_ssh_exchange: false # do not exchange dbsu ssh key among pgsql cluster
pgbackrest_repo: # pgbackrest repo: https://pgbackrest.org/configuration.html#section-repository
local: # default pgbackrest repo with local posix fs
path: /pg/backup # local backup directory, `/pg/backup` by default
retention_full_type: count # retention full backups by count
retention_full: 2 # keep 2, at most 3 full backups when using the local fs repo
minio:
type: s3
s3_endpoint: sss.pigsty # sss.pigsty resolves to the proxy L2 VIP 10.10.10.20, which load-balances the 5 minio nodes
s3_region: us-east-1 # placeholder region for s3 compatibility
s3_bucket: pgsql # bucket holding pgbackrest backups
s3_key: pgbackrest # use a dedicated password for the MinIO pgbackrest user in production
s3_key_secret: S3User.Backup
s3_uri_style: path
path: /pgbackrest
storage_port: 9000 # Use the load balancer port 9000
storage_ca_file: /etc/pki/ca.crt
bundle: y
cipher_type: aes-256-cbc # use a new cipher passphrase in your production environment
cipher_pass: pgBackRest.${pg_cluster}
retention_full_type: time
retention_full: 14
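# verification sketch (assuming pgbackrest stanzas are named after pg_cluster):
#   sudo -iu postgres pgbackrest --stanza=pg-meta info   # list backup sets stored in minio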
#==========================================================#
# Repo
#==========================================================#
repo_packages: [
node-bootstrap, infra-package, infra-addons, node-package1, node-package2, pgsql-utility,
pg17-core ,pg17-time ,pg17-gis ,pg17-rag ,pg17-fts ,pg17-olap ,pg17-feat ,pg17-lang ,pg17-type ,pg17-util ,pg17-func ,pg17-admin ,pg17-stat ,pg17-sec ,pg17-fdw ,pg17-sim ,pg17-etl ,
pg16-core ,pg16-time ,pg16-gis ,pg16-rag ,pg16-fts ,pg16-olap ,pg16-feat ,pg16-lang ,pg16-type ,pg16-util ,pg16-func ,pg16-admin ,pg16-stat ,pg16-sec ,pg16-fdw ,pg16-sim ,pg16-etl ,
pg15-main, pg14-main, pg13-main
]
...