---
all:
children:
cluster_machines:
children:
hypervisors:
hosts:
node1:
# ansible variables
ansible_host: 10.10.10.1
hostname: "node1"
# Netplan configuration files. These are examples.
# For advanced configuration, write your own.
netplan_configurations:
- inventories/netplan_admin_br0_example.yaml.j2
- inventories/netplan_ptp_interface_example.yaml.j2
- inventories/netplan_cluster_example.yaml.j2
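# For illustration only, a minimal admin bridge template could look roughly
# like the sketch below (an assumption, not the shipped example; it bridges
# the admin interface into br0 using variables from this inventory):
#   network:
#     version: 2
#     renderer: networkd
#     bridges:
#       br0:
#         interfaces: ["{{ network_interface }}"]
#         addresses: ["{{ ip_addr }}/{{ subnet | default(24) }}"]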
# Main network configuration
network_interface: eno8303 #admin interface
ip_addr: "{{ ansible_host }}"
subnet: 25 #subnet for main interface (br0), default is 24, override if necessary
#br0vlan: 159 #if the main interface on br0 is on a vlan that the host must manage
# snmp network settings
snmp_admin_ip_addr: 10.10.11.1 # used to listen for SNMP requests
# PTP network settings
ptp_interface: "eno12419" #OPTIONAL PTP Interface
ptp_vlanid: 100 #OPTIONAL VlanID for PTP
ptp_delay_mechanism: P2P # OPTIONAL E2E or P2P, default is P2P
ptp_network_transport: L2 # OPTIONAL L2 or UDPv4, default is L2
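# These two settings presumably map to the linuxptp/ptp4l options of the
# same name, e.g. a generated [global] section containing:
#   network_transport L2
#   delay_mechanism P2P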
# Cluster network settings
team0_0: "eno12399" # cluster network first interface
team0_1: "eno12409" # cluster network second interface
#hsr_mac_address: "70:FF:76:1C:0E:8C" # if using HSR for cluster network, you need to specify the mac address for each host
cluster_next_ip_addr: "192.168.55.2" # node 2 cluster network IP
cluster_previous_ip_addr: "192.168.55.3" # node 3 cluster network IP
cluster_ip_addr: "192.168.55.1"
#team0subnet: 24 #subnet for cluster interface (team0), default is 24, override if necessary
# OVS configuration (see ovs_topology inventory)
br_rstp_priority: 12288
brBRIDGE1_ext: eno1234 #physical nic to use with BRIDGE1
ovs_vsctl_cmds: # Extra ovs-vsctl commands to be run by the network playbook
- "set Bridge brBRIDGE1 netflow=@nf0 -- --id=@nf0 create NetFlow targets=\\\"192.168.10.1:2055\\\" add_id_to_interface=true"
# Affinity
nics_affinity: # Optional, only useful with RT containers or macvtap VMs
- eth0: 0-3,4-7 # NICs and their associated CPUs list
- eth1: 8-11,12-15 # NICs and their associated CPUs list
node2:
# ansible variables
ansible_host: 10.10.10.2
hostname: "node2"
netplan_configurations:
- inventories/netplan_admin_br0_example.yaml.j2
- inventories/netplan_ptp_interface_example.yaml.j2
- inventories/netplan_cluster_example.yaml.j2
# Main network configuration
network_interface: eno8303
ip_addr: "{{ ansible_host }}"
subnet: 25
# snmp network settings
snmp_admin_ip_addr: 10.10.11.1 # used to listen for SNMP requests
# PTP network settings
ptp_interface: "eno12419" #OPTIONAL PTP Interface
ptp_vlanid: 100 #OPTIONAL VlanID for PTP
ptp_delay_mechanism: P2P # OPTIONAL E2E or P2P, default is P2P
ptp_network_transport: L2 # OPTIONAL L2 or UDPv4, default is L2
# Cluster network settings
team0_0: "eno12399"
team0_1: "eno12409"
#hsr_mac_address: "70:FF:76:1C:0E:8D" # if using HSR for cluster network, you need to specify the mac address for each host
cluster_next_ip_addr: "192.168.55.3" # node 3 cluster network IP
cluster_previous_ip_addr: "192.168.55.1" # node 1 cluster network IP
cluster_ip_addr: "192.168.55.2"
#team0subnet: 24 #subnet for cluster interface (team0), default is 24, override if necessary
br_rstp_priority: 16384
brBRIDGE1_ext: eno1234 #physical nic to use with BRIDGE1 (see the ovs topology inventory)
# Affinity
nics_affinity: # Optional, only useful with RT containers or macvtap VMs
- eth0: 0-3,4-7 # NICs and their associated CPUs list
- eth1: 8-11,12-15 # NICs and their associated CPUs list
node3:
# ansible variables
ansible_host: 10.10.10.3
hostname: "node3"
netplan_configurations:
- inventories/netplan_admin_br0_example.yaml.j2
- inventories/netplan_ptp_interface_example.yaml.j2
- inventories/netplan_cluster_example.yaml.j2
# Main network interface configuration
network_interface: eno8303
ip_addr: "{{ ansible_host }}"
subnet: 25
# snmp network settings
snmp_admin_ip_addr: 10.10.11.1 # used to listen for SNMP requests
# PTP network settings
ptp_interface: "eno12419" #OPTIONAL PTP Interface
ptp_vlanid: 100 #OPTIONAL VlanID for PTP
ptp_delay_mechanism: P2P # OPTIONAL E2E or P2P, default is P2P
ptp_network_transport: L2 # OPTIONAL L2 or UDPv4, default is L2
# Cluster network settings
team0_0: "eno12399"
team0_1: "eno12409"
#hsr_mac_address: "70:FF:76:1C:0E:8E" # if using HSR for cluster network, you need to specify the mac address for each host
cluster_next_ip_addr: "192.168.55.1" # node 1 cluster network IP
cluster_previous_ip_addr: "192.168.55.2" # node 2 cluster network IP
cluster_ip_addr: "192.168.55.3"
#team0subnet: 24 #subnet for cluster interface (team0), default is 24, override if necessary
br_rstp_priority: 16384
brBRIDGE1_ext: eno1234 #physical nic to use with BRIDGE1 (see the ovs topology inventory)
# Affinity
nics_affinity: # Optional, only useful with RT containers or macvtap VMs
- eth0: 0-3,4-7 # NICs and their associated CPUs list
- eth1: 8-11,12-15 # NICs and their associated CPUs list
# hypervisors common vars
vars:
# SRIOV, Optional configuration
sriov_driver: 'igb'
sriov:
'eno12429': 7
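# As a sketch of what this configuration is expected to do (an assumption
# about the underlying mechanism): create 7 virtual functions on eno12429,
# roughly equivalent to:
#   echo 7 > /sys/class/net/eno12429/device/sriov_numvfs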
# Syslog log sending configuration
# Syslog client keys. If not set, syslog will send logs using an unencrypted connection.
syslog_tls_ca: ../keys/syslog_tls_ca.crt
syslog_tls_key: ../keys/syslog_tls_key.pem
syslog_server_ip: "10.10.10.10" # If not set, syslog will not send any logs over the network
syslog_tls_port: 6514 #optional: default is 6514
syslog_tcp_port: 514 #optional: default is 601. Used only if syslog_tls_ca and syslog_tls_key are not defined
#OPTIONAL
logstash_server_ip: 10.10.10.10
# Realtime/CPUs cgroups isolation configuration
# All those parameters are optional.
isolcpus: "2-5,14-17" # CPUs to isolate
#custom_tuned_profile_path: ../inventories/custom_tuned.conf.j2
cpusystem: "0,12" # CPUs reserved for the system
cpuuser: "1,13" # CPUs reserved for user applications
cpumachines: "2-11,14-23" # CPUs reserved for VMs
cpumachinesrt: "2-5,14-17" # CPUs reserved for realtime VMs
cpumachinesnort: "6-11,18-23" # CPUs reserved for non-realtime VMs
cpuovs: "23" # CPUs reserved for OVS
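# Worked example for the values above, assuming a 12-core/24-thread machine
# where CPU n and CPU n+12 are hyperthread siblings: each range pairs a core
# with its sibling (e.g. 2-5 with 14-17), so an isolated core never shares
# hardware with a housekeeping one.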
# Ceph groups
# All machines in the cluster must be part of the mons group
mons:
hosts:
node1:
node2:
node3:
# Machines that will be used as OSDs (which will store data)
osds:
hosts:
node1:
node2:
node3:
vars:
# Ceph settings
# ceph_osd_disk needs to be set to the "/dev/disk/by-path/" link of the disk used by the OSD
ceph_osd_disk: "/dev/disk/by-path/pci-0000:c3:00.0-scsi-0:0:1:0"
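# You can list the stable by-path links of the disks on a node with:
#   ls -l /dev/disk/by-path/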
# Variables required by ceph-ansible:
# lvm_volumes: # Optional
# - data: lv_ceph # Name of the logical volume to use for the CEPH OSD volume
# data_vg: vg_ceph # Name of the volume group to use for the CEPH OSD volume
# data_size: 5G # Size of the logical volume, default in megabytes, possible values: [0-9]+[bBsSkKmMgGtTpPeE]
# device: "{{ ceph_osd_disk }}"
# device_number: 1 # Number of the partition in the ceph_osd_disk
# device_size: 7GiB # Size of the partition in the ceph_osd_disk, default in megabytes, possible units: B, KB, KiB, MB, MiB, GB, GiB, TB, TiB
devices: # if lvm_volumes is not defined, Ceph uses the entire disk
- "{{ ceph_osd_disk }}"
# Machines that will be used as clients (which will use Ceph)
# All machines in the cluster must be part of the clients group
# You can also add some administration machines to this group
clients:
hosts:
node1:
node2:
node3:
vars:
# Variables required by ceph-ansible.
# These are overrides needed by SEAPATH. Do not change them unless you know exactly what you are doing
user_config: true
rbd:
name: "rbd"
application: "rbd"
pg_autoscale_mode: on
target_size_ratio: 1
pools:
- "{{ rbd }}"
keys:
- name: client.libvirt
caps:
mon: 'profile rbd, allow command "osd blacklist"'
osd: "allow class-read object_prefix rbd_children, profile rbd pool=rbd"
mode: "{{ ceph_keyring_permissions }}"
# Common vars for all hosts
vars:
# Ansible vars
ansible_connection: ssh
ansible_user: ansible
ansible_python_interpreter: /usr/bin/python3
ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
ansible_remote_tmp: /tmp/.ansible/tmp
# Main network configuration
gateway_addr: 10.0.0.1
dns_servers: # remove this variable to disable DNS; leave it empty for DNS via DHCP
- 8.8.8.8
- 4.4.4.4
# NTP servers used by timemaster
ntp_servers:
- "185.254.101.25"
- "51.145.123.29"
# Ceph settings
# Variables required by ceph-ansible.
# These are overrides needed by SEAPATH. Do not change them unless specified (osd_pool_default*) or you know exactly what you are doing
configure_firewall: false
ntp_service_enabled: false
ceph_origin: distro
monitor_address: "{{ cluster_ip_addr }}"
public_network: "192.168.55.0/24"
cluster_network: "{{ public_network }}"
ceph_conf_overrides:
global:
osd_pool_default_size: 3 # to be set to the number of storage nodes
osd_pool_default_min_size: 2 # number of nodes minus the number of nodes we allow ourselves to lose, i.e. the minimum number of nodes that must be online for the pool to stay available
osd_pool_default_pg_num: 128
osd_pool_default_pgp_num: 128
osd_crush_chooseleaf_type: 1
mon_osd_min_down_reporters: 1
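# Worked example for this 3-node cluster, tolerating the loss of one node:
# osd_pool_default_size = 3 (one replica per storage node) and
# osd_pool_default_min_size = 3 - 1 = 2, so the pool stays available as long
# as two nodes are online.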
mon:
auth_allow_insecure_global_id_reclaim: false
osd:
osd_min_pg_log_entries: 500
osd_max_pg_log_entries: 500
osd_memory_target: 8076326604
dashboard_enabled: false
# Local path variables
image_directory: "/var/jenkins_home/images"
vm_directory: "/mnt/seapath/vm/"
vms_disks_directory: "{{ vm_directory }}"
vms_config_directory: "{{ vm_directory }}"
# qcow2tmpuploadfolder: folder to store the qcow2 file while importing a guest.
#qcow2tmpuploadfolder: "/data/.ansible"
# Grub password
# The password hash generated with "grub-mkpasswd-pbkdf2 -c 65536 -s 256 -l 64". In this example the password is "toto"
grub_password: grub.pbkdf2.sha512.65536.E291D66AEEB3C22BD6B019C5C3587A3094AE93D61E20D134EC6324925AE5045DCA61EF30B3BD04B4D6F7360B9C9B242AA68B1643CCB269C53658EC959B5964ADB9C9D5FAA280A291D8F95E3F255254A4119A2431AFE797F1949EE4FBBC4C74281C550C83DAED26C254224061BEFCEEBF8091A8D1BE406EBB3A3E8A519E36B4FE161BE191D407193E5DFEBCC09F8822B4060EA9CD1E6B8677D40D32826EF025CA494BCD209032F7CF2A4A2E74717E6D17E87A62AAA93E458C96F983E69FBFC4FD602403988EAF5AADCA4B5B145B0F6C6FFB53F55CD6C56C15C17B2F8A5B3A214F3140470566597760D9388084AE978DB5C0EBF7C868A855DB38DACA47A010417A.FDA0C3188FAC87FFA04D862AC9B1020F29FEEA3B9590BE534330D8C7CAB71444CBF527E39D7DAE640545139202B9CA77822CCE21AB1134110F6AF3EFD793E848
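# To generate your own hash, run the command below interactively and paste
# the resulting "grub.pbkdf2.sha512..." string here:
#   grub-mkpasswd-pbkdf2 -c 65536 -s 256 -l 64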
# Grub option: this will be added to the grub cmdline
grub_append: "console=ttyS0,115200"
# Extra kernel modules to be loaded at boot (you may need this depending on your hardware)
extra_kernel_modules: []
# Extra sysctl kernel configuration items for physical machines (content of file /etc/sysctl.d/00-seapathextra_physicalmachines.conf)
#extra_sysctl_physical_machines: |
# net.ipv4.ip_forward = 1
# net.ipv4.conf.all.promote_secondaries = 1
# Optional list variable to change the apt repositories. All existing repositories will be overridden.
# If defined as an empty list [], all apt repositories will be removed
apt_repo:
- http://ftp.fr.debian.org/debian bookworm main contrib non-free non-free-firmware
- http://security.debian.org/debian-security bookworm-security main contrib non-free non-free-firmware
- http://ftp.fr.debian.org/debian bookworm-backports main contrib non-free non-free-firmware
- https://artifacts.elastic.co/packages/8.x/apt stable main
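# Each entry above is presumably rendered as a "deb" line in the APT
# sources, for example:
#   deb http://ftp.fr.debian.org/debian bookworm main contrib non-free non-free-firmware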
# Default user with admin privileges, and password hash
admin_user: virtu
admin_passwd: "XXXXXX"
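# admin_passwd is expected to be a crypt(3) password hash (an assumption;
# check your playbook), which can be generated with, for example:
#   mkpasswd --method=sha-512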
# SSH public keys for the admin_user
admin_ssh_keys:
- "ssh-rsa key1XXX"
- "ssh-rsa key2XXX"
# Account used for libvirt live migration. If this variable is not set, the live migration feature will be disabled.
livemigration_user: livemigration
#pacemaker_shutdown_timeout: "2min"
extra_crm_cmd_to_run: |
# for NTP status monitoring, make sure host_ip is the IP address of the server and not its hostname
primitive ntpstatus_test ocf:seapath:ntpstatus params host_ip=10.10.10.10 multiplier=500 op monitor timeout=10 interval=10
primitive ptpstatus_test ocf:seapath:ptpstatus op monitor timeout=10 interval=10 op_params multiplier=1000
clone cl_ntpstatus_test ntpstatus_test meta target-role=Started
clone cl_ptpstatus_test ptpstatus_test meta target-role=Started
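# Once deployed, you can check that the clone resources above are started
# with:
#   crm status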
# If br0vlan is set, the host's remote access network IP is on a VLAN, but the host has been given a trunk port so that guests can connect to other VLANs. Create the interfaces for those guests here:
interfaces_on_br0:
INTERFACE1: 159 # access port on VLAN 159
INTERFACE2: [159,160] # trunk port on VLANs 159 and 160
INTERFACE3: [1-4094] # no filtering, excluding untagged traffic
INTERFACE4: [] # no filtering, including untagged traffic
#interfaces_to_wait_for: # By default, systemd-networkd-wait-online only waits for the cluster network interface (team0 or hsr0). In some setups it is useful to also wait for other relevant interfaces at boot time. This variable adds interfaces networkd will wait for at boot.
# - br0
#hosts_path: "../inventories/hosts.j2" # when defined, this file is templated and appended to the bottom of /etc/hosts
iptables_rules_path: "../inventories/iptables_rules_example.txt" # when defined, this file is uploaded to /etc/iptables/rules.v4 and the rules are loaded
#iptables_rules_template_path: "../inventories/iptables_rules_example.txt.j2" # when defined, this file is templated, then uploaded to /etc/iptables/rules.v4 and the rules are loaded
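# For reference, a minimal rules file in iptables-restore format could look
# like this (a sketch, not the shipped example file):
#   *filter
#   :INPUT DROP [0:0]
#   :FORWARD DROP [0:0]
#   :OUTPUT ACCEPT [0:0]
#   -A INPUT -i lo -j ACCEPT
#   -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
#   -A INPUT -p tcp --dport 22 -j ACCEPT
#   COMMIT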
#cluster_protocol: "HSR" # RSTP or HSR, default is RSTP
chrony_wait_timeout_sec: 180 # time in seconds the boot sequence will wait for chrony to sync. Optional: default is 180
#enable_vmmgr_http_api: false
#admin_cluster_ip: "10.10.11.4"
#vmmgr_http_port: 8080
#vmmgr_http_tls_crt_path: "../keys/vmmgr_http_tls_crt.pem"
#vmmgr_http_tls_key_path: "../keys/vmmgr_http_tls_key.pem"
#vmmgr_http_api_acl: |
# allow 10.10.12.12/32;
# allow {{ admin_cluster_ip }}/32;
# deny all;
#vmmgr_http_local_auth_file: "/var/local/vmmgrapi/.htpasswd"
# Optional list of local files to upload to the remote machines
upload_files:
# for example you can override some script to add customizations
- { src: '../inventories_private/snmp_getdata.py', dest: '/usr/local/sbin/snmp_getdata.py', mode: "0755" }
# you can also upload any arbitrary file you may need on your setup
- { src: '../inventories_private/toto.txt', dest: '/tmp/toto' }
# you can also upload and uncompress an archive file (set extract: true); in that case the destination must be a folder
- { src: '../inventories_private/toto.tar.bz2', dest: '/tmp/folder1', extract: true }
commands_to_run_after_upload:
- "/usr/bin/systemctl restart snmpd"
# optional extra directives to add to snmpd.conf
extra_snmpd_directives: |
extend crmstatus /usr/bin/timeout 1s /usr/bin/sudo -u hacluster /usr/local/bin/snmp_crmstatus.sh
# optional extra permissions to add to Debian-snmp sudoers file
extra_usersnmp_sudoers: |
{{ snmp_user_name }} ALL = (hacluster) NOPASSWD:EXEC: /usr/local/bin/snmp_crmstatus.sh ""
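# The crmstatus extension defined above can then be queried over SNMP with,
# for example (v2c shown for brevity; adapt to your SNMP accounts):
#   snmpget -v2c -c <community> <host> 'NET-SNMP-EXTEND-MIB::nsExtendOutputFull."crmstatus"'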
# Optional conntrackd synchronization configuration. This is useful when the
# hosts act as routers and you want the conntrack tables to be synchronized
# among them. If conntrackd_ignore_ip_list is defined, it enables the
# appropriate portion of the playbook, and its content is used as the list of
# IP addresses to IGNORE when synchronizing the conntrack table
conntrackd_ignore_ip_list: |
IPv4_address 127.0.0.0/8
IPv4_address 172.0.0.0/8
IPv4_address 192.0.0.0/8
# Accounts to be created for SNMP v3 support. Optional.
#snmp_accounts:
# - name: user1
# password: mypassword1
# - name: user2
# password: mypassword2
# - name: user3
# password: mypassword3