Skip to content

Commit

Permalink
vip-manager version 1.0
Browse files Browse the repository at this point in the history
Important Changes:
https://github.com/cybertec-postgresql/vip-manager/releases/tag/v1.0

Note: the configuration method has changed as of v1.0
https://github.com/cybertec-postgresql/vip-manager#migrating-configuration-from-releases-before-v10

and
Disable GPG signature checking for the vip-manager package being installed (on RedHat/CentOS)

fixed:
TASK [Install vip-manager] *****************************************
fatal: [10.128.64.157]: FAILED! => {"changed": false, "msg": "Failed to validate GPG signature for vip-manager-1.0_1-1.x86_64"}
  • Loading branch information
vitabaks committed Nov 4, 2020
1 parent a9e27e6 commit 75022e9
Show file tree
Hide file tree
Showing 4 changed files with 31 additions and 19 deletions.
2 changes: 2 additions & 0 deletions roles/vip-manager/tasks/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
package:
name: "/tmp/{{ vip_manager_package_repo | basename }}"
state: present
disable_gpg_check: true
when: ansible_os_family == "RedHat"
when: installation_method == "repo" and vip_manager_package_repo | length > 0
tags: vip, vip_manager, vip_manager_install
Expand All @@ -44,6 +45,7 @@
package:
name: "/tmp/{{ vip_manager_package_file | basename }}"
state: present
disable_gpg_check: true
when: ansible_os_family == "RedHat"
when: installation_method == "file" and vip_manager_package_file | length > 0
tags: vip, vip_manager, vip_manager_install
Expand Down
2 changes: 1 addition & 1 deletion roles/vip-manager/templates/vip-manager.service.j2
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ Before=patroni.service
[Service]
Type=simple

ExecStart=/usr/bin/vip-manager -config={{ vip_manager_conf }}
ExecStart=/usr/bin/vip-manager --config={{ vip_manager_conf }}

# VIP not released when service stopped https://github.com/cybertec-postgresql/vip-manager/issues/19
ExecStopPost=/sbin/ip addr del {{ vip_manager_ip }}/{{ vip_manager_mask }} dev {{ vip_manager_iface }}
Expand Down
44 changes: 27 additions & 17 deletions roles/vip-manager/templates/vip-manager.yml.j2
Original file line number Diff line number Diff line change
Expand Up @@ -4,40 +4,50 @@
interval: {{ vip_manager_interval }}

# the etcd or consul key which vip-manager will regularly poll.
key: "/service/{{ patroni_cluster_name }}/leader"
# if the value of the above key matches the NodeName (often the hostname of this host), vip-manager will try to add the virtual ip address to the interface specified in Iface
nodename: "{{ ansible_hostname }}"
trigger-key: "/service/{{ patroni_cluster_name }}/leader"
# if the value of the above key matches the trigger-value (often the hostname of this host), vip-manager will try to add the virtual ip address to the interface specified in Iface
trigger-value: "{{ ansible_hostname }}"

ip: {{ vip_manager_ip }} # the virtual ip address to manage
mask: {{ vip_manager_mask }} # netmask for the virtual ip
iface: {{ vip_manager_iface }} # interface to which the virtual ip will be added
netmask: {{ vip_manager_mask }} # netmask for the virtual ip
interface: {{ vip_manager_iface }} # interface to which the virtual ip will be added

# how the virtual ip should be managed. we currently support "ip addr add/remove" through shell commands or the Hetzner api
hosting_type: basic # possible values: basic, hetzner .
hosting-type: basic # possible values: basic, or hetzner.

endpoint_type: {{ dcs_type }} # etcd or consul
# a list that contains all endpoints to which etcd could talk.
dcs-type: {{ dcs_type }} # etcd or consul
# a list that contains all DCS endpoints to which vip-manager could talk.
{% if not dcs_exists|bool and dcs_type == 'etcd' %}
endpoints:
dcs-endpoints:
{% for host in groups['etcd_cluster'] %}
- http://{{ hostvars[host]['inventory_hostname'] }}:2379
{% endfor %}
{% endif %}
{% if dcs_exists|bool and dcs_type == 'etcd' %}
endpoints:
dcs-endpoints:
{% for etcd_hosts in patroni_etcd_hosts %}
- http://{{ etcd_hosts.host }}:{{ etcd_hosts.port }}
{% endfor %}
{% endif %}

# consul will always only use the first entry from this list.
# For consul, you'll obviously need to change the port to 8500. Unless you're using a different one. Maybe you're a rebel and are running consul on port 2379? Just to confuse people? Why would you do that? Oh, I get it.
# For consul, you'll obviously need to change the port to 8500.

#etcd-user: "patroni"
#etcd-password: "Julian's secret password"

# when etcd-ca-file is specified, TLS connections to the etcd endpoints will be used.
#etcd-ca-file: "/path/to/etcd/trusted/ca/file"
# when etcd-cert-file and etcd-key-file are specified, we will authenticate at the etcd endpoints using this certificate and key.
#etcd-cert-file: "/path/to/etcd/client/cert/file"
#etcd-key-file: "/path/to/etcd/client/key/file"

# etcd_user: "patroni"
# etcd_password: "Julian's secret password"
# don't worry about parameter with a prefix that doesn't match the endpoint_type. You can write anything there, I won't even look at it.
# consul_token: "Julian's secret token"
#consul-token: "Julian's secret token"

# how often things should be retried and how long to wait between retries. (currently only affects arpClient)
retry-num: 2
retry-after: 250 #in milliseconds

#how often things should be retried and how long to wait between retries. (currently only affects arpClient)
retry_num: 2
retry_after: 250 #in milliseconds
# verbose logs (currently only supported for hetzner)
verbose: false
2 changes: 1 addition & 1 deletion vars/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ synchronous_node_count: 1 # number of synchronous standby databases
with_haproxy_load_balancing: false # or 'true' if you want to install and configure the load-balancing

# vip-manager (if cluster_vip is specified and with_haproxy_load_balancing: false)
vip_manager_version: "0.6" # version to install
vip_manager_version: "1.0" # version to install
vip_manager_conf: "/etc/patroni/vip-manager.yml"
vip_manager_interval: "1000" # time (in milliseconds) after which vip-manager wakes up and checks if it needs to register or release ip addresses.
vip_manager_iface: "{{ vip_interface }}" # interface to which the virtual ip will be added
Expand Down

0 comments on commit 75022e9

Please sign in to comment.