Unify and standardize inventory_hostname

Inconsistent hostname references (sometimes an FQDN, sometimes the short
name) were causing some confusing conflicts. Create a new fact called
"this_node", defined as inventory_hostname.split('.')[0] (i.e. the short
hostname), and use it everywhere instead of an FQDN or the raw inventory
hostname.
This commit is contained in:
2023-09-01 15:42:28 -04:00
parent 5de3ab0c3a
commit d24cb8a8ef
11 changed files with 47 additions and 37 deletions

View File

@ -15,9 +15,14 @@
when: newhost_check.changed
tags: always
# Set this_node fact
- set_fact:
this_node: "{{ inventory_hostname.split('.')[0] }}"
tags: always
# Set coordinator state fact
- set_fact:
is_coordinator: "{% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.is_coordinator }}{% endfor %}"
is_coordinator: "{% for node in pvc_nodes if node.hostname == this_node %}{{ node.is_coordinator }}{% endfor %}"
tags: always
#

View File

@ -1,5 +1,5 @@
iface ipmi inet manual
pre-up ipmitool lan set 1 ipsrc static
pre-up ipmitool lan set 1 ipaddr {{ ipmi['hosts'][inventory_hostname]['address'] }}
pre-up ipmitool lan set 1 netmask {{ ipmi['hosts'][inventory_hostname]['netmask'] }}
pre-up ipmitool lan set 1 defgw ipaddr {{ ipmi['hosts'][inventory_hostname]['gateway'] }}
pre-up ipmitool lan set 1 ipaddr {{ ipmi['hosts'][inventory_hostname.split('.')[0]]['address'] }}
pre-up ipmitool lan set 1 netmask {{ ipmi['hosts'][inventory_hostname.split('.')[0]]['netmask'] }}
pre-up ipmitool lan set 1 defgw ipaddr {{ ipmi['hosts'][inventory_hostname.split('.')[0]]['gateway'] }}

View File

@ -38,13 +38,13 @@
- "{{ bridges }}"
- name: add IP addresses to upstream bridge
command: ip address add {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.upstream_ip }}/{{ pvc_upstream_netmask }}{% endfor %} dev brupstream
command: ip address add {% for node in pvc_nodes if node.hostname == this_node %}{{ node.upstream_ip }}/{{ pvc_upstream_netmask }}{% endfor %} dev brupstream
ignore_errors: yes
- name: add IP addresses to cluster bridge
command: ip address add {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.cluster_ip }}/{{ pvc_cluster_netmask }}{% endfor %} dev brcluster
command: ip address add {% for node in pvc_nodes if node.hostname == this_node %}{{ node.cluster_ip }}/{{ pvc_cluster_netmask }}{% endfor %} dev brcluster
ignore_errors: yes
- name: add IP addresses to storage bridge (will error if storage == cluster)
command: ip address add {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.storage_ip }}/{{ pvc_storage_netmask }}{% endfor %} dev brstorage
command: ip address add {% for node in pvc_nodes if node.hostname == this_node %}{{ node.storage_ip }}/{{ pvc_storage_netmask }}{% endfor %} dev brstorage
ignore_errors: yes

View File

@ -1,8 +1,13 @@
---
# Set coordinator state
# Set this_node fact
- set_fact:
is_coordinator: "{% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.is_coordinator }}{% endfor %}"
this_node: "{{ inventory_hostname.split('.')[0] }}"
tags: always
# Set coordinator state fact
- set_fact:
is_coordinator: "{% for node in pvc_nodes if node.hostname == this_node %}{{ node.is_coordinator }}{% endfor %}"
tags: always
# First-run check

View File

@ -16,7 +16,7 @@ line vty
! BGP EVPN mesh configuration
!
router bgp {{ pvc_asn }}
bgp router-id {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.router_id }}{% endfor %}
bgp router-id {% for node in pvc_nodes if node.hostname == this_node %}{{ node.router_id }}{% endfor %}
no bgp default ipv4-unicast
! BGP sessions with route reflectors

View File

@ -3,6 +3,6 @@
listen_tls = 0
listen_tcp = 1
listen_addr = "{% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.cluster_ip }}{% endfor %}"
listen_addr = "{% for node in pvc_nodes if node.hostname == this_node %}{{ node.cluster_ip }}{% endfor %}"
tcp_port = "16509"
auth_tcp = "none"

View File

@ -3,9 +3,9 @@ namespace: /patroni/
name: {{ ansible_hostname }}
restapi:
listen: {% for node in pvc_nodes if node.hostname == inventory_hostname %}'{{ node.cluster_ip }}:8008'{% endfor %}
listen: {% for node in pvc_nodes if node.hostname == this_node %}'{{ node.cluster_ip }}:8008'{% endfor %}
connect_address: {% for node in pvc_nodes if node.hostname == inventory_hostname %}'{{ node.cluster_ip }}:8008'{% endfor %}
connect_address: {% for node in pvc_nodes if node.hostname == this_node %}'{{ node.cluster_ip }}:8008'{% endfor %}
zookeeper:
hosts: [ {% for node in pvc_nodes if node.is_coordinator %}"{{ node.cluster_ip }}:2181"{% if not loop.last %},{% endif %}{% endfor %} ]
@ -44,7 +44,7 @@ bootstrap:
postgresql:
listen: '0.0.0.0:5432'
connect_address: {% for node in pvc_nodes if node.hostname == inventory_hostname %}'{{ node.cluster_ip }}:5432'
connect_address: {% for node in pvc_nodes if node.hostname == this_node %}'{{ node.cluster_ip }}:5432'
{% endfor %}
log_destination: 'stderr'
log_min_messages: INFO

View File

@ -2,7 +2,7 @@
# pvcnoded configuration
# {{ ansible_managed }}
pvc:
node: {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.hostname.split('.')[0] }}{% endfor %}
node: {% for node in pvc_nodes if node.hostname == this_node %}{{ node.hostname.split('.')[0] }}{% endfor %}
debug: False
functions:
@ -55,11 +55,11 @@ pvc:
successful_fence: {{ pvc_fence_successful_action }}
failed_fence: {{ pvc_fence_failed_action }}
ipmi:
host: {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.ipmi_host }}{% endfor %}
host: {% for node in pvc_nodes if node.hostname == this_node %}{{ node.ipmi_host }}{% endfor %}
user: {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.ipmi_user }}{% endfor %}
user: {% for node in pvc_nodes if node.hostname == this_node %}{{ node.ipmi_user }}{% endfor %}
pass: {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.ipmi_password }}{% endfor %}
pass: {% for node in pvc_nodes if node.hostname == this_node %}{{ node.ipmi_password }}{% endfor %}
migration:
target_selector: {{ pvc_fence_migrate_target_selector }}
@ -87,17 +87,17 @@ pvc:
upstream:
device: {{ pvc_upstream_device }}
mtu: {{ pvc_upstream_mtu }}
address: {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.upstream_ip }}/{{ pvc_upstream_netmask }}{% endfor %}
address: {% for node in pvc_nodes if node.hostname == this_node %}{{ node.upstream_ip }}/{{ pvc_upstream_netmask }}{% endfor %}
cluster:
device: {{ pvc_cluster_device }}
mtu: {{ pvc_cluster_mtu }}
address: {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.cluster_ip }}/{{ pvc_cluster_netmask }}{% endfor %}
address: {% for node in pvc_nodes if node.hostname == this_node %}{{ node.cluster_ip }}/{{ pvc_cluster_netmask }}{% endfor %}
storage:
device: {{ pvc_storage_device }}
mtu: {{ pvc_storage_mtu }}
address: {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.storage_ip }}/{{ pvc_storage_netmask }}{% endfor %}
address: {% for node in pvc_nodes if node.hostname == this_node %}{{ node.storage_ip }}/{{ pvc_storage_netmask }}{% endfor %}
storage:
ceph_config_file: "/etc/ceph/ceph.conf"

View File

@ -21,7 +21,7 @@ autopurge.purgeInterval=1
# Listen on port 2181 on the cluster IP
clientPort=2181
clientPortAddress={% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.cluster_ip }}{% endfor %}
clientPortAddress={% for node in pvc_nodes if node.hostname == this_node %}{{ node.cluster_ip }}{% endfor %}
# Node list - all coordinators
{% for node in pvc_nodes if node.is_coordinator %}