pvc-ansible/group_vars/default/pvc.yml
Commit f79fb605de by Joshua M. Boniface, 2023-09-01: Support using existing SSL certs on system
Add the pvc_api_ssl_cert_path and pvc_api_ssl_key_path group_vars
options, which can point the SSL configuration at existing certificate
and key files already present on the filesystem. If these are empty (or
unset), the original pvc_api_ssl_cert and pvc_api_ssl_key raw-content
options are used as before.

This allows the administrator to obtain the certificates locally through
outside methods (such as Let's Encrypt), avoiding group_vars changes and
a redeployment every time the SSL keys change.
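
As a minimal sketch, assuming certbot's default live-directory layout (the
hostname pvc.example.com is purely illustrative), the new options could point
at a Let's Encrypt certificate like so:

    pvc_api_enable_ssl: True
    pvc_api_ssl_cert_path: "/etc/letsencrypt/live/pvc.example.com/fullchain.pem"
    pvc_api_ssl_key_path: "/etc/letsencrypt/live/pvc.example.com/privkey.pem"

Because only paths are stored in group_vars, renewed certificates do not
require editing this file or redeploying the playbook.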

---
# Logging configuration (uncomment to override defaults)
# These default options are generally best for most clusters; override these if you want more granular
# control over the logging output of the PVC system.
#pvc_log_to_file: False # Log to a file in /var/log/pvc
#pvc_log_to_stdout: True # Log to stdout (i.e. journald)
#pvc_log_colours: True # Log colourful prompts for states instead of text
#pvc_log_dates: False # Log dates (useful with log_to_file, not useful with log_to_stdout as journald adds these)
#pvc_log_keepalives: True # Log keepalive event every pvc_keepalive_interval seconds
#pvc_log_keepalive_cluster_details: True # Log cluster details (VMs, load, etc.) during keepalive events
#pvc_log_keepalive_storage_details: True # Log storage details (OSDs, pools, health) during keepalive events
#pvc_log_console_lines: 1000 # The number of VM console log lines to store in Zookeeper for 'vm log' commands.
# Timing and fencing configuration (uncomment to override defaults)
# These default options are generally best for most clusters; override these if you want more granular
# control over the timings of various areas of the cluster, for instance if your hardware is slow or error-prone.
#pvc_vm_shutdown_timeout: 180 # Number of seconds before a 'shutdown' VM is forced off
#pvc_keepalive_interval: 5 # Number of seconds between keepalive ticks
#pvc_fence_intervals: 6 # Number of keepalive ticks before a node is considered dead
#pvc_suicide_intervals: 0 # Number of keepalive ticks before a node considers itself dead (0 to disable)
#pvc_fence_successful_action: migrate # What to do with VMs when a fence is successful (migrate, None)
#pvc_fence_failed_action: None # What to do with VMs when a fence fails (migrate, None) - migrate is DANGEROUS without pvc_suicide_intervals set to < pvc_fence_intervals
#pvc_fence_migrate_target_selector: mem # The selector to use for migrating VMs after a fence
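# > Rough illustration, assuming the defaults above are left unchanged: a node is considered dead after
#   pvc_keepalive_interval * pvc_fence_intervals = 5 * 6 = 30 seconds without a keepalive, at which point
#   fencing is attempted and pvc_fence_successful_action / pvc_fence_failed_action determine VM handling.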
# Client API basic configuration
pvc_api_listen_address: "{{ pvc_upstream_floatingip }}"
pvc_api_listen_port: "7370"
pvc_api_secret_key: "" # Use pwgen to generate
# Client API user tokens
# Create a token (random UUID or password) for each user you wish to have access to the PVC API.
# The first token will always be used for the "local" connection, and thus at least one token MUST be defined.
pvc_api_enable_authentication: True
pvc_api_tokens:
  - description: "myuser"
    token: "a3945326-d36c-4024-83b3-2a8931d7785a"
# PVC API SSL configuration
# Use these options to enable SSL for the API listener, providing security over WAN connections.
# There are two options for defining the SSL certificate and key to use:
# a) Set both pvc_api_ssl_cert_path and pvc_api_ssl_key_path to paths to an existing SSL combined (CA + cert) certificate and key, respectively, on the system.
# b) Set both pvc_api_ssl_cert and pvc_api_ssl_key to the raw PEM-encoded contents of an SSL combined (CA + cert) certificate and key, respectively, which will be installed under /etc/pvc.
# If the _path options are non-empty, the raw entries are ignored and will not be used.
pvc_api_enable_ssl: False
pvc_api_ssl_cert_path:
pvc_api_ssl_cert: >
  # A RAW CERTIFICATE FILE, installed to /etc/pvc/api-cert.pem
pvc_api_ssl_key_path:
pvc_api_ssl_key: >
  # A RAW KEY FILE, installed to /etc/pvc/api-key.pem
# Ceph storage configuration
pvc_ceph_storage_secret_uuid: "" # Use uuidgen to generate
# Database configuration
pvc_dns_database_name: "pvcdns"
pvc_dns_database_user: "pvcdns"
pvc_dns_database_password: "" # Use pwgen to generate
pvc_api_database_name: "pvcapi"
pvc_api_database_user: "pvcapi"
pvc_api_database_password: "" # Use pwgen to generate
pvc_replication_database_user: "replicator"
pvc_replication_database_password: "" # Use pwgen to generate
pvc_superuser_database_user: "postgres"
pvc_superuser_database_password: "" # Use pwgen to generate
# Network routing configuration
# > The ASN should be a private ASN.
# > The routers listed here will learn routes to the PVC client networks via BGP;
#   they should speak BGP and allow sessions from the PVC nodes.
pvc_asn: "65500"
pvc_routers:
  - "192.168.100.1"
# Node list
# > Every node configured with this playbook must be specified in this list.
pvc_nodes:
  - hostname: "pvchv1" # This name MUST match the Ansible inventory_hostname
    is_coordinator: yes
    node_id: 1
    router_id: "192.168.100.11"
    upstream_ip: "192.168.100.11"
    cluster_ip: "10.0.0.1"
    storage_ip: "10.0.1.1"
    ipmi_host: "pvchv1-lom.{{ local_domain }}"
    ipmi_user: "{{ username_ipmi_host }}"
    ipmi_password: "{{ passwd_ipmi_host }}"
  - hostname: "pvchv2"
    is_coordinator: yes
    node_id: 2
    router_id: "192.168.100.12"
    upstream_ip: "192.168.100.12"
    cluster_ip: "10.0.0.2"
    storage_ip: "10.0.1.2"
    ipmi_host: "pvchv2-lom.{{ local_domain }}"
    ipmi_user: "{{ username_ipmi_host }}"
    ipmi_password: "{{ passwd_ipmi_host }}"
  - hostname: "pvchv3"
    is_coordinator: yes
    node_id: 3
    router_id: "192.168.100.13"
    upstream_ip: "192.168.100.13"
    cluster_ip: "10.0.0.3"
    storage_ip: "10.0.1.3"
    ipmi_host: "pvchv3-lom.{{ local_domain }}"
    ipmi_user: "{{ username_ipmi_host }}"
    ipmi_password: "{{ passwd_ipmi_host }}"
# Bridge device entry
# This device is passed to PVC and is used when creating bridged networks. Normal managed networks are
# created on top of the "cluster" interface defined below; bridged networks, however, must be created
# directly on an underlying non-vLAN network device. This can be the same underlying device as the
# upstream/cluster/storage networks (especially if the upstream network device is not a vLAN itself),
# or a different device separate from the other 3 main networks.
pvc_bridge_device: bondU # Replace based on your network configuration
# Configuration file networks
# > Taken from base.yml's configuration; do not modify this section.
pvc_upstream_device: "{{ networks['upstream']['device'] }}"
pvc_upstream_mtu: "{{ networks['upstream']['mtu'] }}"
pvc_upstream_domain: "{{ networks['upstream']['domain'] }}"
pvc_upstream_netmask: "{{ networks['upstream']['netmask'] }}"
pvc_upstream_subnet: "{{ networks['upstream']['subnet'] }}"
pvc_upstream_floatingip: "{{ networks['upstream']['floating_ip'] }}"
pvc_upstream_gatewayip: "{{ networks['upstream']['gateway_ip'] }}"
pvc_cluster_device: "{{ networks['cluster']['device'] }}"
pvc_cluster_mtu: "{{ networks['cluster']['mtu'] }}"
pvc_cluster_domain: "{{ networks['cluster']['domain'] }}"
pvc_cluster_netmask: "{{ networks['cluster']['netmask'] }}"
pvc_cluster_subnet: "{{ networks['cluster']['subnet'] }}"
pvc_cluster_floatingip: "{{ networks['cluster']['floating_ip'] }}"
pvc_storage_device: "{{ networks['storage']['device'] }}"
pvc_storage_mtu: "{{ networks['storage']['mtu'] }}"
pvc_storage_domain: "{{ networks['storage']['domain'] }}"
pvc_storage_netmask: "{{ networks['storage']['netmask'] }}"
pvc_storage_subnet: "{{ networks['storage']['subnet'] }}"
pvc_storage_floatingip: "{{ networks['storage']['floating_ip'] }}"
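# > For reference only: the lookups above assume base.yml defines a 'networks' dictionary shaped
#   roughly like the following (values are illustrative; the 'cluster' and 'storage' entries follow
#   the same pattern, minus gateway_ip):
#     networks:
#       upstream:
#         device: "bondU"
#         mtu: 1500
#         domain: "example.com"
#         netmask: "24"
#         subnet: "192.168.100.0"
#         floating_ip: "192.168.100.10"
#         gateway_ip: "192.168.100.1"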