Uses a much nicer CPU tuning configuration, leveraging systemd's AllowedCPUs and CPUAffinity options within a set of slices (some default, some custom). Configuration is also greatly simplified versus the previous implementation: it simply asks for the number of CPUs to dedicate to the system and to the OSDs, and calculates everything else that is required. Also switches (back) to the v2 unified cgroup hierarchy by default, as required by the systemd AllowedCPUs directive.
---
# Logging configuration (uncomment to override defaults)
# These default options are generally best for most clusters; override these if you want more granular
# control over the logging output of the PVC system.
#pvc_log_to_file: False  # Log to a file in /var/log/pvc
#pvc_log_to_stdout: True  # Log to stdout (i.e. journald)
#pvc_log_to_zookeeper: True  # Log to Zookeeper (required for 'node log' commands)
#pvc_log_colours: True  # Log colourful prompts for states instead of text
#pvc_log_dates: False  # Log dates (useful with log_to_file, not useful with log_to_stdout as journald adds these)
#pvc_log_keepalives: True  # Log a keepalive event every pvc_keepalive_interval seconds
#pvc_log_keepalive_cluster_details: True  # Log cluster details (VMs, load, etc.) during keepalive events
#pvc_log_keepalive_storage_details: True  # Log storage details (OSDs, pools, health) during keepalive events
#pvc_log_console_lines: 1000  # The number of VM console log lines to store in Zookeeper for 'vm log' commands.
#pvc_log_node_lines: 2000  # The number of node log lines to store in Zookeeper for 'node log' commands.

# Timing and fencing configuration (uncomment to override defaults)
# These default options are generally best for most clusters; override these if you want more granular
# control over the timings of various areas of the cluster, for instance if your hardware is slow or error-prone.
#pvc_vm_shutdown_timeout: 180  # Number of seconds before a 'shutdown' VM is forced off
#pvc_keepalive_interval: 5  # Number of seconds between keepalive ticks
#pvc_fence_intervals: 6  # Number of keepalive ticks before a node is considered dead
#pvc_suicide_intervals: 0  # Number of keepalive ticks before a node considers itself dead (0 to disable)
#pvc_fence_successful_action: migrate  # What to do with VMs when a fence is successful (migrate, None)
#pvc_fence_failed_action: None  # What to do with VMs when a fence is failed (migrate, None) - migrate is DANGEROUS without pvc_suicide_intervals set to < pvc_fence_intervals
#pvc_migrate_target_selector: mem  # The selector to use for migrating VMs if not explicitly set; one of mem, memfree, load, vcpus, vms
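# As a worked example of the defaults above: a node is only fenced after missing
# pvc_fence_intervals consecutive keepalives, i.e. after roughly
# pvc_keepalive_interval * pvc_fence_intervals = 5 * 6 = 30 seconds without a keepalive.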

# Client API basic configuration
pvc_api_listen_address: "{{ pvc_upstream_floatingip }}"
pvc_api_listen_port: "7370"
pvc_api_secret_key: ""  # Use pwgen to generate
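# For example, a suitably random secret key could be generated with pwgen (any sufficiently
# long random string works; the 64-character length below is only a suggestion):
#   pwgen -s 64 1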

# Client API user tokens
# Create a token (random UUID or password) for each user you wish to have access to the PVC API.
# The first token will always be used for the "local" connection, and thus at least one token MUST be defined.
pvc_api_enable_authentication: True
pvc_api_tokens:
# - description: "myuser"
#   token: "a3945326-d36c-4024-83b3-2a8931d7785a"

# PVC API SSL configuration
# Use these options to enable SSL for the API listener, providing security over WAN connections.
# There are two options for defining the SSL certificate and key to use:
#   a) Set both pvc_api_ssl_cert_path and pvc_api_ssl_key_path to paths to an existing SSL combined (CA + cert) certificate and key, respectively, on the system.
#   b) Set both pvc_api_ssl_cert and pvc_api_ssl_key to the raw PEM-encoded contents of an SSL combined (CA + cert) certificate and key, respectively, which will be installed under /etc/pvc.
# If the _path options are non-empty, the raw entries are ignored and will not be used.
pvc_api_enable_ssl: False
pvc_api_ssl_cert_path:
pvc_api_ssl_cert: >
  # A RAW CERTIFICATE FILE, installed to /etc/pvc/api-cert.pem
pvc_api_ssl_key_path:
pvc_api_ssl_key: >
  # A RAW KEY FILE, installed to /etc/pvc/api-key.pem
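# As an illustration of option (a) above, the _path variables could instead point at an existing
# combined certificate and key already present on each node (the paths below are hypothetical):
#pvc_api_ssl_cert_path: "/etc/ssl/certs/pvc-api-cert.pem"
#pvc_api_ssl_key_path: "/etc/ssl/private/pvc-api-key.pem"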

# Ceph storage configuration
pvc_ceph_storage_secret_uuid: ""  # Use uuidgen to generate

# Database configuration
pvc_dns_database_name: "pvcdns"
pvc_dns_database_user: "pvcdns"
pvc_dns_database_password: ""  # Use pwgen to generate
pvc_api_database_name: "pvcapi"
pvc_api_database_user: "pvcapi"
pvc_api_database_password: ""  # Use pwgen to generate
pvc_replication_database_user: "replicator"
pvc_replication_database_password: ""  # Use pwgen to generate
pvc_superuser_database_user: "postgres"
pvc_superuser_database_password: ""  # Use pwgen to generate

# Network routing configuration
# > The ASN should be a private ASN number.
# > The listed routers are those which will learn routes to the PVC client networks via BGP;
#   they should speak BGP and allow sessions from the PVC nodes.
pvc_asn: "65500"
pvc_routers:
  - "192.168.100.1"

# PVC Node list
# > Every node configured with this playbook must be specified in this list.
pvc_nodes:
  - hostname: "pvchv1"  # This name MUST match the Ansible inventory_hostname's first portion, i.e. "inventory_hostname.split('.')[0]"
    is_coordinator: yes
    node_id: 1
    router_id: "192.168.100.11"
    upstream_ip: "192.168.100.11"
    cluster_ip: "10.0.0.1"
    storage_ip: "10.0.1.1"
    ipmi_host: "{{ ipmi['hosts']['pvchv1']['hostname'] }}"  # Note the node hostname key in here
    ipmi_user: "{{ ipmi['users']['pvc']['username'] }}"
    ipmi_password: "{{ ipmi['users']['pvc']['password'] }}"
    cpu_tuning:  # Example of cpu_tuning overrides per-node, only relevant if enabled; see below
      system_cpus: 2
      osd_cpus: 2
  - hostname: "pvchv2"  # This name MUST match the Ansible inventory_hostname's first portion, i.e. "inventory_hostname.split('.')[0]"
    is_coordinator: yes
    node_id: 2
    router_id: "192.168.100.12"
    upstream_ip: "192.168.100.12"
    cluster_ip: "10.0.0.2"
    storage_ip: "10.0.1.2"
    ipmi_host: "{{ ipmi['hosts']['pvchv2']['hostname'] }}"  # Note the node hostname key in here
    ipmi_user: "{{ ipmi['users']['pvc']['username'] }}"
    ipmi_password: "{{ ipmi['users']['pvc']['password'] }}"
  - hostname: "pvchv3"  # This name MUST match the Ansible inventory_hostname's first portion, i.e. "inventory_hostname.split('.')[0]"
    is_coordinator: yes
    node_id: 3
    router_id: "192.168.100.13"
    upstream_ip: "192.168.100.13"
    cluster_ip: "10.0.0.3"
    storage_ip: "10.0.1.3"
    ipmi_host: "{{ ipmi['hosts']['pvchv3']['hostname'] }}"  # Note the node hostname key in here
    ipmi_user: "{{ ipmi['users']['pvc']['username'] }}"
    ipmi_password: "{{ ipmi['users']['pvc']['password'] }}"
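# Additional hypervisors are added to the list in the same way; a hypothetical fourth,
# non-coordinator node might look like the following (all values below are illustrative
# only and must match your environment):
#  - hostname: "pvchv4"
#    is_coordinator: no
#    node_id: 4
#    router_id: "192.168.100.14"
#    upstream_ip: "192.168.100.14"
#    cluster_ip: "10.0.0.4"
#    storage_ip: "10.0.1.4"
#    ipmi_host: "{{ ipmi['hosts']['pvchv4']['hostname'] }}"
#    ipmi_user: "{{ ipmi['users']['pvc']['username'] }}"
#    ipmi_password: "{{ ipmi['users']['pvc']['password'] }}"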

# Bridge device entry
# This device is passed to PVC and is used when creating bridged networks. Normal managed networks are
# created on top of the "cluster" interface defined below; bridged networks, however, must be created
# directly on an underlying non-vLAN network device. This can be the same underlying device as the
# upstream/cluster/storage networks (especially if the upstream network device is not a vLAN itself),
# or a different device separate from the other 3 main networks.
pvc_bridge_device: bondU  # Replace based on your network configuration
pvc_bridge_mtu: 1500  # Replace based on your network configuration

# SR-IOV device configuration
# SR-IOV enables the passing of hardware-virtualized network devices (VFs), created on top of SR-IOV-enabled
# physical NICs (PFs), into virtual machines. SR-IOV is a complex topic, and will not be discussed in detail
# here. Instead, the SR-IOV mode is disabled by default and a commented-out example configuration is shown.
pvc_sriov_enable: False
#pvc_sriov_device:
#  - phy: ens1f0
#    mtu: 9000
#    vfcount: 6

# Memory tuning
# > ADVANCED TUNING: For most users, this is unnecessary and PVC will run fine with the default memory
#   allocations. Uncomment these options only in low-memory situations (nodes with <32GB RAM).
#
# OSD memory limit - 939524096 (~900MB) is the lowest possible value; default is 4GB.
# > This option is *only* applied at cluster bootstrap and cannot be changed later
#   here, only by editing the `files/ceph/<cluster>/ceph.conf` file directly.
#pvc_osd_memory_limit: 939524096
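# For reference when setting pvc_osd_memory_limit, the value is expressed in bytes:
# 939524096 = 896 MiB (~900MB), 1073741824 = 1 GiB, 2147483648 = 2 GiB, and
# 4294967296 = 4 GiB (the default noted above).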
#
# Zookeeper heap memory limit, sets the Xms and Xmx values for the Java process; default is 512M.
# > WARNING: Unless you have an extremely limited amount of RAM, changing this setting is NOT RECOMMENDED.
#   Lowering the heap limit may cause poor performance or crashes in Zookeeper during some tasks.
#pvc_zookeeper_heap_limit: 128M  # 1/4 of default
#
# Zookeeper stack memory limit, sets the Xss value for the Java process; default is 1024M.
# > WARNING: Unless you have an extremely limited amount of RAM, changing this setting is NOT RECOMMENDED.
#   Lowering the stack limit may cause poor performance or crashes in Zookeeper during some tasks.
#pvc_zookeeper_stack_limit: 256M  # 1/4 of default

# CPU tuning
# > ADVANCED TUNING: For most users, this is unnecessary and PVC will run fine with the default CPU
#   allocations. Adjust these options only for clusters where CPU optimization is needed.
# > Defines CPU tuning/affinity options for various subsystems within PVC. This is useful to
#   help limit the impact that noisy elements may have on other elements, e.g. busy VMs on
#   OSDs, or system processes on latency-sensitive VMs.
# > To enable tuning, set enabled to yes.
# > Within "nodes", two counts are specified:
#   * system_cpus: The number of CPUs to assign to the "system" slice, i.e. all non-VM,
#                  non-OSD processes on the system. Should usually be at least 2, and be
#                  higher on the coordinators of larger clusters (i.e. >5 nodes).
#   * osd_cpus:    The number of CPUs to assign to the "osd" slice, i.e. all OSD processes.
#                  Should be at least 1 per OSD, and ideally 2 per OSD for best performance.
#   A third count, for the VM CPUs, is autogenerated based on the total node CPU count and
#   the above two values (using all remaining CPUs).
# > Tuning is done based on cores; for systems with SMT (>1 thread per core), all SMT threads
#   within a given core are assigned to the same CPU set. For example, with system_cpus set to 2
#   on a node with 16 cores and 2 threads per core, the system CPU set will be:
#     0,1,16,17
#   leveraging the assumption that Linux enumerates all physical cores before all SMT threads.
# > This tuning section under "nodes" is global to the cluster; to override these values on
#   a per-node basis, use the corresponding "cpu_tuning" section of a given "pvc_nodes" entry,
#   as shown in the example "pvchv1" entry above.
# > If disabled after being enabled, the tuning configurations on each node will be removed
#   on the next run. A reboot of all nodes is required to fully disable the tuning.
cpu_tuning:
  enabled: no
  nodes:
    system_cpus: 2  # Set based on your actual system configuration (min 2, increase on coordinators if many nodes)
    osd_cpus: 2  # Set based on your actual number of OSDs
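# As an illustration only, assuming CPUs are allocated in the order system, then OSD, then VM,
# on a hypothetical node with 16 cores and 2 threads per core using the example values above:
#   system slice: cores 0-1   -> AllowedCPUs 0,1,16,17
#   osd slice:    cores 2-3   -> AllowedCPUs 2,3,18,19
#   VM slice:     cores 4-15  -> AllowedCPUs 4-15,20-31 (all remaining CPUs)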

# Configuration file networks
# > Taken from base.yml's configuration; DO NOT MODIFY THIS SECTION.
pvc_upstream_device: "{{ networks['upstream']['device'] }}"
pvc_upstream_mtu: "{{ networks['upstream']['mtu'] }}"
pvc_upstream_domain: "{{ networks['upstream']['domain'] }}"
pvc_upstream_netmask: "{{ networks['upstream']['netmask'] }}"
pvc_upstream_subnet: "{{ networks['upstream']['subnet'] }}"
pvc_upstream_floatingip: "{{ networks['upstream']['floating_ip'] }}"
pvc_upstream_gatewayip: "{{ networks['upstream']['gateway_ip'] }}"
pvc_cluster_device: "{{ networks['cluster']['device'] }}"
pvc_cluster_mtu: "{{ networks['cluster']['mtu'] }}"
pvc_cluster_domain: "{{ networks['cluster']['domain'] }}"
pvc_cluster_netmask: "{{ networks['cluster']['netmask'] }}"
pvc_cluster_subnet: "{{ networks['cluster']['subnet'] }}"
pvc_cluster_floatingip: "{{ networks['cluster']['floating_ip'] }}"
pvc_storage_device: "{{ networks['storage']['device'] }}"
pvc_storage_mtu: "{{ networks['storage']['mtu'] }}"
pvc_storage_domain: "{{ networks['storage']['domain'] }}"
pvc_storage_netmask: "{{ networks['storage']['netmask'] }}"
pvc_storage_subnet: "{{ networks['storage']['subnet'] }}"
pvc_storage_floatingip: "{{ networks['storage']['floating_ip'] }}"