---
# Logging configuration (uncomment to override defaults)
# These default options are generally best for most clusters; override these if you want more granular
# control over the logging output of the PVC system.
#pvc_log_to_file: False # Log to a file in /var/log/pvc
#pvc_log_to_stdout: True # Log to stdout (i.e. journald)
#pvc_log_to_zookeeper: True # Log to Zookeeper (required for 'node log' commands)
#pvc_log_colours: True # Log colourful prompts for states instead of text
#pvc_log_dates: False # Log dates (useful with log_to_file, not useful with log_to_stdout as journald adds these)
#pvc_log_keepalives: True # Log keepalive event every pvc_keepalive_interval seconds
#pvc_log_keepalive_cluster_details: True # Log cluster details (VMs, load, etc.) during keepalive events
#pvc_log_keepalive_storage_details: True # Log storage details (OSDs, pools, health) during keepalive events
#pvc_log_console_lines: 1000 # The number of VM console log lines to store in Zookeeper for 'vm log' commands.
#pvc_log_node_lines: 2000 # The number of node log lines to store in Zookeeper for 'node log' commands.

# Timing and fencing configuration (uncomment to override defaults)
# These default options are generally best for most clusters; override these if you want more granular
# control over the timings of various areas of the cluster, for instance if your hardware is slow or error-prone.
#pvc_vm_shutdown_timeout: 180 # Number of seconds before a 'shutdown' VM is forced off
#pvc_keepalive_interval: 5 # Number of seconds between keepalive ticks
#pvc_fence_intervals: 6 # Number of keepalive ticks before a node is considered dead
#pvc_suicide_intervals: 0 # Number of keepalive ticks before a node considers itself dead (0 to disable)
#pvc_fence_successful_action: migrate # What to do with VMs when a fence is successful (migrate, None)
#pvc_fence_failed_action: None # What to do with VMs when a fence fails (migrate, None) - migrate is DANGEROUS without pvc_suicide_intervals set to < pvc_fence_intervals
#pvc_fence_migrate_target_selector: mem # The selector to use for migrating VMs after a fence

# Client API basic configuration
pvc_api_listen_address: "{{ pvc_upstream_floatingip }}"
pvc_api_listen_port: "7370"
pvc_api_secret_key: "" # Use pwgen to generate
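# For example, one convenient way to generate a suitable random secret key, assuming the
# pwgen utility is installed on your workstation, is:
#   pwgen -s 64 1
# Any sufficiently long random string works; pwgen is just one way to produce it.
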
# Client API user tokens
# Create a token (random UUID or password) for each user you wish to have access to the PVC API.
# The first token will always be used for the "local" connection, and thus at least one token MUST be defined.
pvc_api_enable_authentication: True
pvc_api_tokens:
#  - description: "myuser"
#    token: "a3945326-d36c-4024-83b3-2a8931d7785a"

# PVC API SSL configuration
# Use these options to enable SSL for the API listener, providing security over WAN connections.
# There are two options for defining the SSL certificate and key to use:
# a) Set both pvc_api_ssl_cert_path and pvc_api_ssl_key_path to paths to an existing SSL combined (CA + cert) certificate and key, respectively, on the system.
# b) Set both pvc_api_ssl_cert and pvc_api_ssl_key to the raw PEM-encoded contents of an SSL combined (CA + cert) certificate and key, respectively, which will be installed under /etc/pvc.
# If the _path options are non-empty, the raw entries are ignored and will not be used.
pvc_api_enable_ssl: False
pvc_api_ssl_cert_path:
pvc_api_ssl_cert: >
  # A RAW CERTIFICATE FILE, installed to /etc/pvc/api-cert.pem
pvc_api_ssl_key_path:
pvc_api_ssl_key: >
  # A RAW KEY FILE, installed to /etc/pvc/api-key.pem
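
# As an illustration of option (a) above, these could point at an existing combined
# certificate and key already present on the nodes; the paths below are hypothetical
# and should be adjusted to wherever your certificate material actually lives:
#pvc_api_ssl_cert_path: "/etc/ssl/pvc/api-bundle.pem"
#pvc_api_ssl_key_path: "/etc/ssl/pvc/api-key.pem"
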
# Ceph storage configuration
pvc_ceph_storage_secret_uuid: "" # Use uuidgen to generate

# Database configuration
pvc_dns_database_name: "pvcdns"
pvc_dns_database_user: "pvcdns"
pvc_dns_database_password: "" # Use pwgen to generate
pvc_api_database_name: "pvcapi"
pvc_api_database_user: "pvcapi"
pvc_api_database_password: "" # Use pwgen to generate
pvc_replication_database_user: "replicator"
pvc_replication_database_password: "" # Use pwgen to generate
pvc_superuser_database_user: "postgres"
pvc_superuser_database_password: "" # Use pwgen to generate

# Network routing configuration
# > The ASN should be a private ASN number.
# > The routers listed here are those which will learn routes to the PVC client networks via BGP;
#   they should speak BGP and allow sessions from the PVC nodes.
pvc_asn: "65500"
pvc_routers:
  - "192.168.100.1"
# PVC Node list
# > Every node configured with this playbook must be specified in this list.
pvc_nodes:
  - hostname: "pvchv1" # This name MUST match the Ansible inventory_hostname
    is_coordinator: yes
    node_id: 1
    router_id: "192.168.100.11"
    upstream_ip: "192.168.100.11"
    cluster_ip: "10.0.0.1"
    storage_ip: "10.0.1.1"
    ipmi_host: "{{ ipmi['hosts']['pvchv1']['hostname'] }}" # Note the node inventory hostname key in here
    ipmi_user: "{{ ipmi['users']['pvc']['username'] }}"
    ipmi_password: "{{ ipmi['users']['pvc']['password'] }}"
  - hostname: "pvchv2"
    is_coordinator: yes
    node_id: 2
    router_id: "192.168.100.12"
    upstream_ip: "192.168.100.12"
    cluster_ip: "10.0.0.2"
    storage_ip: "10.0.1.2"
    ipmi_host: "{{ ipmi['hosts']['pvchv2']['hostname'] }}" # Note the node inventory hostname key in here
    ipmi_user: "{{ ipmi['users']['pvc']['username'] }}"
    ipmi_password: "{{ ipmi['users']['pvc']['password'] }}"
  - hostname: "pvchv3"
    is_coordinator: yes
    node_id: 3
    router_id: "192.168.100.13"
    upstream_ip: "192.168.100.13"
    cluster_ip: "10.0.0.3"
    storage_ip: "10.0.1.3"
    ipmi_host: "{{ ipmi['hosts']['pvchv3']['hostname'] }}" # Note the node inventory hostname key in here
    ipmi_user: "{{ ipmi['users']['pvc']['username'] }}"
    ipmi_password: "{{ ipmi['users']['pvc']['password'] }}"

# Bridge device entry
# This device is passed to PVC and is used when creating bridged networks. Normal managed networks are
# created on top of the "cluster" interface defined below; bridged networks, however, must be created
# directly on an underlying non-vLAN network device. This can be the same underlying device as the
# upstream/cluster/storage networks (especially if the upstream network device is not a vLAN itself),
# or a different device separate from the other 3 main networks.
pvc_bridge_device: bondU # Replace based on your network configuration

# SR-IOV device configuration
# SR-IOV enables the passing of hardware-virtualized network devices (VFs), created on top of SR-IOV-enabled
# physical NICs (PFs), into virtual machines. SR-IOV is a complex topic, and will not be discussed in detail
# here. Instead, the SR-IOV mode is disabled by default and a commented-out example configuration is shown.
pvc_sriov_enable: False
#pvc_sriov_device:
#  - phy: ens1f0
#    mtu: 9000
#    vfcount: 6

# Memory tuning
# > ADVANCED TUNING: For most users, this is unnecessary and PVC will run fine with the default memory
#   allocations. Uncomment these options only in low-memory situations (nodes with <32GB RAM).
#
# OSD memory limit - 939524096 (~900MB) is the lowest possible value; default is 4GB.
# > This option is *only* applied at cluster bootstrap and cannot be changed here later;
#   to adjust it afterwards, edit the `files/ceph/<cluster>/ceph.conf` file directly.
#pvc_osd_memory_limit: 939524096
#
# Zookeeper heap memory limit, sets Xms and Xmx values to the Java process; default is 512M.
# > WARNING: Unless you have an extremely limited amount of RAM, changing this setting is NOT RECOMMENDED.
#   Lowering the heap limit may cause poor performance or crashes in Zookeeper during some tasks.
#pvc_zookeeper_heap_limit: 128M # 1/4 of default
#
# Zookeeper stack memory limit, sets Xss value to the Java process; default is 1024M.
# > WARNING: Unless you have an extremely limited amount of RAM, changing this setting is NOT RECOMMENDED.
#   Lowering the stack limit may cause poor performance or crashes in Zookeeper during some tasks.
#pvc_zookeeper_stack_limit: 256M # 1/4 of default

# CPU pinning configuration via cset
# > ADVANCED TUNING: For most users, this is unnecessary and PVC will run fine with the default scheduling.
# > These options can be set to maximize the CPU performance of the Ceph subsystem. Because Ceph OSD
#   performance is limited more heavily by CPU than by anything else, for users with a lot of relatively
#   slow CPU cores, or for those looking to get maximum storage performance, tuning the pinning options here
#   might provide noticeable benefits.
# > This configuration makes use of the cset command and will dedicate a specific number of CPU cores to the
#   Ceph OSD processes on each node. This is accomplished by using cset's shield mechanism to create a cgroup
#   which will contain only Ceph OSD processes, while putting everything else onto the remaining CPUs.
# > Avoid using this tuning if you have fewer than 8 total CPU cores (excluding SMT threads); otherwise, you
#   might not have enough CPU cores left to properly run VMs, unless you are very careful with vCPU allocation.
# > Like the 'pvc_nodes' dictionary, these options are set per-host, even if all hosts are identical. This
#   is required to handle situations where hosts might have different CPU topologies. Each host can have a
#   specific set of CPUs that are included in the shield.
# > Ensure that you know which CPU cores are "real" and which are SMT "threads". This can be obtained using
#   the 'virsh capabilities' command and noting the 'siblings' entries for each CPU; an illustrative excerpt
#   is shown just below these notes.
# > Ensure you consider NUMA nodes when setting up this tuning. Generally speaking, it is better to keep the
#   OSD processes on one NUMA node for simplicity; more advanced tuning is outside of the scope of this
#   playbook.
# > You should set a number of cores in the shield (along with their respective SMT threads) equal to the
#   number of OSDs in the system. This can be adjusted later as needed. For instance, if you have 2 OSDs per
#   node, and each node has a 10-core SMT-capable CPU, you would want to assign cores 0 and 1 (the first two
#   real cores) and 10 and 11 (the SMT siblings of those cores in 'virsh capabilities') in the cset.
#
# The shield mode is disabled by default and a commented-out example configuration is shown.
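#
# For reference, 'virsh capabilities' reports the host CPU topology with per-CPU entries like the following
# (values here are illustrative only); CPUs sharing the same 'core_id' and 'siblings' values are SMT siblings:
#   <cpu id='0' socket_id='0' core_id='0' siblings='0,12'/>
#   <cpu id='12' socket_id='0' core_id='0' siblings='0,12'/>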
pvc_shield_osds_enable: False
#pvc_shield_osds_cset:
#  # This example host has 2x 6-core SMT-enabled CPUs; we want to use cores 0 (+SMT 12) and 2 (+SMT 14), which are
#  # both on physical CPU 0, for 2x OSDs.
#  - hostname: pvchv1
#    osd_cset:
#      - 0
#      - 2
#      - 12
#      - 14
#  # These example hosts each have 1x 8-core SMT-enabled CPU; we want to use cores 0 (+SMT 8) and 1 (+SMT 9) for 2x OSDs.
#  - hostname: pvchv2
#    osd_cset:
#      - 0
#      - 1
#      - 8
#      - 9
#  - hostname: pvchv3
#    osd_cset:
#      - 0
#      - 1
#      - 8
#      - 9

# Configuration file networks
# > Taken from base.yml's configuration; DO NOT MODIFY THIS SECTION.
pvc_upstream_device: "{{ networks['upstream']['device'] }}"
pvc_upstream_mtu: "{{ networks['upstream']['mtu'] }}"
pvc_upstream_domain: "{{ networks['upstream']['domain'] }}"
pvc_upstream_netmask: "{{ networks['upstream']['netmask'] }}"
pvc_upstream_subnet: "{{ networks['upstream']['subnet'] }}"
pvc_upstream_floatingip: "{{ networks['upstream']['floating_ip'] }}"
pvc_upstream_gatewayip: "{{ networks['upstream']['gateway_ip'] }}"
pvc_cluster_device: "{{ networks['cluster']['device'] }}"
pvc_cluster_mtu: "{{ networks['cluster']['mtu'] }}"
pvc_cluster_domain: "{{ networks['cluster']['domain'] }}"
pvc_cluster_netmask: "{{ networks['cluster']['netmask'] }}"
pvc_cluster_subnet: "{{ networks['cluster']['subnet'] }}"
pvc_cluster_floatingip: "{{ networks['cluster']['floating_ip'] }}"
pvc_storage_device: "{{ networks['storage']['device'] }}"
pvc_storage_mtu: "{{ networks['storage']['mtu'] }}"
pvc_storage_domain: "{{ networks['storage']['domain'] }}"
pvc_storage_netmask: "{{ networks['storage']['netmask'] }}"
pvc_storage_subnet: "{{ networks['storage']['subnet'] }}"
pvc_storage_floatingip: "{{ networks['storage']['floating_ip'] }}"