Compare commits

...

4 Commits

Author SHA1 Message Date
2fccbcda89 Add enhancements to autobackup
1. Add a cron mode to avoid exit(1) during cronjobs/timers
2. Revamp the remote_mount settings into auto_mount
   This removes a lot of unnecessary complexity while giving the
   administrator more flexibility in what they want to execute to mount
   a filesystem and how. The naming reflects the goal but the possible
   commands are arbitrary.
2023-10-27 02:07:24 -04:00
6ad51ea4bb Handle store exceptions in cli() function
Avoids having an unsuppressable error message in some contexts, and
provides a cleaner module.
2023-10-26 23:30:22 -04:00
5954feaa31 Add autobackup functionality to CLI
Adds autobackup (integrated, managed VM backups with automatic remote
filesystem mounting, including backup expiry/removal and automatic
full/incremental selection, independent from the manual "pvc vm backup"
commands) to the CLI client.

This is a bit of a large command to handle only inside the CLI client,
but this was chosen as it's the only real place for it aside from an
external script.

There are several major restrictions on this command, mainly that it
must be run from the primary coordinator using the "local" connection,
and that it must be run as "root".

The command is designed to run in a cron/systemd timer installed by
pvc-ansible when the appropriate group_vars are enabled, and otherwise
not touched.
2023-10-26 21:25:23 -04:00
e63d8e59e9 Install sample configs to /usr/share/pvc instead
Also clean up the old versions in the postinst as they are obsolete and
not needed going forward. These only ever served as reference for a
manual installation which itself is long-obsoleted, and thus can be put
somewhere less "important".
2023-10-26 13:00:54 -04:00
8 changed files with 484 additions and 26 deletions

View File

@@ -0,0 +1,52 @@
---
# Root level configuration key
autobackup:
# Backup root path on the node, used as the remote mountpoint
# Must be an absolute path beginning with '/'
# If auto_mount is enabled, the remote mount will be mounted on this directory
# If auto_mount is enabled, it is recommended to use a path under `/tmp` for this
# If auto_mount is disabled, a real filesystem must be mounted here (PVC system volumes are small!)
backup_root_path: "/tmp/backups"
# Suffix to the backup root path, used to allow multiple PVC systems to write to a single root path
# Must begin with '/'; leave empty to use the backup root path directly
# Note that most remote mount options can fake this if needed, but it is provided to ensure local compatibility
backup_root_suffix: "/mycluster"
# VM tag(s) to back up
# Only VMs with at least one of the given tag(s) will be backed up; all others will be skipped
backup_tags:
- "backup"
- "mytag"
# Backup schedule: when and what format to take backups
backup_schedule:
full_interval: 7 # Number of total backups between full backups; others are incremental
# > If this number is 1, every backup will be a full backup and no incremental
# backups will be taken
# > If this number is 2, every second backup will be a full backup, etc.
full_retention: 2 # Keep this many full backups; the oldest will be deleted when a new one is
# taken, along with all child incremental backups of that backup
# > Should usually be at least 2 when using incrementals (full_interval > 1) to
# avoid there being too few backups after cleanup from a new full backup
# Automatic mount settings
# These settings permit running an arbitrary set of commands, ideally a "mount" command or similar, to
# ensure that a remote filesystem is mounted on the backup root path
# While the examples here show absolute paths, that is not required; they will run with the $PATH of the
# executing environment (either the "pvc" command on a CLI or a cron/systemd timer)
# A "{backup_root_path}" f-string/str.format type variable MAY be present in any cmds string to represent
# the above configured root backup path, which is interpolated at runtime
# If multiple commands are given, they will be executed in the order given; if no commands are given,
# nothing is executed, but the keys MUST be present
auto_mount:
enabled: no # Enable automatic mount/unmount support
# These commands are executed at the start of the backup run and should mount a filesystem
mount_cmds:
# This example shows an NFS mount leveraging the backup_root_path variable
- "/usr/sbin/mount.nfs -o nfsvers=3 10.0.0.10:/backups {backup_root_path}"
# These commands are executed at the end of the backup run and should unmount a filesystem
unmount_cmds:
# This example shows a generic umount leveraging the backup_root_path variable
- "/usr/bin/umount {backup_root_path}"

View File

@@ -47,34 +47,17 @@ import click
###############################################################################
# Context and completion handler
# Context and completion handler, globals
###############################################################################
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"], max_content_width=120)
CONTEXT_SETTINGS = dict(
help_option_names=["-h", "--help"], max_content_width=MAX_CONTENT_WIDTH
)
IS_COMPLETION = True if environ.get("_PVC_COMPLETE", "") == "complete" else False
CLI_CONFIG = dict()
if not IS_COMPLETION:
cli_client_dir = environ.get("PVC_CLIENT_DIR", None)
home_dir = environ.get("HOME", None)
if cli_client_dir:
store_path = cli_client_dir
elif home_dir:
store_path = f"{home_dir}/.config/pvc"
else:
print(
"WARNING: No client or home configuration directory found; using /tmp instead"
)
store_path = "/tmp/pvc"
if not path.isdir(store_path):
makedirs(store_path)
if not path.isfile(f"{store_path}/{DEFAULT_STORE_FILENAME}"):
update_store(store_path, {"local": DEFAULT_STORE_DATA})
###############################################################################
# Local helper functions
@@ -1734,6 +1717,71 @@ def cli_vm_backup_remove(domain, backup_datestring, backup_path):
finish(retcode, retmsg)
###############################################################################
# > pvc vm autobackup
###############################################################################
@click.command(
name="autobackup", short_help="Perform automatic virtual machine backups."
)
@connection_req
@click.option(
"-f",
"--configuration",
"autobackup_cfgfile",
envvar="PVC_AUTOBACKUP_CFGFILE",
default=DEFAULT_AUTOBACKUP_FILENAME,
show_default=True,
help="Override default config file location.",
)
@click.option(
"--force-full",
"force_full_flag",
default=False,
is_flag=True,
help="Force all backups to be full backups this run.",
)
@click.option(
"--cron",
"cron_flag",
default=False,
is_flag=True,
help="Cron mode; don't error exit if this isn't the primary coordinator.",
)
def cli_vm_autobackup(autobackup_cfgfile, force_full_flag, cron_flag):
"""
Perform automated backups of VMs, with integrated cleanup and full/incremental scheduling.
This command enables automatic backup of PVC VMs at the block level, leveraging the various "pvc vm backup"
functions with an internal retention and cleanup system as well as determination of full vs. incremental
backups at different intervals. VMs are selected based on configured VM tags. The destination storage
may either be local, or provided by a remote filesystem which is automatically mounted and unmounted during
the backup run via a set of configured commands before and after the backup run.
NOTE: This command performs its tasks in a local context. It MUST be run from the cluster's active primary
coordinator using the "local" connection only; if either is not correct, the command will error.
NOTE: This command should be run as the same user as the API daemon, usually "root" with "sudo -E" or in
a cronjob as "root", to ensure permissions are correct on the backup files. If it is not, the backup will
still be taken, but the state update write will likely fail and the backup will become untracked. The
command will prompt for confirmation if it is found not to be running as "root"; this prompt cannot be bypassed.
This command should be run from cron or a timer at a regular interval (e.g. daily, hourly, etc.) which defines
how often backups are taken. Backup format (full/incremental) and retention are based only on the number of
recorded backups, not on the time interval between them. Backups taken manually outside of the "autobackup"
command are not counted towards the format or retention of autobackups.
The PVC_AUTOBACKUP_CFGFILE envvar or "-f"/"--configuration" option can be used to override the default
configuration file path if required by a particular run. For full details of the possible options, please
see the example configuration file at "/usr/share/pvc/autobackup.sample.yaml".
The "--force-full" option can be used to force all configured VMs to perform a "full" level backup this run,
which can help synchronize the backups of existing VMs with new ones.
"""
# All work here is done in the helper function for portability; we don't even use "finish"
vm_autobackup(CLI_CONFIG, autobackup_cfgfile, force_full_flag, cron_flag)
###############################################################################
# > pvc vm tag
###############################################################################
@@ -5758,6 +5806,29 @@ def cli(
"""
global CLI_CONFIG
CLI_CONFIG["quiet"] = _quiet
CLI_CONFIG["silent"] = _silent
cli_client_dir = environ.get("PVC_CLIENT_DIR", None)
home_dir = environ.get("HOME", None)
if cli_client_dir:
store_path = cli_client_dir
elif home_dir:
store_path = f"{home_dir}/.config/pvc"
else:
echo(
CLI_CONFIG,
"WARNING: No client or home configuration directory found; using /tmp instead",
stderr=True,
)
store_path = "/tmp/pvc"
if not path.isdir(store_path):
makedirs(store_path)
if not path.isfile(f"{store_path}/{DEFAULT_STORE_FILENAME}"):
update_store(store_path, {"local": DEFAULT_STORE_DATA})
store_data = get_store(store_path)
# If the connection isn't in the store, mark it bad but pass the value
@@ -5807,6 +5878,7 @@ cli_vm_backup.add_command(cli_vm_backup_create)
cli_vm_backup.add_command(cli_vm_backup_restore)
cli_vm_backup.add_command(cli_vm_backup_remove)
cli_vm.add_command(cli_vm_backup)
cli_vm.add_command(cli_vm_autobackup)
cli_vm_tag.add_command(cli_vm_tag_get)
cli_vm_tag.add_command(cli_vm_tag_add)
cli_vm_tag.add_command(cli_vm_tag_remove)

View File

@@ -20,25 +20,33 @@
###############################################################################
from click import echo as click_echo
from click import progressbar
from click import progressbar, confirm
from datetime import datetime
from distutils.util import strtobool
from getpass import getuser
from json import load as jload
from json import dump as jdump
from os import chmod, environ, getpid, path
from os import chmod, environ, getpid, path, makedirs
from re import findall
from socket import gethostname
from subprocess import run, PIPE
from sys import argv
from syslog import syslog, openlog, closelog, LOG_AUTH
from time import sleep
from yaml import load as yload
from yaml import BaseLoader
from yaml import BaseLoader, SafeLoader
import pvc.lib.provisioner
import pvc.lib.vm
import pvc.lib.node
DEFAULT_STORE_DATA = {"cfgfile": "/etc/pvc/pvcapid.yaml"}
DEFAULT_STORE_FILENAME = "pvc.json"
DEFAULT_API_PREFIX = "/api/v1"
DEFAULT_NODE_HOSTNAME = gethostname().split(".")[0]
DEFAULT_AUTOBACKUP_FILENAME = "/etc/pvc/autobackup.yaml"
MAX_CONTENT_WIDTH = 120
def echo(config, message, newline=True, stderr=False):
@@ -238,3 +246,322 @@ def wait_for_provisioner(CLI_CONFIG, task_id):
retdata = task_status.get("state") + ": " + task_status.get("status")
return retdata
def get_autobackup_config(CLI_CONFIG, cfgfile):
try:
config = dict()
with open(cfgfile) as fh:
backup_config = yload(fh, Loader=SafeLoader)["autobackup"]
config["backup_root_path"] = backup_config["backup_root_path"]
config["backup_root_suffix"] = backup_config["backup_root_suffix"]
config["backup_tags"] = backup_config["backup_tags"]
config["backup_schedule"] = backup_config["backup_schedule"]
config["auto_mount_enabled"] = backup_config["auto_mount"]["enabled"]
if config["auto_mount_enabled"]:
config["mount_cmds"] = list()
_mount_cmds = backup_config["auto_mount"]["mount_cmds"]
for _mount_cmd in _mount_cmds:
if "{backup_root_path}" in _mount_cmd:
_mount_cmd = _mount_cmd.format(
backup_root_path=backup_config["backup_root_path"]
)
config["mount_cmds"].append(_mount_cmd)
config["unmount_cmds"] = list()
_unmount_cmds = backup_config["auto_mount"]["unmount_cmds"]
for _unmount_cmd in _unmount_cmds:
if "{backup_root_path}" in _unmount_cmd:
_unmount_cmd = _unmount_cmd.format(
backup_root_path=backup_config["backup_root_path"]
)
config["unmount_cmds"].append(_unmount_cmd)
except FileNotFoundError:
echo(CLI_CONFIG, "ERROR: Specified backup configuration does not exist!")
exit(1)
except KeyError as e:
echo(CLI_CONFIG, f"ERROR: Backup configuration is invalid: {e}")
exit(1)
return config
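
For reference, feeding the sample configuration shown earlier through get_autobackup_config() (assuming
auto_mount.enabled were set to "yes") would produce roughly the following dict; the variable name is for
illustration only, the values come from that sample file, and {backup_root_path} has been interpolated as above.

example_autobackup_config = {
    "backup_root_path": "/tmp/backups",
    "backup_root_suffix": "/mycluster",
    "backup_tags": ["backup", "mytag"],
    "backup_schedule": {"full_interval": 7, "full_retention": 2},
    "auto_mount_enabled": True,
    "mount_cmds": [
        "/usr/sbin/mount.nfs -o nfsvers=3 10.0.0.10:/backups /tmp/backups"
    ],
    "unmount_cmds": ["/usr/bin/umount /tmp/backups"],
}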
def vm_autobackup(
CLI_CONFIG,
autobackup_cfgfile=DEFAULT_AUTOBACKUP_FILENAME,
force_full_flag=False,
cron_flag=False,
):
"""
Perform automatic backups of VMs based on an external config file.
"""
# Validate that we are running on the current primary coordinator of the 'local' cluster connection
real_connection = CLI_CONFIG["connection"]
CLI_CONFIG["connection"] = "local"
retcode, retdata = pvc.lib.node.node_info(CLI_CONFIG, DEFAULT_NODE_HOSTNAME)
if not retcode or retdata.get("coordinator_state") != "primary":
if cron_flag:
echo(
CLI_CONFIG,
"Current host is not the primary coordinator of the local cluster and running in cron mode. Exiting cleanly.",
)
exit(0)
else:
echo(
CLI_CONFIG,
f"ERROR: Current host is not the primary coordinator of the local cluster; got connection '{real_connection}', host '{DEFAULT_NODE_HOSTNAME}'.",
)
echo(
CLI_CONFIG,
"Autobackup MUST be run from the cluster active primary coordinator using the 'local' connection. See '-h'/'--help' for details.",
)
exit(1)
# Ensure we're running as root, or show a warning & confirmation
if getuser() != "root":
confirm(
"WARNING: You are not running this command as 'root'. This command should be run under the same user as the API daemon, which is usually 'root'. Are you sure you want to continue?",
prompt_suffix=" ",
abort=True,
)
# Load our YAML config
autobackup_config = get_autobackup_config(CLI_CONFIG, autobackup_cfgfile)
# Get a list of all VMs on the cluster
# We don't do tag filtering here, because we could match an arbitrary number of tags; instead, we
# parse the list after
retcode, retdata = pvc.lib.vm.vm_list(CLI_CONFIG, None, None, None, None, None)
if not retcode:
echo(CLI_CONFIG, f"ERROR: Failed to fetch VM list: {retdata}")
exit(1)
cluster_vms = retdata
# Parse the list to match tags; too complex for list comprehension alas
backup_vms = list()
for vm in cluster_vms:
vm_tag_names = [t["name"] for t in vm["tags"]]
matching_tags = (
True
if len(
set(vm_tag_names).intersection(set(autobackup_config["backup_tags"]))
)
> 0
else False
)
if matching_tags:
backup_vms.append(vm["name"])
if len(backup_vms) < 1:
echo(CLI_CONFIG, "Found no suitable VMs for autobackup.")
exit(0)
# Pretty print the names of the VMs we'll back up (to stderr)
maxnamelen = max([len(n) for n in backup_vms]) + 2
cols = 1
while (cols * maxnamelen + maxnamelen + 2) <= MAX_CONTENT_WIDTH:
cols += 1
rows = len(backup_vms) // cols
vm_list_rows = list()
for row in range(0, rows + 1):
row_start = row * cols
row_end = (row * cols) + cols
row_str = ""
for x in range(row_start, row_end):
if x < len(backup_vms):
row_str += "{:<{}}".format(backup_vms[x], maxnamelen)
vm_list_rows.append(row_str)
echo(CLI_CONFIG, f"Found {len(backup_vms)} suitable VM(s) for autobackup.")
echo(CLI_CONFIG, "Full VM list:", stderr=True)
echo(CLI_CONFIG, " {}".format("\n ".join(vm_list_rows)), stderr=True)
echo(CLI_CONFIG, "", stderr=True)
if autobackup_config["auto_mount_enabled"]:
# Execute each mount_cmds command in sequence
for cmd in autobackup_config["mount_cmds"]:
echo(
CLI_CONFIG,
f"Executing mount command '{cmd.split()[0]}'... ",
newline=False,
)
tstart = datetime.now()
ret = run(
cmd.split(),
stdout=PIPE,
stderr=PIPE,
)
tend = datetime.now()
ttot = tend - tstart
if ret.returncode != 0:
echo(
CLI_CONFIG,
f"failed. [{ttot.seconds}s]",
)
echo(
CLI_CONFIG,
f"Exiting; command reports: {ret.stderr.decode().strip()}",
)
exit(1)
else:
echo(CLI_CONFIG, f"done. [{ttot.seconds}s]")
# For each VM, perform the backup
for vm in backup_vms:
backup_suffixed_path = f"{autobackup_config['backup_root_path']}{autobackup_config['backup_root_suffix']}"
if not path.exists(backup_suffixed_path):
makedirs(backup_suffixed_path)
backup_path = f"{backup_suffixed_path}/{vm}"
autobackup_state_file = f"{backup_path}/.autobackup.json"
if not path.exists(backup_path) or not path.exists(autobackup_state_file):
# There are no existing backups, so the tracked list starts empty
state_data = dict()
tracked_backups = list()
else:
with open(autobackup_state_file) as fh:
state_data = jload(fh)
tracked_backups = state_data["tracked_backups"]
full_interval = autobackup_config["backup_schedule"]["full_interval"]
full_retention = autobackup_config["backup_schedule"]["full_retention"]
full_backups = [b for b in tracked_backups if b["type"] == "full"]
if len(full_backups) > 0:
last_full_backup = full_backups[0]
last_full_backup_idx = tracked_backups.index(last_full_backup)
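# Since tracked_backups is newest-first, this index equals the number of backups taken since that full backup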
if force_full_flag:
this_backup_type = "forced-full"
this_backup_incremental_parent = None
this_backup_retain_snapshot = True
elif last_full_backup_idx >= full_interval - 1:
this_backup_type = "full"
this_backup_incremental_parent = None
this_backup_retain_snapshot = True
else:
this_backup_type = "incremental"
this_backup_incremental_parent = last_full_backup["datestring"]
this_backup_retain_snapshot = False
else:
# The very first backup must be full to start the tree
this_backup_type = "full"
this_backup_incremental_parent = None
this_backup_retain_snapshot = True
# Perform the backup
echo(
CLI_CONFIG,
f"Backing up VM '{vm}' ({this_backup_type})... ",
newline=False,
)
tstart = datetime.now()
retcode, retdata = pvc.lib.vm.vm_backup(
CLI_CONFIG,
vm,
backup_suffixed_path,
incremental_parent=this_backup_incremental_parent,
retain_snapshot=this_backup_retain_snapshot,
)
tend = datetime.now()
ttot = tend - tstart
if not retcode:
echo(CLI_CONFIG, f"failed. [{ttot.seconds}s]")
echo(CLI_CONFIG, f"Skipping cleanups; command reports: {retdata}")
continue
else:
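# Extract the 14-digit backup datestring (YYYYMMDDHHMMSS) reported in the success message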
backup_datestring = findall(r"[0-9]{14}", retdata)[0]
echo(
CLI_CONFIG,
f"done. Backup '{backup_datestring}' created. [{ttot.seconds}s]",
)
# Read backup file to get details
backup_json_file = f"{backup_path}/{backup_datestring}/pvcbackup.json"
with open(backup_json_file) as fh:
backup_json = jload(fh)
backup = {
"datestring": backup_json["datestring"],
"type": backup_json["type"],
"parent": backup_json["incremental_parent"],
"retained_snapshot": backup_json["retained_snapshot"],
}
tracked_backups.insert(0, backup)
# Delete any full backups that are expired
marked_for_deletion = list()
found_full_count = 0
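# tracked_backups is newest-first, so any full backups found beyond the first full_retention are the oldest ones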
for backup in tracked_backups:
if backup["type"] == "full":
found_full_count += 1
if found_full_count > full_retention:
marked_for_deletion.append(backup)
# Delete any incremental backups that depend on marked parents
for backup in tracked_backups:
if backup["type"] == "incremental" and backup["parent"] in [
b["datestring"] for b in marked_for_deletion
]:
marked_for_deletion.append(backup)
# Execute deletes
for backup_to_delete in marked_for_deletion:
echo(
CLI_CONFIG,
f"Removing old VM '{vm}' backup '{backup_to_delete['datestring']}' ({backup_to_delete['type']})... ",
newline=False,
)
tstart = datetime.now()
retcode, retdata = pvc.lib.vm.vm_remove_backup(
CLI_CONFIG,
vm,
backup_suffixed_path,
backup_to_delete["datestring"],
)
tend = datetime.now()
ttot = tend - tstart
if not retcode:
echo(CLI_CONFIG, f"failed. [{ttot.seconds}s]")
echo(
CLI_CONFIG,
f"Skipping removal from tracked backups; command reports: {retdata}",
)
continue
else:
tracked_backups.remove(backup_to_delete)
echo(CLI_CONFIG, f"done. [{ttot.seconds}s]")
# Update tracked state information
state_data["tracked_backups"] = tracked_backups
with open(autobackup_state_file, "w") as fh:
jdump(state_data, fh)
if autobackup_config["auto_mount_enabled"]:
# Execute each unmount_cmds command in sequence
for cmd in autobackup_config["unmount_cmds"]:
echo(
CLI_CONFIG,
f"Executing unmount command '{cmd.split()[0]}'... ",
newline=False,
)
tstart = datetime.now()
ret = run(
cmd.split(),
stdout=PIPE,
stderr=PIPE,
)
tend = datetime.now()
ttot = tend - tstart
if ret.returncode != 0:
echo(
CLI_CONFIG,
f"failed. [{ttot.seconds}s]",
)
echo(
CLI_CONFIG,
f"Continuing; command reports: {ret.stderr.decode().strip()}",
)
else:
echo(CLI_CONFIG, f"done. [{ttot.seconds}s]")
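
Pieced together from the reads and writes above, the per-VM state file at
{backup_root_path}{backup_root_suffix}/<vm>/.autobackup.json, as loaded into state_data, looks roughly like the
following; the datestrings are purely illustrative, and on disk this is plain JSON written by jdump().

example_state_data = {
    "tracked_backups": [
        {
            "datestring": "20231027020724",
            "type": "incremental",
            "parent": "20231026212523",
            "retained_snapshot": False,
        },
        {
            "datestring": "20231026212523",
            "type": "full",
            "parent": None,
            "retained_snapshot": True,
        },
    ]
}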

View File

@@ -0,0 +1 @@
client-cli/autobackup.sample.yaml usr/share/pvc

View File

@@ -1,7 +1,7 @@
api-daemon/pvcapid.py usr/share/pvc
api-daemon/pvcapid-manage*.py usr/share/pvc
api-daemon/pvc-api-db-upgrade usr/share/pvc
api-daemon/pvcapid.sample.yaml etc/pvc
api-daemon/pvcapid.sample.yaml usr/share/pvc
api-daemon/pvcapid usr/share/pvc
api-daemon/pvcapid.service lib/systemd/system
api-daemon/pvcapid-worker.service lib/systemd/system

View File

@@ -18,3 +18,6 @@ fi
if [ ! -f /etc/pvc/pvcapid.yaml ]; then
echo "NOTE: The PVC client API daemon (pvcapid.service) and the PVC provisioner worker daemon (pvcapid-worker.service) have not been started; create a config file at /etc/pvc/pvcapid.yaml, then run the database configuration (/usr/share/pvc/pvc-api-db-upgrade) and start them manually."
fi
# Clean up any old sample configs
rm /etc/pvc/pvcapid.sample.yaml || true

View File

@@ -1,5 +1,5 @@
node-daemon/pvcnoded.py usr/share/pvc
node-daemon/pvcnoded.sample.yaml etc/pvc
node-daemon/pvcnoded.sample.yaml usr/share/pvc
node-daemon/pvcnoded usr/share/pvc
node-daemon/pvcnoded.service lib/systemd/system
node-daemon/pvc.target lib/systemd/system

View File

@@ -14,3 +14,6 @@ if systemctl is-active --quiet pvcnoded.service; then
else
echo "NOTE: The PVC node daemon (pvcnoded.service) has not been started; create a config file at /etc/pvc/pvcnoded.yaml then start it."
fi
# Clean up any old sample configs
rm /etc/pvc/pvcnoded.sample.yaml || true