Compare commits

17 Commits

* 7a32d8da9d
* e3b8673789
* 9db46d48e4
* d74c3a2d45
* f4e946c262
* 31254e8174
* 82cef60078
* 47098ee074
* 76c1e7040f
* f6fc08dac3
* f8831ee84c
* d7f40ba1aa
* 08227ba0f4
* ba4349d289
* b525bbe81d
* 078d48a50b
* cebc660fb0

CHANGELOG.md (12 changes)

@@ -1,5 +1,17 @@
## PVC Changelog

###### [v0.9.105](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.105)

* [API Daemon/Provisioner] Corrects some small bugs with OVA handling

###### [v0.9.104](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.104)

* [API Daemon] Fixes a bug that caused "storage volume upload" of RAW block devices to fail
* [API Daemon/CLI Client] Adds support for VM automirrors, replicating the functionality of autobackup but for cross-cluster mirroring
* [CLI Client] Improves the help output of several commands
* [API Daemon/CLI Client] Moves VM snapshot age conversion to human-readable values out of the API and into the client, to open up more programmatic handling in the future
* [Worker Daemon] Improves the clarity of Celery logging output by including the calling function in any task output

###### [v0.9.103](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.103)

* [Provisioner] Fixes a bug with the change of `storage_hosts` to FQDNs affecting the VM Builder
@@ -1783,7 +1783,7 @@ class API_VM_Root(Resource):
      description: Unix timestamp of the snapshot
    age:
      type: string
      description: Human-readable age of the snapshot in the largest viable unit (seconds, minutes, hours, days)
      description: Age of the snapshot in seconds
    rbd_snapshots:
      type: array
      items:
@@ -4533,6 +4533,76 @@ class API_VM_Autobackup_Root(Resource):
api.add_resource(API_VM_Autobackup_Root, "/vm/autobackup")


# /vm/automirror
class API_VM_Automirror_Root(Resource):
    @RequestParser(
        [
            {"name": "email_recipients"},
            {
                "name": "email_errors_only",
                "required": False,
            },
        ]
    )
    @Authenticator
    def post(self, reqargs):
        """
        Trigger a cluster automirror job
        ---
        tags:
          - provisioner
        parameters:
          - in: query
            name: email_recipients
            type: string
            required: false
            description: A list of email addresses to send failure and report emails to, comma-separated
          - in: query
            name: email_errors_only
            type: boolean
            required: false
            default: false
            description: If set and true, only sends a report email to email_recipients when there is an error with at least one mirror
        responses:
          202:
            description: Accepted
            schema:
              type: object
              description: The Celery job information of the task
              id: CeleryTask
          400:
            description: Bad request
            schema:
              type: object
              id: Message
        """

        email_recipients = reqargs.get("email_recipients", None)
        if email_recipients is not None and not isinstance(email_recipients, list):
            email_recipients = [email_recipients]

        email_errors_only = bool(strtobool(reqargs.get("email_errors_only", "False")))

        task = run_celery_task(
            "cluster.automirror",
            email_recipients=email_recipients,
            email_errors_only=email_errors_only,
            run_on="primary",
        )
        return (
            {
                "task_id": task.id,
                "task_name": "cluster.automirror",
                "run_on": f"{get_primary_node()} (primary)",
            },
            202,
            {"Location": Api.url_for(api, API_Tasks_Element, task_id=task.id)},
        )


api.add_resource(API_VM_Automirror_Root, "/vm/automirror")


##########################################################
# Client API - Network
##########################################################
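For orientation, a minimal sketch of driving the new endpoint directly over HTTP rather than through the CLI. The base URL and API key below are placeholders, not values from this changeset; the `X-Api-Key` header and query parameters follow the resource definition above.

```python
import requests

# Placeholder connection details; substitute your cluster's API URI and key.
API_BASE = "https://pvc.example.com:7370/api/v1"
API_KEY = "0123456789abcdef"

# Trigger a cluster automirror job, asking for a report email only on errors.
resp = requests.post(
    f"{API_BASE}/vm/automirror",
    headers={"X-Api-Key": API_KEY},
    params={
        "email_recipients": "ops@example.com",
        "email_errors_only": "true",
    },
)

# A 202 response carries the Celery task information and a Location header
# pointing at the task status endpoint.
print(resp.status_code)              # 202 if accepted
print(resp.json())                   # {"task_id": ..., "task_name": "cluster.automirror", ...}
print(resp.headers.get("Location"))
```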
@@ -2492,7 +2492,7 @@ def ceph_volume_upload(zkhandler, pool, volume, img_type, file_size=None):
    )

    if img_type == "raw":
        if file_size != dev_size:
        if file_size is not None and file_size != dev_size:
            output = {
                "message": f"Image file size {file_size} does not match volume size {dev_size}"
            }
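As a standalone illustration of the guard added above (not code from the repository): when no file_size is supplied, the RAW-image size check is now skipped instead of comparing None against the device size and rejecting the upload.

```python
# Hypothetical helper mirroring the new condition, for illustration only.
def raw_size_mismatch(file_size, dev_size):
    return file_size is not None and file_size != dev_size

assert raw_size_mismatch(None, 1073741824) is False        # size unknown: allow the upload
assert raw_size_mismatch(2147483648, 1073741824) is True   # mismatch: reject
assert raw_size_mismatch(1073741824, 1073741824) is False  # exact match: allow
```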
@@ -179,6 +179,10 @@ def upload_ova(zkhandler, pool, name, ova_size):
            }
            retcode = 400
            return output, retcode
        else:
            ova_script = "default_ova"
    else:
        ova_script = "ova"

    ova_archive = None

@@ -397,7 +401,14 @@ def upload_ova(zkhandler, pool, name, ova_size):
    vnc = False
    serial = True
    retdata, retcode = provisioner.create_template_system(
        name, vcpu_count, vram_mb, serial, vnc, vnc_bind=None, ova=ova_id
        name,
        vcpu_count,
        vram_mb,
        serial,
        vnc,
        vnc_bind=None,
        ova=ova_id,
        migration_max_downtime=300,
    )
    if retcode != 200:
        return retdata, retcode
@@ -414,7 +425,7 @@ def upload_ova(zkhandler, pool, name, ova_size):
        None,
        None,
        userdata=None,
        script="default_ova",
        script=ova_script,
        ova=name,
        arguments=None,
    )
@@ -221,7 +221,7 @@ def create_template_system(
    node_selector=None,
    node_autostart=False,
    migration_method=None,
    migration_max_downtime=None,
    migration_max_downtime=300,
    ova=None,
):
    if list_template_system(name, is_fuzzy=False)[-1] != 404:
@ -1800,7 +1800,7 @@ def cli_vm_flush_locks(domain, wait_flag):
|
||||
"""
|
||||
Flush stale RBD locks for virtual machine DOMAIN. DOMAIN may be a UUID or name. DOMAIN must be in the stop, disable, or fail state before flushing locks.
|
||||
|
||||
NOTE: This is a task-based command. The "--wait" flag (default) will block and show progress. Specifying the "--no-wait" flag will return immediately with a job ID instead, which can be queried externally later.
|
||||
(†) NOTE: This is a task-based command. The "--wait" flag (default) will block and show progress. Specifying the "--no-wait" flag will return immediately with a job ID instead, which can be queried externally later.
|
||||
"""
|
||||
|
||||
retcode, retmsg = pvc.lib.vm.vm_locks(CLI_CONFIG, domain, wait_flag=wait_flag)
|
||||
@ -1845,7 +1845,7 @@ def cli_vm_snapshot_create(domain, snapshot_name, wait_flag):
|
||||
Create a snapshot of the disks and XML configuration of virtual machine DOMAIN, with the
|
||||
optional name SNAPSHOT_NAME. DOMAIN may be a UUID or name.
|
||||
|
||||
WARNING: RBD snapshots are crash-consistent but not filesystem-aware. If a snapshot was taken
|
||||
(!) WARNING: RBD snapshots are crash-consistent but not filesystem-aware. If a snapshot was taken
|
||||
of a running VM, restoring that snapshot will be equivalent to having forcibly restarted the
|
||||
VM at the moment of the snapshot.
|
||||
"""
|
||||
@ -2019,7 +2019,7 @@ def cli_vm_snapshot_import(
|
||||
|
||||
If the "-r"/"--retain-snapshot" option is specified (the default), for incremental imports, only the parent snapshot is kept; for full imports, the imported snapshot is kept. If the "-R"/"--remove-snapshot" option is specified, the imported snapshot is removed.
|
||||
|
||||
WARNING: The "-R"/"--remove-snapshot" option will invalidate any existing incremental snapshots based on the same incremental parent for the imported VM.
|
||||
(!) WARNING: The "-R"/"--remove-snapshot" option will invalidate any existing incremental snapshots based on the same incremental parent for the imported VM.
|
||||
"""
|
||||
|
||||
retcode, retmsg = pvc.lib.vm.vm_import_snapshot(
|
||||
@ -2088,17 +2088,17 @@ def cli_vm_snapshot_send(
|
||||
"""
|
||||
Send the (existing) snapshot SNAPSHOT_NAME of virtual machine DOMAIN to the remote PVC cluster DESTINATION.
|
||||
|
||||
DOMAIN may be a UUID or name. DESTINATION may be either a configured PVC connection name in this CLI instance (i.e. a valid argument to "--connection"), or a full API URI, including the scheme, port and API prefix; if using the latter, an API key can be specified with the "-k"/"--destination-api-key" option.
|
||||
DOMAIN may be a UUID or name. DESTINATION may be either a configured PVC connection name in this CLI instance (i.e. a valid argument to "--connection"), or a full API URI, including the scheme, port and API prefix; if using the latter, an API key can be specified with the "-k"/"--destination-api-key" option. SSL validation will be inferred from this CLI instance's configuration.
|
||||
|
||||
The send will include the VM configuration, metainfo, and a point-in-time snapshot of all attached RBD volumes.
|
||||
|
||||
By default, the storage pool of the sending cluster will be used at the destination cluster as well. If a pool of that name does not exist, specify one with the "-p"/"--destination-pool" option.
By default, the storage pool of the sending cluster will be used at the destination cluster as well. If a pool of that name does not exist, specify a valid pool name on the destination with the "-p"/"--destination-pool" option.
|
||||
|
||||
Incremental sends are possible by specifying the "-i"/"--incremental-parent" option along with a parent snapshot name. To correctly receive, that parent snapshot must exist on DESTINATION. Subsequent sends after the first do not have to be incremental, but an incremental send is likely to perform better than a full send if the VM experiences few writes.
|
||||
|
||||
WARNING: Once sent, the VM will be in the state "mirror" on the destination cluster. If it is subsequently started, for instance for disaster recovery, a new snapshot must be taken on the destination cluster and sent back or data will be inconsistent between the instances. Only VMs in the "mirror" state can accept new sends.
|
||||
(!) WARNING: Once sent, the VM will be in the state "mirror" on the destination cluster. If it is subsequently started, for instance for disaster recovery, a new snapshot must be taken on the destination cluster and sent back or data will be inconsistent between the instances. Only VMs in the "mirror" state can accept new sends.
|
||||
|
||||
WARNING: This functionality has no automatic backout on the remote side. While a properly configured cluster should not fail any step in the process, a situation like an intermittent network connection might cause a failure which would have to be manually corrected on that side, usually by removing the mirrored VM and retrying, or rolling back to a previous snapshot and retrying. Future versions may enhance automatic recovery, but for now this would be up to the administrator.
|
||||
(!) WARNING: This functionality has no automatic backout on the remote side. While a properly configured cluster should not fail any step in the process, a situation like an intermittent network connection might cause a failure which would have to be manually corrected on that side, usually by removing the mirrored VM and retrying, or rolling back to a previous snapshot and retrying. Future versions may enhance automatic recovery, but for now this would be up to the administrator.
|
||||
"""
|
||||
|
||||
connections_config = get_store(CLI_CONFIG["store_path"])
|
||||
@ -2191,19 +2191,21 @@ def cli_vm_mirror_create(
|
||||
wait_flag,
|
||||
):
|
||||
"""
|
||||
For the virtual machine DOMAIN: create a new snapshot (dated), and send snapshot to the remote PVC cluster DESTINATION; creates a cross-cluster snapshot mirror of the VM.
|
||||
For the virtual machine DOMAIN, create a new snapshot and send that snapshot to the remote PVC cluster DESTINATION; creates a cross-cluster snapshot mirror of the VM.
|
||||
|
||||
DOMAIN may be a UUID or name. DESTINATION may be either a configured PVC connection name in this CLI instance (i.e. a valid argument to "--connection"), or a full API URI, including the scheme, port and API prefix; if using the latter, an API key can be specified with the "-k"/"--destination-api-key" option.
|
||||
DOMAIN may be a UUID or name. DESTINATION may be either a configured PVC connection name in this CLI instance (i.e. a valid argument to "--connection"), or a full API URI, including the scheme, port and API prefix; if using the latter, an API key can be specified with the "-k"/"--destination-api-key" option. SSL validation will be inferred from this CLI instance's configuration.
|
||||
|
||||
The send will include the VM configuration, metainfo, and a point-in-time snapshot of all attached RBD volumes.
|
||||
This command will create snapshots named in the format "mrYYYYMMDDHHMMSS" to differentiate them from manually-created snapshots. The send will include the VM configuration, metainfo, and a point-in-time snapshot of all attached RBD volumes.
|
||||
|
||||
This command may be used repeatedly to send new updates for a remote VM mirror. If a valid shared snapshot is found on the destination cluster, block device transfers will be incremental based on that snapshot.
|
||||
|
||||
By default, the storage pool of the sending cluster will be used at the destination cluster as well. If a pool of that name does not exist, specify one with the "-p"/"--destination-pool" option.
By default, the storage pool of the sending cluster will be used at the destination cluster as well. If a pool of that name does not exist, specify a valid pool name on the destination with the "-p"/"--destination-pool" option.
|
||||
|
||||
WARNING: Once sent, the VM will be in the state "mirror" on the destination cluster. If it is subsequently started, for instance for disaster recovery, a new snapshot must be taken on the destination cluster and sent back or data will be inconsistent between the instances. Only VMs in the "mirror" state can accept new sends. Consider using "mirror promote" instead of any manual promotion attempts.
|
||||
(†) NOTE: Any configured autobackup and automirror tag(s) will be removed from the VM metadata on the remote cluster to prevent possible loops and because configurations may differ between clusters.
|
||||
|
||||
WARNING: This functionality has no automatic backout on the remote side. While a properly configured cluster should not fail any step in the process, a situation like an intermittent network connection might cause a failure which would have to be manually corrected on that side, usually by removing the mirrored VM and retrying, or rolling back to a previous snapshot and retrying. Future versions may enhance automatic recovery, but for now this would be up to the administrator.
|
||||
(!) WARNING: Once sent, the VM will be in the state "mirror" on the destination cluster. If it is subsequently started, for instance for disaster recovery, a new snapshot must be taken on the destination cluster and sent back or data will be inconsistent between the instances; THIS WILL CAUSE DATA LOSS ON THE SOURCE CLUSTER. To avoid this, use "mirror promote" instead of attempting a manual promotion. Only VMs in the "mirror" state on the target can accept sends.
|
||||
|
||||
(!) WARNING: This functionality has no automatic backout on the remote side. While a properly configured cluster should not fail any step in the process, a situation like an intermittent network connection might cause a failure which would have to be manually corrected on that side, usually by removing the mirrored VM and retrying, or rolling back to a previous snapshot and retrying. Future versions may enhance automatic recovery, but for now this would be up to the administrator.
|
||||
"""
|
||||
|
||||
connections_config = get_store(CLI_CONFIG["store_path"])
|
||||
@ -2289,19 +2291,23 @@ def cli_vm_mirror_promote(
|
||||
wait_flag,
|
||||
):
|
||||
"""
|
||||
For the virtual machine DOMAIN: shut down on this cluster, create a new snapshot (dated), send snapshot to the remote PVC cluster DESTINATION, start on DESTINATION, and optionally remove from this cluster; performs a cross-cluster move of the VM, with or without retaining the source as a snapshot mirror.
|
||||
For the virtual machine DOMAIN, shut down the VM on this cluster, create a new snapshot, send that snapshot to the remote PVC cluster DESTINATION, start the VM on DESTINATION, and optionally remove the VM from this cluster; performs a cross-cluster "move" of the VM, with or without retaining the source as a snapshot mirror.
|
||||
|
||||
DOMAIN may be a UUID or name. DESTINATION may be either a configured PVC connection name in this CLI instance (i.e. a valid argument to "--connection"), or a full API URI, including the scheme, port and API prefix; if using the latter, an API key can be specified with the "-k"/"--destination-api-key" option.
|
||||
DOMAIN may be a UUID or name. DESTINATION may be either a configured PVC connection name in this CLI instance (i.e. a valid argument to "--connection"), or a full API URI, including the scheme, port and API prefix; if using the latter, an API key can be specified with the "-k"/"--destination-api-key" option. SSL validation will be inferred from this CLI instance's configuration.
|
||||
|
||||
The send will include the VM configuration, metainfo, and a point-in-time snapshot of all attached RBD volumes.
|
||||
This command will create snapshots named in the format "mrYYYYMMDDHHMMSS" to differentiate them from manually-created snapshots. The send will include the VM configuration, metainfo, and a point-in-time snapshot of all attached RBD volumes.
|
||||
|
||||
If a valid shared snapshot is found on the destination cluster, block device transfers will be incremental based on that snapshot.
|
||||
|
||||
By default, the storage pool of the sending cluster will be used at the destination cluster as well. If a pool of that name does not exist, specify one with the "-p"/"--destination-pool" option.
By default, the storage pool of the sending cluster will be used at the destination cluster as well. If a pool of that name does not exist, specify a valid pool name on the destination with the "-p"/"--destination-pool" option.
|
||||
|
||||
WARNING: Once promoted, if the "--remove" flag is not set, the VM will be in the state "mirror" on this cluster. This effectively flips which cluster is the "primary" for this VM, and subsequent mirror management commands must be run against the destination cluster instead of this cluster. If the "--remove" flag is set, the VM will be removed from this cluster entirely once successfully started on the destination cluster.
|
||||
(†) NOTE: Any configured autobackup and automirror tag(s) will be removed from the VM metadata on the remote cluster to prevent possible loops and because configurations may differ between clusters.
|
||||
|
||||
WARNING: This functionality has no automatic backout on the remote side. While a properly configured cluster should not fail any step in the process, a situation like an intermittent network connection might cause a failure which would have to be manually corrected on that side, usually by removing the mirrored VM and retrying, or rolling back to a previous snapshot and retrying. Future versions may enhance automatic recovery, but for now this would be up to the administrator.
|
||||
(!) WARNING: The VM will experience some amount of downtime during the promotion.
|
||||
|
||||
(!) WARNING: Once promoted, if the "--remove" flag is not set, the VM will be in the state "mirror" on this cluster. This effectively flips which cluster is the "primary" for this VM, and subsequent mirror management commands must be run against the destination cluster instead of this cluster. If the "--remove" flag is set, the VM will be removed from this cluster entirely once successfully started on the destination cluster.
|
||||
|
||||
(!) WARNING: This functionality has no automatic backout on the remote side. While a properly configured cluster should not fail any step in the process, a situation like an intermittent network connection might cause a failure which would have to be manually corrected on that side, usually by removing the mirrored VM and retrying, or rolling back to a previous snapshot and retrying. Future versions may enhance automatic recovery, but for now this would be up to the administrator.
|
||||
"""
|
||||
|
||||
connections_config = get_store(CLI_CONFIG["store_path"])
|
||||
@ -2439,7 +2445,7 @@ def cli_vm_backup_restore(domain, backup_datestring, backup_path, retain_snapsho
|
||||
|
||||
If the "-r"/"--retain-snapshot" option is specified (the default), for incremental restores, only the parent snapshot is kept; for full restores, the restored snapshot is kept. If the "-R"/"--remove-snapshot" option is specified, the imported snapshot is removed.
|
||||
|
||||
WARNING: The "-R"/"--remove-snapshot" option will invalidate any existing incremental backups based on the same incremental parent for the restored VM.
|
||||
(!) WARNING: The "-R"/"--remove-snapshot" option will invalidate any existing incremental backups based on the same incremental parent for the restored VM.
|
||||
"""
|
||||
|
||||
echo(
|
||||
@ -2471,7 +2477,7 @@ def cli_vm_backup_remove(domain, backup_datestring, backup_path):
|
||||
|
||||
Remove the backup BACKUP_DATESTRING, including snapshots, of virtual machine DOMAIN stored in BACKUP_PATH on the cluster primary coordinator. DOMAIN may be a UUID or name.
|
||||
|
||||
WARNING: Removing an incremental parent will invalidate any existing incremental backups based on that backup.
|
||||
(!) WARNING: Removing an incremental parent will invalidate any existing incremental backups based on that backup.
|
||||
"""
|
||||
|
||||
echo(
|
||||
@ -2540,17 +2546,21 @@ def cli_vm_autobackup(email_report, force_full_flag, wait_flag, cron_flag):
|
||||
recorded backups, not on the time interval between them. Exports taken manually outside of the "autobackup"
|
||||
command are not counted towards the format or retention of autobackups.
|
||||
|
||||
WARNING: Running this command manually will interfere with the schedule! Do not run manually except for testing.
|
||||
(!) WARNING: Running this command manually will interfere with the schedule! Do not run manually except for testing.
|
||||
|
||||
The actual details of the autobackup, including retention policies, full-vs-incremental, pre- and post- run
|
||||
mounting/unmounting commands, etc. are defined in the main PVC configuration file `/etc/pvc/pvc.conf`. See
|
||||
the sample configuration for more details.
|
||||
the sample configuration for more details on how to configure autobackups.
|
||||
|
||||
An optional report on all current backups can be emailed to one or more email addresses using the
|
||||
"--email-report" flag. This report will include information on all current known backups.
|
||||
|
||||
The "--force-full" option can be used to force all configured VMs to perform a "full" level backup this run,
|
||||
which can help synchronize the backups of existing VMs with new ones.
|
||||
|
||||
This command will create snapshots named in the format "abYYYYMMDDHHMMSS" to differentiate them from manually-created snapshots.
|
||||
|
||||
For more details on VM snapshot exports, see "pvc vm snapshot export --help".
|
||||
"""
|
||||
|
||||
if cron_flag:
|
||||
@@ -2577,6 +2587,94 @@ def cli_vm_autobackup(email_report, force_full_flag, wait_flag, cron_flag):
        finish(retcode, retmsg)


###############################################################################
# > pvc vm automirror
###############################################################################
@click.command(
    name="automirror", short_help="Perform automatic virtual machine mirrors."
)
@connection_req
@click.option(
    "--email-report",
    "email_report",
    default=None,
    help="Email a mirror summary report to the specified address(es), comma-separated.",
)
@click.option(
    "--email-errors-only",
    "email_errors_only_flag",
    is_flag=True,
    default=False,
    show_default=True,
    help="Only send a mirror summary report when at least one error occurs.",
)
@click.option(
    "--wait/--no-wait",
    "wait_flag",
    is_flag=True,
    default=True,
    show_default=True,
    help="Wait or don't wait for task to complete, showing progress if waiting.",
)
@click.option(
    "--cron",
    "cron_flag",
    is_flag=True,
    default=False,
    show_default=True,
    help="Run in cron mode (returns immediately with no output once job is submitted).",
)
def cli_vm_automirror(email_report, email_errors_only_flag, wait_flag, cron_flag):
    """
    Perform automated mirrors of VMs, with integrated cleanup and full/incremental scheduling.

    This command enables automatic mirrors of PVC VMs at the block level, leveraging the various "pvc vm snapshot"
    functions with an internal retention and cleanup system. VMs and the destination cluster(s) are selected based
    on configured VM tags and a set of static configs in the cluster's `pvc.conf` configuration.

    This command should be run from cron or a timer at a regular interval (e.g. daily, hourly, etc.) which defines
    how often mirrors are taken. Mirror retention is based only on the number of recorded mirrors on the remote side,
    not on the time interval between them. Mirrors taken manually outside of the "automirror" command are not counted
    towards the format or retention of automirrors.

    (!) WARNING: Running this command manually will interfere with the schedule! Do not run manually except for testing.

    The actual details of the automirror, including retention policies, are defined in the main PVC configuration file
    `/etc/pvc/pvc.conf`. See the sample configuration for more details on how to configure automirrors.

    An optional report on the job's results can be emailed to one or more email addresses using the "--email-report" flag.
    By default, reports are sent for all mirror jobs; reporting only on jobs that experience one or more errors can be
    selected using the "--email-errors-only" flag.

    This command will create snapshots named in the format "amYYYYMMDDHHMMSS" to differentiate them from manually-created snapshots.

    For more details on VM mirrors, see "pvc vm mirror create --help".
    """

    if cron_flag:
        wait_flag = False

    if email_report is not None:
        email_recipients = email_report.split(",")
    else:
        email_recipients = None

    retcode, retmsg = pvc.lib.vm.vm_automirror(
        CLI_CONFIG,
        email_recipients=email_recipients,
        email_errors_only_flag=email_errors_only_flag,
        wait_flag=wait_flag,
    )

    if retcode and wait_flag:
        retmsg = wait_for_celery_task(CLI_CONFIG, retmsg)

    if cron_flag:
        finish(retcode, None)
    else:
        finish(retcode, retmsg)


###############################################################################
# > pvc vm tag
###############################################################################
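As background on the "amYYYYMMDDHHMMSS" naming mentioned above, a small illustrative sketch of how these snapshot names are built and parsed; it mirrors the strftime/strptime calls used by the new automirror worker, but the helper names here are illustrative, not part of this changeset.

```python
from datetime import datetime

def build_automirror_snapshot_name(now=None):
    # Automirror snapshots are named "am" + YYYYMMDDHHMMSS, e.g. "am20241101120000".
    now = now or datetime.now()
    return f"am{now.strftime('%Y%m%d%H%M%S')}"

def parse_automirror_snapshot_name(snapshot_name):
    # Strip the "am" prefix and recover the timestamp, as the summary report does.
    return datetime.strptime(snapshot_name.replace("am", ""), "%Y%m%d%H%M%S")

name = build_automirror_snapshot_name()
print(name, parse_automirror_snapshot_name(name))
```

Because the timestamp is zero-padded and ordered most-significant-first, sorting these names as strings matches chronological order, which is what the worker's retention logic relies on when trimming old mirror snapshots.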
@ -2897,7 +2995,7 @@ def cli_vm_network_add(
|
||||
|
||||
NET may be a PVC network VNI, which is added as a bridged device, or a SR-IOV VF device connected in the given mode.
|
||||
|
||||
NOTE: Adding a SR-IOV network device in the "hostdev" mode has the following caveats:
|
||||
(†) NOTE: Adding a SR-IOV network device in the "hostdev" mode has the following caveats:
|
||||
|
||||
1. The VM will not be able to be live migrated; it must be shut down to migrate between nodes. The VM metadata will be updated to force this.
|
||||
|
||||
@ -3248,7 +3346,7 @@ def cli_vm_list(target_node, target_state, target_tag, limit, negate, format_fun
|
||||
"""
|
||||
List all virtual machines; optionally only match names or full UUIDs matching regex LIMIT.
|
||||
|
||||
NOTE: Red-coloured network lists indicate one or more configured networks are missing/invalid.
|
||||
(†) NOTE: Red-coloured network lists indicate one or more configured networks are missing/invalid.
|
||||
"""
|
||||
|
||||
retcode, retdata = pvc.lib.vm.vm_list(
|
||||
@ -3362,7 +3460,7 @@ def cli_network_add(
|
||||
"""
|
||||
Add a new virtual network with VXLAN identifier VNI.
|
||||
|
||||
NOTE: The MTU must be equal to, or less than, the underlying device MTU (either the node 'bridge_mtu' for bridged networks, or the node 'cluster_mtu' minus 50 for managed networks). It is only required if the device MTU should be lower than the underlying physical device MTU for compatibility. If unset, it defaults to the underlying device MTU, which will be set explicitly when the network is added to the nodes.
(†) NOTE: The MTU must be equal to, or less than, the underlying device MTU (either the node 'bridge_mtu' for bridged networks, or the node 'cluster_mtu' minus 50 for managed networks). It is only required if the device MTU should be lower than the underlying physical device MTU for compatibility. If unset, it defaults to the underlying device MTU, which will be set explicitly when the network is added to the nodes.
|
||||
|
||||
Examples:
|
||||
|
||||
@ -3475,7 +3573,7 @@ def cli_network_modify(
|
||||
"""
|
||||
Modify details of virtual network VNI. All fields optional; only specified fields will be updated.
|
||||
|
||||
NOTE: The MTU must be equal to, or less than, the underlying device MTU (either the node 'bridge_mtu' for bridged networks, or the node 'cluster_mtu' minus 50 for managed networks). Is only required if the device MTU should be lower than the underlying physical device MTU for compatibility. To reset an explicit MTU to the default underlying device MTU, specify '--mtu' with a quoted empty string argument.
|
||||
(†) NOTE: The MTU must be equal to, or less than, the underlying device MTU (either the node 'bridge_mtu' for bridged networks, or the node 'cluster_mtu' minus 50 for managed networks). Is only required if the device MTU should be lower than the underlying physical device MTU for compatibility. To reset an explicit MTU to the default underlying device MTU, specify '--mtu' with a quoted empty string argument.
|
||||
|
||||
Example:
|
||||
|
||||
@ -3511,7 +3609,7 @@ def cli_network_remove(net):
|
||||
"""
|
||||
Remove an existing virtual network NET; NET must be a VNI.
|
||||
|
||||
WARNING: PVC does not verify whether clients are still present in this network. Before removing, ensure
|
||||
(!) WARNING: PVC does not verify whether clients are still present in this network. Before removing, ensure
|
||||
that all client VMs have been removed from the network or undefined behaviour may occur.
|
||||
"""
|
||||
|
||||
@ -3700,11 +3798,11 @@ def cli_network_acl_add(net, direction, description, rule, order):
|
||||
"""
|
||||
Add a new NFT firewall rule to network NET; the rule is a literal NFT rule belonging to the forward table for the client network; NET must be a VNI.
|
||||
|
||||
NOTE: All client networks are default-allow in both directions; deny rules MUST be added here at the end of the sequence for a default-deny setup.
|
||||
(†) NOTE: All client networks are default-allow in both directions; deny rules MUST be added here at the end of the sequence for a default-deny setup.
|
||||
|
||||
NOTE: Ordering places the rule at the specified ID, not before it; the old rule of that ID and all subsequent rules will be moved down.
|
||||
(†) NOTE: Ordering places the rule at the specified ID, not before it; the old rule of that ID and all subsequent rules will be moved down.
|
||||
|
||||
NOTE: Descriptions are used as names, and must be unique within a network (both directions).
|
||||
(†) NOTE: Descriptions are used as names, and must be unique within a network (both directions).
|
||||
|
||||
Example:
|
||||
|
||||
@ -4194,7 +4292,7 @@ def cli_storage_osd_create_db_vg(node, device, wait_flag):
|
||||
|
||||
Only one OSD database volume group on a single physical device, named "osd-db", is supported per node, so it must be fast and large enough to act as an effective OSD database device for all OSDs on the node. Attempting to add additional database volume groups after the first will result in an error.
|
||||
|
||||
WARNING: If the OSD database device fails, all OSDs on the node using it will be lost and must be recreated.
|
||||
(!) WARNING: If the OSD database device fails, all OSDs on the node using it will be lost and must be recreated.
|
||||
"""
|
||||
|
||||
retcode, retmsg = pvc.lib.storage.ceph_osd_db_vg_add(
|
||||
@ -4269,13 +4367,13 @@ def cli_storage_osd_add(
|
||||
|
||||
The "-r"/"--ext-db-ratio" or "-s"/"--ext-db-size" options, if specified, and if a OSD DB VG exists on the node (see "pvc storage osd create-db-vg"), will instruct the OSD to locate its RocksDB database and WAL on a new logical volume on that OSD DB VG. If "-r"/"--ext-db-ratio" is specified, the sizing of this DB LV will be the given ratio (specified as a decimal percentage e.g. 0.05 for 5%) of the size of the OSD (e.g. 0.05 on a 1TB SSD will create a 50GB LV). If "-s"/"--ext-db-size" is specified, the sizing of this DB LV will be the given human-unit size (e.g. 1024M, 20GB, etc.). An 0.05 ratio is recommended; at least 0.02 is required, and more than 0.05 can potentially increase performance in write-heavy workloads.
|
||||
|
||||
WARNING: An external DB carries important caveats. An external DB is only suggested for relatively slow OSD devices (e.g. SATA SSDs) when there is also a much faster, more robust, but smaller storage device in the system (e.g. an NVMe or 3DXPoint SSD) which can accelerate the OSD. An external DB is NOT recommended for NVMe OSDs as this will hamper performance and reliability. Additionally, it is important to note that the OSD will depend entirely on this external DB device; they cannot be separated without destroying the OSD, and the OSD cannot function without the external DB device, thus introducing a single point of failure. Use this feature with extreme care.
(!) WARNING: An external DB carries important caveats. An external DB is only suggested for relatively slow OSD devices (e.g. SATA SSDs) when there is also a much faster, more robust, but smaller storage device in the system (e.g. an NVMe or 3DXPoint SSD) which can accelerate the OSD. An external DB is NOT recommended for NVMe OSDs as this will hamper performance and reliability. Additionally, it is important to note that the OSD will depend entirely on this external DB device; they cannot be separated without destroying the OSD, and the OSD cannot function without the external DB device, thus introducing a single point of failure. Use this feature with extreme care.
|
||||
|
||||
The "-c"/"--osd-count" option allows the splitting of a single block device into multiple logical OSDs. This is recommended in the Ceph literature for extremely fast OSD block devices (i.e. NVMe or 3DXPoint) which can saturate a single OSD process. Usually, 2 or 4 OSDs is recommended, based on the size and performance of the OSD disk; more than 4 OSDs per volume is not recommended, and this option is not recommended for SATA SSDs.
|
||||
|
||||
Note that, if "-c"/"--osd-count" is specified, the provided "-w"/"--weight" will be the weight of EACH created OSD, not the block device as a whole. Ensure you take this into account if mixing and matching OSD block devices. Additionally, if "-r"/"--ext-db-ratio" or "-s"/"--ext-db-size" is specified, one DB LV will be created for EACH created OSD, of the given ratio/size per OSD; ratios are calculated from the OSD size, not the underlying device.
|
||||
|
||||
NOTE: This command may take a long time to complete. Observe the node logs of the hosting OSD node for detailed status.
|
||||
(†) NOTE: This command may take a long time to complete. Observe the node logs of the hosting OSD node for detailed status.
|
||||
"""
|
||||
|
||||
retcode, retmsg = pvc.lib.storage.ceph_osd_add(
|
||||
@ -4350,15 +4448,15 @@ def cli_storage_osd_replace(
|
||||
|
||||
If OSDID is part of a split OSD set, any peer split OSDs with the same configured block device will be replaced as well. The split count will be retained and cannot be changed with this command; to do so, all OSDs in the split OSD set must be removed and new OSD(s) created.
|
||||
|
||||
WARNING: This operation entails (and is functionally equivalent to) a removal and recreation of the specified OSD and, if applicable, all peer split OSDs. This is an intensive and potentially destructive action. Ensure that the cluster is otherwise healthy before proceeding, and ensure the subsequent rebuild completes successfully. Do not attempt this operation on a severely degraded cluster without first considering the possible data loss implications.
|
||||
(!) WARNING: This operation entails (and is functionally equivalent to) a removal and recreation of the specified OSD and, if applicable, all peer split OSDs. This is an intensive and potentially destructive action. Ensure that the cluster is otherwise healthy before proceeding, and ensure the subsequent rebuild completes successfully. Do not attempt this operation on a severely degraded cluster without first considering the possible data loss implications.
|
||||
|
||||
If the "-o"/"--old-device" option is specified, is a valid block device on the node, is readable/accessible, and contains the metadata for the specified OSD, it will be zapped. If this option is not specified, the system will try to find the old block device automatically to zap it. If it can't be found, the OSD will simply be removed from the CRUSH map and PVC database before recreating. This option can provide a cleaner deletion when replacing a working device that has a different block path, but is otherwise unnecessary.
|
||||
|
||||
The "-w"/"--weight", "-r"/"--ext-db-ratio", and "-s"/"--ext-db-size" allow overriding the existing weight and external DB LV for the OSD(s), if desired. If unset, the existing weight and external DB LV size (if applicable) will be used for the replacement OSD(s) instead.
|
||||
|
||||
NOTE: If neither the "-r"/"--ext-db-ratio" nor "-s"/"--ext-db-size" option is specified, and the OSD(s) had an external DB LV, it cannot be removed; a new DB LV will be created for the replacement OSD(s), and this cannot be avoided. However, if the OSD(s) did not have an external DB LV, and one of these options is specified, a new DB LV will be added to the new OSD.
(†) NOTE: If neither the "-r"/"--ext-db-ratio" nor "-s"/"--ext-db-size" option is specified, and the OSD(s) had an external DB LV, it cannot be removed; a new DB LV will be created for the replacement OSD(s), and this cannot be avoided. However, if the OSD(s) did not have an external DB LV, and one of these options is specified, a new DB LV will be added to the new OSD.
|
||||
|
||||
NOTE: This command may take a long time to complete. Observe the node logs of the hosting OSD node for detailed status.
|
||||
(†) NOTE: This command may take a long time to complete. Observe the node logs of the hosting OSD node for detailed status.
|
||||
"""
|
||||
|
||||
retcode, retmsg = pvc.lib.storage.ceph_osd_replace(
|
||||
@ -4401,9 +4499,9 @@ def cli_storage_osd_refresh(osdid, device, wait_flag):
|
||||
|
||||
Existing data, IDs, weights, DB LVs, etc. of the OSD will be preserved. Any split peer OSD(s) on the same block device will also be automatically refreshed.
|
||||
|
||||
NOTE: If the OSD(s) had an external DB device, it must exist before refreshing the OSD. If it can't be found, the OSD cannot be reimported and must be recreated.
|
||||
(†) NOTE: If the OSD(s) had an external DB device, it must exist before refreshing the OSD. If it can't be found, the OSD cannot be reimported and must be recreated.
|
||||
|
||||
NOTE: This command may take a long time to complete. Observe the node logs of the hosting OSD node for detailed status.
|
||||
(†) NOTE: This command may take a long time to complete. Observe the node logs of the hosting OSD node for detailed status.
|
||||
"""
|
||||
|
||||
retcode, retmsg = pvc.lib.storage.ceph_osd_refresh(
|
||||
@ -4444,9 +4542,9 @@ def cli_storage_osd_remove(osdid, force_flag, wait_flag):
|
||||
|
||||
DANGER: This will completely remove the OSD from the cluster. OSDs will rebalance which will negatively affect performance and available space. It is STRONGLY RECOMMENDED to set an OSD out (using 'pvc storage osd out') and allow the cluster to fully rebalance, verified with 'pvc storage status', before removing an OSD.
|
||||
|
||||
NOTE: The "-f"/"--force" option is useful after replacing a failed node, to ensure the OSD is removed even if the OSD in question does not properly exist on the node after a rebuild.
|
||||
(†) NOTE: The "-f"/"--force" option is useful after replacing a failed node, to ensure the OSD is removed even if the OSD in question does not properly exist on the node after a rebuild.
|
||||
|
||||
NOTE: This command may take a long time to complete. Observe the node logs of the hosting OSD node for detailed status.
|
||||
(†) NOTE: This command may take a long time to complete. Observe the node logs of the hosting OSD node for detailed status.
|
||||
"""
|
||||
|
||||
retcode, retmsg = pvc.lib.storage.ceph_osd_remove(
|
||||
@ -4926,7 +5024,7 @@ def cli_storage_volume_snapshot_add(pool, volume, name):
|
||||
"""
|
||||
Add a snapshot with name NAME of Ceph RBD volume VOLUME in pool POOL.
|
||||
|
||||
WARNING: RBD snapshots are crash-consistent but not filesystem-aware. If a snapshot was taken
|
||||
(!) WARNING: RBD snapshots are crash-consistent but not filesystem-aware. If a snapshot was taken
|
||||
of a running VM, restoring that snapshot will be equivalent to having forcibly restarted the
|
||||
VM at the moment of the snapshot.
|
||||
"""
|
||||
@ -4991,11 +5089,11 @@ def cli_storage_volume_snapshot_rollback(pool, volume, name):
|
||||
|
||||
DANGER: All data written to the volume since the given snapshot will be permanently lost.
|
||||
|
||||
WARNING: A rollback cannot be performed on an RBD volume with active I/O. Doing so will cause
|
||||
(!) WARNING: A rollback cannot be performed on an RBD volume with active I/O. Doing so will cause
|
||||
undefined behaviour and possible corruption. Ensure that any VM(s) using this RBD volume are
|
||||
stopped or disabled before attempting a snapshot rollback.
|
||||
|
||||
WARNING: RBD snapshots are crash-consistent but not filesystem-aware. If a snapshot was taken
|
||||
(!) WARNING: RBD snapshots are crash-consistent but not filesystem-aware. If a snapshot was taken
|
||||
of a running VM, restoring that snapshot will be equivalent to having forcibly restarted the
|
||||
VM at the moment of the snapshot.
|
||||
"""
|
||||
@ -5499,7 +5597,7 @@ def cli_provisioner_template_network_vni_add(name, vni, permit_duplicate_flag):
|
||||
|
||||
Networks will be added to VMs in the order they are added and displayed within the template.
|
||||
|
||||
NOTE: Normally, the API prevents duplicate VNIs from being added to the same network template
|
||||
(†) NOTE: Normally, the API prevents duplicate VNIs from being added to the same network template
|
||||
by returning an error, as this requirement is very niche. If you do not desire this behaviour,
|
||||
use the "-d"/"--permit-duplicate" option to bypass the check.
|
||||
"""
|
||||
@ -6844,7 +6942,7 @@ def cli(
|
||||
else:
|
||||
echo(
|
||||
CLI_CONFIG,
|
||||
"WARNING: No client or home configuration directory found; using /tmp instead",
|
||||
"(†) NOTE: No client or home configuration directory found; using /tmp instead",
|
||||
stderr=True,
|
||||
)
|
||||
store_path = "/tmp/pvc"
|
||||
@ -6918,6 +7016,7 @@ cli_vm_backup.add_command(cli_vm_backup_restore)
|
||||
cli_vm_backup.add_command(cli_vm_backup_remove)
|
||||
cli_vm.add_command(cli_vm_backup)
|
||||
cli_vm.add_command(cli_vm_autobackup)
|
||||
cli_vm.add_command(cli_vm_automirror)
|
||||
cli_vm_tag.add_command(cli_vm_tag_get)
|
||||
cli_vm_tag.add_command(cli_vm_tag_add)
|
||||
cli_vm_tag.add_command(cli_vm_tag_remove)
|
||||
|
@@ -64,6 +64,37 @@ def format_metric(integer):
    return human_integer


def format_age(age_secs):
    human_age = f"{age_secs} seconds"

    age_minutes = int(age_secs / 60)
    age_minutes_rounded = int(round(age_secs / 60))
    if age_minutes > 0:
        if age_minutes_rounded > 1:
            s = "s"
        else:
            s = ""
        human_age = f"{age_minutes_rounded} minute{s}"
    age_hours = int(age_secs / 3600)
    age_hours_rounded = int(round(age_secs / 3600))
    if age_hours > 0:
        if age_hours_rounded > 1:
            s = "s"
        else:
            s = ""
        human_age = f"{age_hours_rounded} hour{s}"
    age_days = int(age_secs / 86400)
    age_days_rounded = int(round(age_secs / 86400))
    if age_days > 0:
        if age_days_rounded > 1:
            s = "s"
        else:
            s = ""
        human_age = f"{age_days_rounded} day{s}"

    return human_age


class UploadProgressBar(object):
    def __init__(self, filename, end_message="", end_nl=True):
        file_size = os.path.getsize(filename)
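Since the API now reports snapshot ages as plain seconds (see the API_VM_Root hunk above) and the client renders them, here are a few illustrative expected outputs of the new helper. This check is not part of the changeset and assumes format_age is importable from pvc.lib.common, as added in the import hunk below.

```python
from pvc.lib.common import format_age

# Values follow directly from the integer-division and rounding logic above.
assert format_age(45) == "45 seconds"
assert format_age(300) == "5 minutes"
assert format_age(7200) == "2 hours"
assert format_age(259200) == "3 days"
# Note the rounding: 90 seconds renders as "2 minutes" because round(1.5) == 2.
assert format_age(90) == "2 minutes"
```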
@@ -23,7 +23,13 @@ import time
import re

import pvc.lib.ansiprint as ansiprint
from pvc.lib.common import call_api, format_bytes, format_metric, get_wait_retdata
from pvc.lib.common import (
    call_api,
    format_bytes,
    format_metric,
    format_age,
    get_wait_retdata,
)


#
@@ -714,6 +720,26 @@ def vm_autobackup(config, email_recipients=None, force_full_flag=False, wait_fla
    return get_wait_retdata(response, wait_flag)


def vm_automirror(
    config, email_recipients=None, email_errors_only_flag=False, wait_flag=True
):
    """
    Perform a cluster VM automirror

    API endpoint: POST /vm/automirror
    API arguments: email_recipients=email_recipients, email_errors_only=email_errors_only_flag
    API schema: {"message":"{data}"}
    """
    params = {
        "email_recipients": email_recipients,
        "email_errors_only": email_errors_only_flag,
    }

    response = call_api(config, "post", "/vm/automirror", params=params)

    return get_wait_retdata(response, wait_flag)


def vm_vcpus_set(config, vm, vcpus, topology, restart):
    """
    Set the vCPU count of the VM with topology
@@ -2036,7 +2062,7 @@ def format_info(config, domain_information, long_output):
        if _snapshots_name_length > snapshots_name_length:
            snapshots_name_length = _snapshots_name_length

        _snapshots_age_length = len(snapshot["age"]) + 1
        _snapshots_age_length = len(format_age(snapshot["age"])) + 1
        if _snapshots_age_length > snapshots_age_length:
            snapshots_age_length = _snapshots_age_length

@@ -2076,7 +2102,7 @@ def format_info(config, domain_information, long_output):
        snapshots_age_length=snapshots_age_length,
        snapshots_xml_changes_length=snapshots_xml_changes_length,
        snapshots_name=snapshot["name"],
        snapshots_age=snapshot["age"],
        snapshots_age=format_age(snapshot["age"]),
        snapshots_xml_changes=xml_diff_counts,
        end=ansiprint.end(),
    )
@@ -2,7 +2,7 @@ from setuptools import setup

setup(
    name="pvc",
    version="0.9.103",
    version="0.9.105",
    packages=["pvc.cli", "pvc.lib"],
    install_requires=[
        "Click",
daemon-common/automirror.py (new file, 507 lines)

@@ -0,0 +1,507 @@
#!/usr/bin/env python3

# automirror.py - PVC API Automirror functions
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2024 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

import requests

from datetime import datetime
from os import popen

from daemon_lib.config import get_automirror_configuration
from daemon_lib.celery import start, fail, log_info, log_warn, log_err, update, finish

import daemon_lib.vm as vm

def send_execution_failure_report(
    celery, config, recipients=None, total_time=0, error=None
):
    if recipients is None:
        return

    from email.utils import formatdate
    from socket import gethostname

    log_message = f"Sending email failure report to {', '.join(recipients)}"
    log_info(celery, log_message)

    current_datetime = datetime.now()
    email_datetime = formatdate(float(current_datetime.strftime("%s")))

    email = list()
    email.append(f"Date: {email_datetime}")
    email.append(
        f"Subject: PVC Automirror execution failure for cluster '{config['cluster']}'"
    )

    email_to = list()
    for recipient in recipients:
        email_to.append(f"<{recipient}>")

    email.append(f"To: {', '.join(email_to)}")
    email.append(f"From: PVC Automirror System <pvc@{gethostname()}>")
    email.append("")

    email.append(
        f"A PVC automirror has FAILED at {current_datetime} in {total_time}s due to an execution error."
    )
    email.append("")
    email.append("The reported error message is:")
    email.append(f" {error}")

    try:
        with popen("/usr/sbin/sendmail -t", "w") as p:
            p.write("\n".join(email))
    except Exception as e:
        log_err(f"Failed to send report email: {e}")

def send_execution_summary_report(
    celery,
    config,
    recipients=None,
    total_time=0,
    summary=dict(),
    local_deleted_snapshots=dict(),
):
    if recipients is None:
        return

    from email.utils import formatdate
    from socket import gethostname

    log_message = f"Sending email summary report to {', '.join(recipients)}"
    log_info(celery, log_message)

    current_datetime = datetime.now()
    email_datetime = formatdate(float(current_datetime.strftime("%s")))

    email = list()
    email.append(f"Date: {email_datetime}")
    email.append(f"Subject: PVC Automirror report for cluster '{config['cluster']}'")

    email_to = list()
    for recipient in recipients:
        email_to.append(f"<{recipient}>")

    email.append(f"To: {', '.join(email_to)}")
    email.append(f"From: PVC Automirror System <pvc@{gethostname()}>")
    email.append("")

    email.append(
        f"A PVC automirror has been completed at {current_datetime} in {total_time}."
    )
    email.append("")
    email.append(
        "The following is a summary of all VM mirror jobs executed during this run:"
    )
    email.append("")

    vm_names = {k.split(":")[0] for k in summary.keys()}
    for vm_name in vm_names:
        email.append(f"VM {vm_name}:")
        email.append(" Mirror jobs:")
        for destination_name in {
            k.split(":")[1] for k in summary.keys() if k.split(":")[0] == vm_name
        }:
            mirror = summary[f"{vm_name}:{destination_name}"]
            datestring = mirror.get("snapshot_name").replace("am", "")
            mirror_date = datetime.strptime(datestring, "%Y%m%d%H%M%S")
            if mirror.get("result", False):
                email.append(
                    f" * {mirror_date}: Success to cluster {destination_name} in {mirror.get('runtime_secs', 0)} seconds, ID {mirror.get('snapshot_name')}"
                )
            else:
                email.append(
                    f" * {mirror_date}: Failure to cluster {destination_name} in {mirror.get('runtime_secs', 0)} seconds, ID {mirror.get('snapshot_name')}"
                )
                email.append(
                    f" {mirror.get('result_message')}"
                )

        email.append(
            " The following aged-out local snapshots were removed during cleanup:"
        )
        for snapshot in local_deleted_snapshots[vm_name]:
            email.append(f" * {snapshot}")

    try:
        with popen("/usr/sbin/sendmail -t", "w") as p:
            p.write("\n".join(email))
    except Exception as e:
        log_err(f"Failed to send report email: {e}")

def run_vm_mirror(
    zkhandler, celery, config, vm_detail, snapshot_name, destination_name
):
    vm_name = vm_detail["name"]
    keep_count = config["mirror_keep_snapshots"]

    try:
        destination = config["mirror_destinations"][destination_name]
    except Exception:
        error_message = f"Failed to find valid destination cluster '{destination_name}' for VM '{vm_name}'"
        log_err(celery, error_message)
        return error_message

    destination_api_uri = f"{'https' if destination['ssl'] else 'http'}://{destination['address']}:{destination['port']}{destination['prefix']}"
    destination_api_timeout = (3.05, 172800)
    destination_api_headers = {
        "X-Api-Key": destination["key"],
    }

    session = requests.Session()
    session.headers.update(destination_api_headers)
    session.verify = destination["verify_ssl"]
    session.timeout = destination_api_timeout

    # Get the last snapshot that is on the remote side for incrementals
    response = session.get(
        f"{destination_api_uri}/vm/{vm_name}",
        params=None,
        data=None,
    )
    destination_vm_detail = response.json()
    if type(destination_vm_detail) is list and len(destination_vm_detail) > 0:
        destination_vm_detail = destination_vm_detail[0]
        try:
            last_snapshot_name = [
                s
                for s in destination_vm_detail["snapshots"]
                if s["name"].startswith("am")
            ][0]["name"]
        except Exception:
            last_snapshot_name = None
    else:
        last_snapshot_name = None

    # Send the current snapshot
    result, message = vm.vm_worker_send_snapshot(
        zkhandler,
        None,
        vm_name,
        snapshot_name,
        destination_api_uri,
        destination["key"],
        destination_api_verify_ssl=destination["verify_ssl"],
        incremental_parent=last_snapshot_name,
        destination_storage_pool=destination["pool"],
        return_status=True,
    )

    if not result:
        return False, message

    response = session.get(
        f"{destination_api_uri}/vm/{vm_name}",
        params=None,
        data=None,
    )
    destination_vm_detail = response.json()
    if type(destination_vm_detail) is list and len(destination_vm_detail) > 0:
        destination_vm_detail = destination_vm_detail[0]
    else:
        message = "Remote VM somehow does not exist after successful mirror; skipping snapshot cleanup"
        return False, message

    # Find any mirror snapshots that are expired
    remote_snapshots = [
        s for s in destination_vm_detail["snapshots"] if s["name"].startswith("am")
    ]

    # Snapshots are in dated descending order due to the names
    if len(remote_snapshots) > keep_count:
        remote_marked_for_deletion = [s["name"] for s in remote_snapshots[keep_count:]]
    else:
        remote_marked_for_deletion = list()

    for snapshot in remote_marked_for_deletion:
        log_info(
            celery,
            f"VM {vm_detail['name']} removing stale remote automirror snapshot {snapshot}",
        )
        session.delete(
            f"{destination_api_uri}/vm/{vm_name}/snapshot",
            params={
                "snapshot_name": snapshot,
            },
            data=None,
        )

    session.close()

    return True, remote_marked_for_deletion

def worker_cluster_automirror(
|
||||
zkhandler,
|
||||
celery,
|
||||
force_full=False,
|
||||
email_recipients=None,
|
||||
email_errors_only=False,
|
||||
):
|
||||
config = get_automirror_configuration()
|
||||
|
||||
mirror_summary = dict()
|
||||
local_deleted_snapshots = dict()
|
||||
|
||||
current_stage = 0
|
||||
total_stages = 1
|
||||
|
||||
start(
|
||||
celery,
|
||||
f"Starting cluster '{config['cluster']}' VM automirror",
|
||||
current=current_stage,
|
||||
total=total_stages,
|
||||
)
|
||||
|
||||
if not config["automirror_enabled"]:
|
||||
message = "Automirrors are not configured on this cluster."
|
||||
log_info(celery, message)
|
||||
return finish(
|
||||
celery,
|
||||
message,
|
||||
current=total_stages,
|
||||
total=total_stages,
|
||||
)
|
||||
|
||||
if email_recipients is not None:
|
||||
total_stages += 1
|
||||
|
||||
automirror_start_time = datetime.now()
|
||||
|
||||
retcode, vm_list = vm.get_list(zkhandler)
|
||||
if not retcode:
|
||||
error_message = f"Failed to fetch VM list: {vm_list}"
|
||||
log_err(celery, error_message)
|
||||
current_stage += 1
|
||||
send_execution_failure_report(
|
||||
celery,
|
||||
config,
|
||||
recipients=email_recipients,
|
||||
error=error_message,
|
||||
)
|
||||
fail(celery, error_message)
|
||||
return False
|
||||
|
||||
mirror_vms = list()
|
||||
for vm_detail in vm_list:
|
||||
mirror_vm = {
|
||||
"detail": vm_detail,
|
||||
"destinations": list(),
|
||||
}
|
||||
vm_tag_names = [t["name"] for t in vm_detail["tags"]]
|
||||
# Check if any of the mirror tags are present; if they are, then we should mirror
|
||||
vm_mirror_tags = list()
|
||||
for tag in vm_tag_names:
|
||||
if tag.split(":")[0] in config["mirror_tags"]:
|
||||
vm_mirror_tags.append(tag)
|
||||
|
||||
# There are no mirror tags, so skip this VM
|
||||
if len(vm_mirror_tags) < 1:
|
||||
continue
|
||||
|
||||
# Go through each tag to extract the cluster
|
||||
target_clusters = set()
|
||||
for tag in vm_mirror_tags:
|
||||
if len(tag.split(":")) == 1:
|
||||
# This is a direct match without any cluster suffix, so use the default
|
||||
target_clusters.add(config["mirror_default_destination"])
|
||||
if len(tag.split(":")) > 1:
|
||||
# This has a cluster suffix, so use that
|
||||
target_clusters.add(tag.split(":")[1])
|
||||
|
||||
for cluster in target_clusters:
|
||||
mirror_vm["destinations"].append(cluster)
|
||||
|
||||
mirror_vms.append(mirror_vm)
|
||||
|
||||
if len(mirror_vms) < 1:
|
||||
message = "Found no VMs tagged for automirror."
|
||||
log_info(celery, message)
|
||||
return finish(
|
||||
celery,
|
||||
message,
|
||||
current=total_stages,
|
||||
total=total_stages,
|
||||
)
|
||||
|
||||
total_stages += len(mirror_vms)
|
||||
|
||||
mirror_vm_names = set([b["detail"]["name"] for b in mirror_vms])
|
||||
|
||||
log_info(
|
||||
celery,
|
||||
f"Found {len(mirror_vm_names)} suitable VM(s) for automirror: {', '.join(mirror_vm_names)}",
|
||||
)
|
||||
|
||||
# Execute the backup: take a snapshot, then export the snapshot
|
||||
for mirror_vm in mirror_vms:
|
||||
vm_detail = mirror_vm["detail"]
|
||||
vm_destinations = mirror_vm["destinations"]
|
||||
|
||||
current_stage += 1
|
||||
update(
|
||||
celery,
|
||||
f"Performing automirror of VM {vm_detail['name']}",
|
||||
current=current_stage,
|
||||
total=total_stages,
|
||||
)
|
||||
|
||||
# Automirrors use a custom name to allow them to be properly cleaned up later
|
||||
now = datetime.now()
|
||||
datestring = now.strftime("%Y%m%d%H%M%S")
|
||||
snapshot_name = f"am{datestring}"
|
||||
|
||||
result, message = vm.vm_worker_create_snapshot(
|
||||
zkhandler,
|
||||
None,
|
||||
vm_detail["name"],
|
||||
snapshot_name=snapshot_name,
|
||||
return_status=True,
|
||||
)
|
||||
if not result:
|
||||
for destination in vm_destinations:
|
||||
mirror_summary[f"{vm_detail['name']}:{destination}"] = {
|
||||
"result": result,
|
||||
"snapshot_name": snapshot_name,
|
||||
"runtime_secs": 0,
|
||||
"result_message": message,
|
||||
}
|
||||
continue
|
||||
|
||||
remote_marked_for_deletion = dict()
|
||||
all_results = list()
|
||||
for destination in vm_destinations:
|
||||
mirror_start = datetime.now()
|
||||
result, ret = run_vm_mirror(
|
||||
zkhandler,
|
||||
celery,
|
||||
config,
|
||||
vm_detail,
|
||||
snapshot_name,
|
||||
destination,
|
||||
)
|
||||
mirror_end = datetime.now()
|
||||
runtime_secs = (mirror_end - mirror_start).seconds
|
||||
all_results.append(result)
|
||||
if result:
|
||||
remote_marked_for_deletion[destination] = ret
|
||||
|
||||
mirror_summary[f"{vm_detail['name']}:{destination}"] = {
|
||||
"result": result,
|
||||
"snapshot_name": snapshot_name,
|
||||
"runtime_secs": runtime_secs,
|
||||
}
|
||||
else:
|
||||
log_warn(
|
||||
celery,
|
||||
f"Error in mirror send: {ret}",
|
||||
)
|
||||
mirror_summary[f"{vm_detail['name']}:{destination}"] = {
|
||||
"result": result,
|
||||
"snapshot_name": snapshot_name,
|
||||
"runtime_secs": runtime_secs,
|
||||
"result_message": ret,
|
||||
}
|
||||
|
||||
# If all sends failed, remove the snapshot we created as it will never be needed or automatically cleaned up later
|
||||
if not any(all_results):
|
||||
vm.vm_worker_remove_snapshot(
|
||||
zkhandler,
|
||||
None,
|
||||
vm_detail["name"],
|
||||
snapshot_name,
|
||||
)
|
||||
|
||||
# Find all local snapshots that were present in all remote snapshot deletions,
|
||||
# then remove them
|
||||
# If one of the sends fails, this should result in nothing being removed
|
||||
if remote_marked_for_deletion:
|
||||
all_lists = [set(lst) for lst in remote_marked_for_deletion.values() if lst]
|
||||
if all_lists:
|
||||
local_marked_for_deletion = set.intersection(*all_lists)
|
||||
else:
|
||||
local_marked_for_deletion = set()
|
||||
else:
|
||||
local_marked_for_deletion = set()
|
||||
|
||||
for snapshot in local_marked_for_deletion:
|
||||
log_info(
|
||||
celery,
|
||||
f"VM {vm_detail['name']} removing stale local automirror snapshot {snapshot}",
|
||||
)
|
||||
vm.vm_worker_remove_snapshot(
|
||||
zkhandler,
|
||||
None,
|
||||
vm_detail["name"],
|
||||
snapshot,
|
||||
)
|
||||
|
||||
local_deleted_snapshots[vm_detail["name"]] = local_marked_for_deletion
|
||||
|
||||
automirror_end_time = datetime.now()
|
||||
automirror_total_time = automirror_end_time - automirror_start_time
|
||||
|
||||
if email_recipients is not None:
|
||||
current_stage += 1
|
||||
if email_errors_only and not all(
|
||||
[s["result"] for _, s in mirror_summary.items()]
|
||||
):
|
||||
# Send report if we're in errors only and at least one send failed
|
||||
send_report = True
|
||||
elif not email_errors_only:
|
||||
# Send report if we're not in errors only
|
||||
send_report = True
|
||||
else:
|
||||
# Otherwise (errors only and all successful) don't send
|
||||
send_report = False
|
||||
|
||||
if send_report:
|
||||
update(
|
||||
celery,
|
||||
"Sending automirror results summary email",
|
||||
current=current_stage,
|
||||
total=total_stages,
|
||||
)
|
||||
send_execution_summary_report(
|
||||
celery,
|
||||
config,
|
||||
recipients=email_recipients,
|
||||
total_time=automirror_total_time,
|
||||
summary=mirror_summary,
|
||||
local_deleted_snapshots=local_deleted_snapshots,
|
||||
)
|
||||
else:
|
||||
update(
|
||||
celery,
|
||||
"Skipping automirror results summary email (no failures)",
|
||||
current=current_stage,
|
||||
total=total_stages,
|
||||
)
|
||||
|
||||
current_stage += 1
|
||||
return finish(
|
||||
celery,
|
||||
f"Successfully completed cluster '{config['cluster']}' VM automirror",
|
||||
current=current_stage,
|
||||
total=total_stages,
|
||||
)
|
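As an aside for readers following the tag logic above: the sketch below is not part of the commit (the tag and cluster names are invented), but it shows how a VM's tags resolve to the set of destination clusters, matching the loop in `worker_cluster_automirror`.

```python
# Illustrative sketch only; mirrors the tag-parsing loop in worker_cluster_automirror.
# The tag and cluster names here are hypothetical examples.
def resolve_mirror_destinations(vm_tag_names, mirror_tags, default_destination):
    # Keep only tags whose base (before any ":cluster" suffix) is a configured mirror tag
    vm_mirror_tags = [t for t in vm_tag_names if t.split(":")[0] in mirror_tags]
    target_clusters = set()
    for tag in vm_mirror_tags:
        parts = tag.split(":")
        if len(parts) == 1:
            # No cluster suffix: use the default destination
            target_clusters.add(default_destination)
        else:
            # Cluster suffix present: send to that cluster
            target_clusters.add(parts[1])
    return target_clusters


# Example: a VM tagged "automirror" and "automirror:cluster3" is mirrored to both
# the default destination and "cluster3"; unrelated tags are ignored.
print(resolve_mirror_destinations(
    ["automirror", "automirror:cluster3", "unrelated"],
    mirror_tags=["automirror"],
    default_destination="cluster2",
))  # -> {'cluster2', 'cluster3'}
```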
@@ -22,6 +22,7 @@

import sys

from inspect import stack
from logging import getLogger
from time import sleep


@@ -32,7 +33,8 @@ class TaskFailure(Exception):

def start(celery, msg, current=0, total=1):
    logger = getLogger(__name__)
    logger.info(f"Starting {current}/{total}: {msg}")
    caller_name = stack()[1].function
    logger.info(f"Start {caller_name} {current}/{total}: {msg}")
    if celery is None:
        return
    celery.update_state(

@@ -42,13 +44,14 @@ def start(celery, msg, current=0, total=1):


def fail(celery, msg, exception=None, current=1, total=1):
    caller_name = stack()[1].function
    if exception is None:
        exception = TaskFailure

    msg = f"{type(exception()).__name__}: {msg}"

    logger = getLogger(__name__)
    logger.error(msg)
    logger.error(f"Fail {caller_name} {current}/{total}: {msg}")

    sys.tracebacklimit = 0
    raise exception(msg)

@@ -56,22 +59,26 @@ def fail(celery, msg, exception=None, current=1, total=1):

def log_info(celery, msg):
    logger = getLogger(__name__)
    logger.info(f"Task log: {msg}")
    caller_name = stack()[1].function
    logger.info(f"Log {caller_name}: {msg}")


def log_warn(celery, msg):
    logger = getLogger(__name__)
    logger.warning(f"Task log: {msg}")
    caller_name = stack()[1].function
    logger.warning(f"Log {caller_name}: {msg}")


def log_err(celery, msg):
    logger = getLogger(__name__)
    logger.error(f"Task log: {msg}")
    caller_name = stack()[1].function
    logger.error(f"Log {caller_name}: {msg}")


def update(celery, msg, current=1, total=2):
    logger = getLogger(__name__)
    logger.info(f"Task update {current}/{total}: {msg}")
    caller_name = stack()[1].function
    logger.info(f"Update {caller_name} {current}/{total}: {msg}")
    if celery is None:
        return
    celery.update_state(

@@ -82,7 +89,8 @@ def update(celery, msg, current=1, total=2):

def finish(celery, msg, current=2, total=2):
    logger = getLogger(__name__)
    logger.info(f"Task update {current}/{total}: Finishing up")
    caller_name = stack()[1].function
    logger.info(f"Update {caller_name} {current}/{total}: Finishing up")
    if celery is None:
        return
    celery.update_state(

@@ -90,5 +98,5 @@ def finish(celery, msg, current=2, total=2):
        meta={"current": current, "total": total, "status": "Finishing up"},
    )
    sleep(1)
    logger.info(f"Success {current}/{total}: {msg}")
    logger.info(f"Success {caller_name} {current}/{total}: {msg}")
    return {"status": msg, "current": current, "total": total}
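The reworked logging helpers above lean on the standard-library fact that `inspect.stack()[1]` is the caller's frame record. A minimal standalone illustration of the mechanism (not PVC code):

```python
# Standalone illustration of the mechanism used by the updated helpers above.
from inspect import stack


def log_info(msg):
    caller_name = stack()[1].function  # frame record of whoever called log_info()
    print(f"Log {caller_name}: {msg}")


def do_work():
    log_info("starting work")


do_work()  # prints: Log do_work: starting work
```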
@@ -121,6 +121,9 @@ def format_bytes_tohuman(databytes):


def format_bytes_fromhuman(datahuman):
    if not isinstance(datahuman, str):
        datahuman = str(datahuman)

    if not re.search(r"[A-Za-z]+", datahuman):
        dataunit = "B"
        datasize = float(datahuman)

@@ -486,34 +486,11 @@ def getDomainSnapshots(zkhandler, dom_uuid):

        _snap_timestamp = float(snap_timestamp)
        snap_age_secs = int(current_timestamp) - int(_snap_timestamp)
        snap_age = f"{snap_age_secs} seconds"
        snap_age_minutes = int(snap_age_secs / 60)
        if snap_age_minutes > 0:
            if snap_age_minutes > 1:
                s = "s"
            else:
                s = ""
            snap_age = f"{snap_age_minutes} minute{s}"
        snap_age_hours = int(snap_age_secs / 3600)
        if snap_age_hours > 0:
            if snap_age_hours > 1:
                s = "s"
            else:
                s = ""
            snap_age = f"{snap_age_hours} hour{s}"
        snap_age_days = int(snap_age_secs / 86400)
        if snap_age_days > 0:
            if snap_age_days > 1:
                s = "s"
            else:
                s = ""
            snap_age = f"{snap_age_days} day{s}"

        snapshots.append(
            {
                "name": snap_name,
                "timestamp": snap_timestamp,
                "age": snap_age,
                "age": snap_age_secs,
                "xml_diff_lines": snap_dom_xml_diff,
                "rbd_snapshots": snap_rbd_snapshots,
            }
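With `age` now returned as raw seconds, the human-readable formatting happens in the client instead. A rough client-side equivalent of the removed conversion (illustrative only, not the CLI client's actual code):

```python
# Illustrative only: convert a snapshot age in seconds to the largest viable unit,
# as the removed server-side code above did.
def age_tohuman(age_secs: int) -> str:
    for unit_secs, unit_name in ((86400, "day"), (3600, "hour"), (60, "minute")):
        count = int(age_secs / unit_secs)
        if count > 0:
            return f"{count} {unit_name}{'s' if count > 1 else ''}"
    return f"{age_secs} seconds"


print(age_tohuman(7200))  # "2 hours"
print(age_tohuman(45))    # "45 seconds"
```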
@@ -481,6 +481,64 @@ def get_autobackup_configuration():
    return config


def get_parsed_automirror_configuration(config_file):
    """
    Load the configuration; this is the same main pvc.conf that the daemons read
    """
    print('Loading configuration from file "{}"'.format(config_file))

    with open(config_file, "r") as cfgfh:
        try:
            o_config = yaml.load(cfgfh, Loader=yaml.SafeLoader)
        except Exception as e:
            print(f"ERROR: Failed to parse configuration file: {e}")
            os._exit(1)

    config = dict()

    try:
        o_cluster = o_config["cluster"]
        config_cluster = {
            "cluster": o_cluster["name"],
            "automirror_enabled": True,
        }
        config = {**config, **config_cluster}

        o_automirror = o_config["automirror"]
        if o_automirror is None:
            config["automirror_enabled"] = False
            return config

        config_automirror = {
            "mirror_tags": o_automirror["mirror_tags"],
            "mirror_destinations": o_automirror["destinations"],
            "mirror_default_destination": o_automirror["default_destination"],
            "mirror_keep_snapshots": o_automirror["keep_snapshots"],
        }
        config = {**config, **config_automirror}

        if config["mirror_default_destination"] not in [
            d for d in config["mirror_destinations"].keys()
        ]:
            raise Exception(
                "Specified default mirror destination is not in the list of destinations"
            )

    except Exception as e:
        raise MalformedConfigurationError(e)

    return config


def get_automirror_configuration():
    """
    Get the configuration.
    """
    pvc_config_file = get_configuration_path()
    config = get_parsed_automirror_configuration(pvc_config_file)
    return config


def validate_directories(config):
    if not os.path.exists(config["dynamic_directory"]):
        os.makedirs(config["dynamic_directory"])
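For reference, with an `automirror:` block like the sample shown later in this diff, `get_automirror_configuration()` yields a flat dict along these lines (the values below are illustrative placeholders, not output from the code):

```python
# Approximate shape of the parsed configuration; keys follow the parser above,
# values are hypothetical examples.
config = {
    "cluster": "cluster1",                     # from cluster -> name
    "automirror_enabled": True,
    "mirror_tags": ["automirror"],             # from automirror -> mirror_tags
    "mirror_destinations": {                   # from automirror -> destinations
        "cluster2": {"address": "pvc.cluster2.mydomain.tld", "port": 7370},  # abridged
    },
    "mirror_default_destination": "cluster2",  # from automirror -> default_destination
    "mirror_keep_snapshots": 7,                # from automirror -> keep_snapshots
}
```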
@@ -2107,6 +2107,7 @@ def vm_worker_create_snapshot(
    domain,
    snapshot_name=None,
    zk_only=False,
    return_status=False,
):
    if snapshot_name is None:
        now = datetime.now()

@@ -2124,27 +2125,34 @@ def vm_worker_create_snapshot(
    # Validate that VM exists in cluster
    dom_uuid = getDomainUUID(zkhandler, domain)
    if not dom_uuid:
        fail(
            celery,
            f"Could not find VM '{domain}' in the cluster",
        )
        return False
        message = (f"Could not find VM '{domain}' in the cluster",)
        fail(celery, message)
        if return_status:
            return False, message
        else:
            return False

    reg = re.compile("^[a-z0-9.-_]+$")
    if not reg.match(snapshot_name):
        fail(
            celery,
        message = (
            "Snapshot name '{snapshot_name}' contains invalid characters; only alphanumeric, '.', '-', and '_' characters are allowed",
        )
        return False
        fail(celery, message)
        if return_status:
            return False, message
        else:
            return False

    current_snapshots = zkhandler.children(("domain.snapshots", dom_uuid))
    if current_snapshots and snapshot_name in current_snapshots:
        fail(
            celery,
        message = (
            f"Snapshot name '{snapshot_name}' already exists for VM '{domain}'!",
        )
        return False
        fail(celery, message)
        if return_status:
            return False, message
        else:
            return False

    # Get the list of all RBD volumes
    rbd_list = zkhandler.read(("domain.storage.volumes", dom_uuid)).split(",")

@@ -2178,11 +2186,12 @@ def vm_worker_create_snapshot(
        )
        if not ret:
            cleanup_failure()
            fail(
                celery,
                msg.replace("ERROR: ", ""),
            )
            return False
            message = (msg.replace("ERROR: ", ""),)
            fail(celery, message)
            if return_status:
                return False, message
            else:
                return False
        else:
            snap_list.append(f"{pool}/{volume}@{snapshot_name}")

@@ -2242,12 +2251,22 @@ def vm_worker_create_snapshot(
    )

    current_stage += 1
    return finish(
        celery,
        f"Successfully created snapshot '{snapshot_name}' of VM '{domain}'",
        current=current_stage,
        total=total_stages,
    )
    message = (f"Successfully created snapshot '{snapshot_name}' of VM '{domain}'",)
    if return_status:
        finish(
            celery,
            message,
            current=current_stage,
            total=total_stages,
        )
        return True, message
    else:
        return finish(
            celery,
            message,
            current=current_stage,
            total=total_stages,
        )


def vm_worker_remove_snapshot(

@@ -3157,6 +3176,7 @@ def vm_worker_send_snapshot(
    destination_api_verify_ssl=True,
    incremental_parent=None,
    destination_storage_pool=None,
    return_status=False,
):

    current_stage = 0

@@ -3171,11 +3191,12 @@ def vm_worker_send_snapshot(
    # Validate that VM exists in cluster
    dom_uuid = getDomainUUID(zkhandler, domain)
    if not dom_uuid:
        fail(
            celery,
            f"Could not find VM '{domain}' in the cluster",
        )
        return False
        message = (f"Could not find VM '{domain}' in the cluster",)
        fail(celery, message)
        if return_status:
            return False, message
        else:
            return False

    # Get our side's VM configuration details
    try:

@@ -3184,31 +3205,34 @@ def vm_worker_send_snapshot(
        vm_detail = None

    if not isinstance(vm_detail, dict):
        fail(
            celery,
            f"VM listing returned invalid data: {vm_detail}",
        )
        return False
        message = (f"VM listing returned invalid data: {vm_detail}",)
        fail(celery, message)
        if return_status:
            return False, message
        else:
            return False

    # Check if the snapshot exists
    if not zkhandler.exists(
        ("domain.snapshots", dom_uuid, "domain_snapshot.name", snapshot_name)
    ):
        fail(
            celery,
            f"Could not find snapshot '{snapshot_name}' of VM '{domain}'",
        )
        return False
        message = (f"Could not find snapshot '{snapshot_name}' of VM '{domain}'",)
        fail(celery, message)
        if return_status:
            return False, message
        else:
            return False

    # Check if the incremental parent exists
    if incremental_parent is not None and not zkhandler.exists(
        ("domain.snapshots", dom_uuid, "domain_snapshot.name", incremental_parent)
    ):
        fail(
            celery,
            f"Could not find snapshot '{snapshot_name}' of VM '{domain}'",
        )
        return False
        message = (f"Could not find snapshot '{snapshot_name}' of VM '{domain}'",)
        fail(celery, message)
        if return_status:
            return False, message
        else:
            return False

    vm_name = vm_detail["name"]

@@ -3234,23 +3258,26 @@ def vm_worker_send_snapshot(
        if "PVC API" not in response.json().get("message"):
            raise ValueError("Remote API is not a PVC API or incorrect URI given")
    except requests.exceptions.ConnectionError as e:
        fail(
            celery,
            f"Connection to remote API timed out: {e}",
        )
        return False
        message = (f"Connection to remote API timed out: {e}",)
        fail(celery, message)
        if return_status:
            return False, message
        else:
            return False
    except ValueError as e:
        fail(
            celery,
            f"Connection to remote API is not valid: {e}",
        )
        return False
        message = (f"Connection to remote API is not valid: {e}",)
        fail(celery, message)
        if return_status:
            return False, message
        else:
            return False
    except Exception as e:
        fail(
            celery,
            f"Connection to remote API failed: {e}",
        )
        return False
        message = (f"Connection to remote API failed: {e}",)
        fail(celery, message)
        if return_status:
            return False, message
        else:
            return False

    # Hit the API "/status" endpoint to validate API key and cluster status
    response = session.get(

@@ -3263,11 +3290,14 @@ def vm_worker_send_snapshot(
        "pvc_version", None
    )
    if current_destination_pvc_version is None:
        fail(
            celery,
        message = (
            "Connection to remote API failed: no PVC version information returned",
        )
        return False
        fail(celery, message)
        if return_status:
            return False, message
        else:
            return False

    expected_destination_pvc_version = "0.9.101"
    # Work around development versions

@@ -3278,11 +3308,14 @@ def vm_worker_send_snapshot(
    if parse_version(current_destination_pvc_version) < parse_version(
        expected_destination_pvc_version
    ):
        fail(
            celery,
        message = (
            f"Remote PVC cluster is too old: requires version {expected_destination_pvc_version} or higher",
        )
        return False
        fail(celery, message)
        if return_status:
            return False, message
        else:
            return False

    # Check if the VM already exists on the remote
    response = session.get(

@@ -3301,11 +3334,12 @@ def vm_worker_send_snapshot(
        current_destination_vm_state is not None
        and current_destination_vm_state != "mirror"
    ):
        fail(
            celery,
            "Remote PVC VM exists and is not a mirror",
        )
        return False
        message = ("Remote PVC VM exists and is not a mirror",)
        fail(celery, message)
        if return_status:
            return False, message
        else:
            return False

    # Get details about VM snapshot
    _, snapshot_timestamp, snapshot_xml, snapshot_rbdsnaps = zkhandler.read_many(

@@ -3351,31 +3385,38 @@ def vm_worker_send_snapshot(

    # Check if this snapshot is in the remote list already
    if snapshot_name in [s["name"] for s in destination_vm_snapshots]:
        fail(
            celery,
            f"Snapshot {snapshot_name} already exists on the target",
        )
        return False
        message = (f"Snapshot {snapshot_name} already exists on the target",)
        fail(celery, message)
        if return_status:
            return False, message
        else:
            return False

    # Check if this snapshot is older than the latest remote VM snapshot
    if (
        len(destination_vm_snapshots) > 0
        and snapshot_timestamp < destination_vm_snapshots[0]["timestamp"]
    ):
        fail(
            celery,
        message = (
            f"Target has a newer snapshot ({destination_vm_snapshots[0]['name']}); cannot send old snapshot {snapshot_name}",
        )
        return False
        fail(celery, message)
        if return_status:
            return False, message
        else:
            return False

    # Check that our incremental parent exists on the remote VM
    if incremental_parent is not None:
        if incremental_parent not in [s["name"] for s in destination_vm_snapshots]:
            fail(
                celery,
            message = (
                f"Can not send incremental for a snapshot ({incremental_parent}) which does not exist on the target",
            )
            return False
            fail(celery, message)
            if return_status:
                return False, message
            else:
                return False

    # Begin send, set stages
    total_stages += 1 + (3 * len(snapshot_rbdsnaps))

@@ -3393,6 +3434,25 @@ def vm_worker_send_snapshot(
        "source_snapshot": incremental_parent,
    }

    # Strip out autobackup and automirror tags
    # These should never be wanted on the receiving side
    from daemon_lib.config import (
        get_autobackup_configuration,
        get_automirror_configuration,
    )

    autobackup_config = get_autobackup_configuration()
    automirror_config = get_automirror_configuration()
    new_tags = list()
    for tag in vm_detail["tags"]:
        tag_base = tag["name"].split(":")[0]
        if tag_base in [
            t for t in autobackup_config.get("backup_tags", [])
        ] or tag_base in [t for t in automirror_config.get("mirror_tags", [])]:
            continue
        new_tags.append(tag)
    vm_detail["tags"] = new_tags

    response = session.post(
        f"{destination_api_uri}/vm/{vm_name}/snapshot/receive/config",
        headers={"Content-Type": "application/json"},

@@ -3400,11 +3460,12 @@ def vm_worker_send_snapshot(
        json=vm_detail,
    )
    if response.status_code != 200:
        fail(
            celery,
            f"Failed to send config: {response.json()['message']}",
        )
        return False
        message = (f"Failed to send config: {response.json()['message']}",)
        fail(celery, message)
        if return_status:
            return False, message
        else:
            return False

    # Create the block devices on the remote side if this is a new VM send
    block_t_start = time.time()

@@ -3431,11 +3492,12 @@ def vm_worker_send_snapshot(
                error_message = f"Multiple details returned for volume {rbd_name}"
            else:
                error_message = f"Error getting details for volume {rbd_name}"
            fail(
                celery,
                error_message,
            )
            return False
            message = (error_message,)
            fail(celery, message)
            if return_status:
                return False, message
            else:
                return False

        try:
            local_volume_size = ceph.format_bytes_fromhuman(retdata[0]["stats"]["size"])

@@ -3460,11 +3522,12 @@ def vm_worker_send_snapshot(
            data=None,
        )
        if response.status_code != 404 and current_destination_vm_state is None:
            fail(
                celery,
                f"Remote storage pool {pool} already contains volume {volume}",
            )
            return False
            message = (f"Remote storage pool {pool} already contains volume {volume}",)
            fail(celery, message)
            if return_status:
                return False, message
            else:
                return False

        if current_destination_vm_state is not None:
            try:

@@ -3474,7 +3537,10 @@ def vm_worker_send_snapshot(
            except Exception as e:
                error_message = f"Failed to get volume size for remote {rbd_name}: {e}"
                fail(celery, error_message)
                return False
                if return_status:
                    return False, error_message
                else:
                    return False

            if local_volume_size != remote_volume_size:
                response = session.put(

@@ -3482,11 +3548,12 @@ def vm_worker_send_snapshot(
                    params={"new_size": local_volume_size, "force": True},
                )
                if response.status_code != 200:
                    fail(
                        celery,
                        "Failed to resize remote volume to match local volume",
                    )
                    return False
                    message = ("Failed to resize remote volume to match local volume",)
                    fail(celery, message)
                    if return_status:
                        return False, message
                    else:
                        return False

        # Send the volume to the remote
        cluster = rados.Rados(conffile="/etc/ceph/ceph.conf")

@@ -3557,11 +3624,14 @@ def vm_worker_send_snapshot(
                        stream=True,
                    )
                    if response.status_code != 200:
                        fail(
                            celery,
                        message = (
                            f"Failed to send diff batch: {response.json()['message']}",
                        )
                        return False
                        fail(celery, message)
                        if return_status:
                            return False, message
                        else:
                            return False

                    current_chunk_time = time.time()
                    chunk_time = current_chunk_time - last_chunk_time

@@ -3609,11 +3679,12 @@ def vm_worker_send_snapshot(
                    buffer.clear()  # Clear the buffer after sending
                    buffer_size = 0  # Reset buffer size
            except Exception:
                fail(
                    celery,
                    f"Failed to send snapshot: {response.json()['message']}",
                )
                return False
                message = (f"Failed to send snapshot: {response.json()['message']}",)
                fail(celery, message)
                if return_status:
                    return False, message
                else:
                    return False
            finally:
                image.close()
                ioctx.close()

@@ -3657,11 +3728,14 @@ def vm_worker_send_snapshot(
                    data=full_chunker(),
                )
                if response.status_code != 200:
                    fail(
                        celery,
                    message = (
                        f"Failed to send snapshot: {response.json()['message']}",
                    )
                    return False
                    fail(celery, message)
                    if return_status:
                        return False, message
                    else:
                        return False
            finally:
                image.close()
                ioctx.close()

@@ -3678,11 +3752,12 @@ def vm_worker_send_snapshot(
                    params=send_params,
                )
                if response.status_code != 200:
                    fail(
                        celery,
                        f"Failed to send snapshot: {response.json()['message']}",
                    )
                    return False
                    message = (f"Failed to send snapshot: {response.json()['message']}",)
                    fail(celery, message)
                    if return_status:
                        return False, message
                    else:
                        return False
            finally:
                image.close()
                ioctx.close()

@@ -3692,12 +3767,24 @@ def vm_worker_send_snapshot(
    block_mbps = round(block_total_mb / (block_t_end - block_t_start), 1)

    current_stage += 1
    return finish(
        celery,
    message = (
        f"Successfully sent snapshot '{snapshot_name}' of VM '{domain}' to remote cluster '{destination_api_uri}' (average {block_mbps} MB/s)",
        current=current_stage,
        total=total_stages,
    )
    if return_status:
        finish(
            celery,
            message,
            current=current_stage,
            total=total_stages,
        )
        return True, message
    else:
        return finish(
            celery,
            message,
            current=current_stage,
            total=total_stages,
        )


def vm_worker_create_mirror(
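A note on the recurring pattern in the hunks above: the new `return_status` parameter lets programmatic callers (such as `run_vm_mirror` in the automirror worker) receive an explicit `(success, message)` result, while the default path keeps the original behaviour of driving Celery state via `fail()`/`finish()`. A minimal sketch of the convention, not the actual worker code:

```python
# Minimal sketch of the dual return convention introduced by return_status.
def worker_do_thing(celery, return_status=False):
    message = "Successfully did the thing"
    if return_status:
        # Programmatic callers get an explicit success flag and message
        return True, message
    # Legacy callers keep the original single return value
    return message


ok, msg = worker_do_thing(None, return_status=True)
print(ok, msg)
```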
16  debian/changelog  (vendored)

@@ -1,3 +1,19 @@
pvc (0.9.105-0) unstable; urgency=high

  * [API Daemon/Provisioner] Corrects some small bugs with OVA handling

 -- Joshua M. Boniface <joshua@boniface.me>  Tue, 19 Nov 2024 14:43:43 -0500

pvc (0.9.104-0) unstable; urgency=high

  * [API Daemon] Fixes a bug that failed uploading of RAW block devices in "storage volume upload"
  * [API Daemon/CLI Client] Adds support for VM automirrors, replicating the functionality of autobackup but for cross-cluster mirroring
  * [CLI Client] Improves the help output of several commands
  * [API Daemon/CLI Client] Moves VM snapshot age conversions to human-readable values out of the API and into the client to open up more programatic handling in the future
  * [Worker Daemon] Improves the Celery logging output clarity by including the calling function in any task output

 -- Joshua M. Boniface <joshua@boniface.me>  Mon, 18 Nov 2024 10:53:56 -0500

pvc (0.9.103-0) unstable; urgency=high

  * [Provisioner] Fixes a bug with the change in `storage_hosts` to FQDNs affecting the VM Builder
@@ -33,7 +33,7 @@ import os
import signal

# Daemon version
version = "0.9.103"
version = "0.9.105"


##########################################################

@@ -49,7 +49,7 @@ import re
import json

# Daemon version
version = "0.9.103"
version = "0.9.105"


##########################################################
@@ -393,6 +393,8 @@ api:
    private_key: ""

# Automatic backups
# If this section is present, autobackups will be enabled; otherwise, they will be disabled.
# The pvc-ansible roles manage this including the various timer units, so avoid adjusting this manually.
autobackup:

  # Backup root path on the node, used as the remote mountpoint

@@ -451,5 +453,55 @@ autobackup:
      # This example shows a generic umount leveraging the backup_root_path variable
      - "/usr/bin/umount {backup_root_path}"

# Automatic mirroring to peer clusters
# If this section is present, automirrors will be enabled; otherwise, they will be disabled.
# The pvc-ansible roles manage this including the various timer units, so avoid adjusting this manually.
automirror:

  # Destination clusters
  # A list of destination cluster API endpoints to send mirrors to.
  # For each entry, the "name" field will be mapped to the "{cluster}" variable in the tag(s)
  # above. For more details on how exactly this works, please consult the documentation.
  destinations:

    # An example entry; contains the same information as a "pvc connection" entry
    # The key in this dictionary is the "name" of the cluster, which is what must be suffixed
    # to a tag and is displayed in the report and status output.
    cluster2:
      # The destination address, either an IP or an FQDN the destination API is reachable at
      address: pvc.cluster2.mydomain.tld
      # The destination port (usually 7370)
      port: 7370
      # The API prefix (usually '/api/v1') without a trailing slash
      prefix: "/api/v1"
      # The API key of the destination
      key: 00000000-0000-0000-0000-000000000000
      # Whether or not to use SSL for the connection
      ssl: yes
      # Whether or not to verify SSL for the connection
      verify_ssl: yes
      # Storage pool for VMs on the destination
      pool: vms

  # Default destination
  # The cluster name to send mirrors to for VMs without an explicit "{cluster}" tag
  # Always required, even if there is only a single destination
  default_destination: cluster2

  # VM tag(s) to mirror
  # Only VMs with at least one of the given tag(s) will be mirrored; all others will be skipped
  # All mirror tags support suffixing a ":{cluster}" argument, which will override the default
  # cluster and send mirrors to the given cluster name (in the list below). Multiple suffixed
  # tags are supported; if more than one is, the VM will be mirrored to all specified clusters.
  mirror_tags:
    - "automirror"

  # The number of snapshots to keep, on both sides - mirror snapshots older than the last
  # X snapshots will be automatically removed to save space
  # Depending on the interval specified in the pvc-ansible variables, this may be either a
  # relatively short or relatively long time.
  keep_snapshots: 7


# VIM modeline, requires "set modeline" in your VIMRC
# vim: expandtab shiftwidth=2 tabstop=2 filetype=yaml
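Tying this back to the worker code earlier in the diff: each destination entry above is turned into an API base URI by `run_vm_mirror`. A standalone sketch using the example `cluster2` values (illustrative only):

```python
# Builds the destination API URI the same way run_vm_mirror does, from the
# example "cluster2" entry above (values are the sample placeholders).
destination = {
    "address": "pvc.cluster2.mydomain.tld",
    "port": 7370,
    "prefix": "/api/v1",
    "ssl": True,
}
destination_api_uri = (
    f"{'https' if destination['ssl'] else 'http'}://"
    f"{destination['address']}:{destination['port']}{destination['prefix']}"
)
print(destination_api_uri)  # https://pvc.cluster2.mydomain.tld:7370/api/v1
```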
@@ -53,9 +53,12 @@ from daemon_lib.vmbuilder import (
from daemon_lib.autobackup import (
    worker_cluster_autobackup,
)
from daemon_lib.automirror import (
    worker_cluster_automirror,
)

# Daemon version
version = "0.9.103"
version = "0.9.105"


config = cfg.get_configuration()

@@ -122,6 +125,26 @@ def cluster_autobackup(self, force_full=False, email_recipients=None, run_on="pr
    )


@celery.task(name="cluster.automirror", bind=True, routing_key="run_on")
def cluster_automirror(
    self, email_recipients=None, email_errors_only=False, run_on="primary"
):
    @ZKConnection(config)
    def run_cluster_automirror(
        zkhandler, self, email_recipients=None, email_errors_only=False
    ):
        return worker_cluster_automirror(
            zkhandler,
            self,
            email_recipients=email_recipients,
            email_errors_only=email_errors_only,
        )

    return run_cluster_automirror(
        self, email_recipients=email_recipients, email_errors_only=email_errors_only
    )


@celery.task(name="vm.flush_locks", bind=True, routing_key="run_on")
def vm_flush_locks(self, domain=None, force_unlock=False, run_on="primary"):
    @ZKConnection(config)
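Because the task above is registered under the name `cluster.automirror`, any Celery client attached to the same broker could in principle enqueue it by name; PVC itself triggers it through its API and CLI. A hypothetical illustration only (the broker URL and recipient address are placeholders, and PVC's own routing of `run_on` is not reproduced here):

```python
# Hypothetical client-side trigger; not how PVC's own components invoke the task.
from celery import Celery

app = Celery("pvcworkerd", broker="redis://localhost:6379/0")  # placeholder broker

result = app.send_task(
    "cluster.automirror",
    kwargs={
        "email_recipients": ["ops@example.com"],  # example address
        "email_errors_only": True,
        "run_on": "primary",
    },
)
print(result.id)
```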