Compare commits

21 commits:

* aeb238f43c
* 671a907236
* e945fd8590
* a49510ecc8
* 6d7730ab52
* 8135426973
* 20d436a745
* 28f6819726
* 35c07f0384
* 6127387be4
* 343d66875b
* 92feeefd26
* 38d63d9837
* 095bcb2373
* 91e450f399
* 79eb994a5e
* d65f512897
* 8af7189dd0
* ea7a4b2b85
* 59f97ebbfb
* 072337f1f0
CHANGELOG.md (+25)

@@ -1,5 +1,30 @@
 ## PVC Changelog

+###### [v0.9.61](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.61)
+
+  * [provisioner] Fixes a bug in network comparison
+  * [api] Fixes a bug being unable to rename disabled VMs
+
+###### [v0.9.60](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.60)
+
+  * [Provisioner] Cleans up several remaining bugs in the example scripts; they should all be valid now
+  * [Provisioner] Adjust default libvirt schema to disable RBD caching for a 2x+ performance boost
+
+###### [v0.9.59](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.59)
+
+  * [API] Flips the mem(prov) and mem(free) selectors making mem(free) the default for "mem" and "memprov" explicit
+
+###### [v0.9.58](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.58)
+
+  * [API] Fixes a bug where migration selector could have case-sensitive operational faults
+
+###### [v0.9.57](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.57)
+
+  * [CLI] Removes an invalid reference to VXLAN
+  * [CLI] Improves the handling of invalid networks in VM lists and on attach
+  * [API] Modularizes the benchmarking library so it can be used externally too
+  * [Daemon Library] Adds a module tag file so it can be used externally too
+
 ###### [v0.9.56](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.56)

 **Breaking Change**: Existing provisioner scripts are no longer valid; new example scripts are provided.
@@ -398,7 +398,7 @@ class VMBuilderScript(VMBuilder):
             if volume.get("source_volume") is not None:
                 continue

-            if volume.get("filesystem") is None:
+            if volume.get("filesystem") is None or volume.get("filesystem") == "swap":
                 continue

             mapped_dst_volume = f"/dev/rbd/{dst_volume}"
@@ -473,7 +473,7 @@ class VMBuilderScript(VMBuilder):
         ]

         # We need to know our root disk for later GRUB-ing
-        root_disk = None
+        root_volume = None
         for volume in volumes:
             if volume["mountpoint"] == "/":
                 root_volume = volume
@@ -725,6 +725,7 @@ GRUB_DISABLE_LINUX_UUID=false
             if (
                 volume.get("source_volume") is None
                 and volume.get("filesystem") is not None
+                and volume.get("filesystem") != "swap"
             ):
                 # Unmount filesystem
                 retcode, stdout, stderr = pvc_common.run_os_command(
@@ -20,7 +20,7 @@
 ###############################################################################

 # This script provides an example of a PVC provisioner script. It will create a
-# standard VM config and install a RHEL-like OS using rinse.
+# standard VM config and install a RHEL 8+ or similar OS using rinse.

 # This script can thus be used as an example or reference implementation of a
 # PVC provisioner script and expanded upon as required.
@@ -398,7 +398,7 @@ class VMBuilderScript(VMBuilder):
             if volume.get("source_volume") is not None:
                 continue

-            if volume.get("filesystem") is None:
+            if volume.get("filesystem") is None or volume.get("filesystem") == "swap":
                 continue

             mapped_dst_volume = f"/dev/rbd/{dst_volume}"
@@ -487,7 +487,7 @@ class VMBuilderScript(VMBuilder):
         post_packages = ["cloud-init"]

         # We need to know our root disk for later GRUB-ing
-        root_disk = None
+        root_volume = None
         for volume in volumes:
             if volume["mountpoint"] == "/":
                 root_volume = volume
@@ -571,21 +571,6 @@ class VMBuilderScript(VMBuilder):
         with open(hostname_file, "w") as fh:
             fh.write("{}".format(vm_name))

-        # Fix the cloud-init.target since it's broken by default
-        cloudinit_target_file = "{}/etc/systemd/system/cloud-init.target".format(
-            temporary_directory
-        )
-        with open(cloudinit_target_file, "w") as fh:
-            # We lose our indent on these raw blocks to preserve the apperance of the files
-            # inside the VM itself
-            data = """[Install]
-WantedBy=multi-user.target
-[Unit]
-Description=Cloud-init target
-After=multi-user.target
-"""
-            fh.write(data)
-
         # Due to device ordering within the Libvirt XML configuration, the first Ethernet interface
         # will always be on PCI bus ID 2, hence the name "ens2".
         # Write a DHCP stanza for ens2
@@ -682,11 +667,6 @@ GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=
         # Set the timezone to UTC
         os.system("ln -sf ../usr/share/zoneinfo/UTC /etc/localtime")

-        # Unmount the bound devfs and sysfs
-        os.system("umount {}/dev".format(temporary_directory))
-        os.system("umount {}/sys".format(temporary_directory))
-        os.system("umount {}/proc".format(temporary_directory))
-
     def cleanup(self):
         """
         cleanup(): Perform any cleanup required due to prepare()/install()
@@ -700,6 +680,7 @@ GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=
         """

         # Run any imports first
+        import os
         from pvcapid.vmbuilder import open_zk
         from pvcapid.Daemon import config
         import daemon_lib.common as pvc_common
@@ -708,6 +689,11 @@ GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=
         # Set the tempdir we used in the prepare() and install() steps
         temp_dir = "/tmp/target"

+        # Unmount the bound devfs and sysfs
+        os.system(f"umount {temp_dir}/dev")
+        os.system(f"umount {temp_dir}/sys")
+        os.system(f"umount {temp_dir}/proc")
+
         # Use this construct for reversing the list, as the normal reverse() messes with the list
         for volume in list(reversed(self.vm_data["volumes"])):
             dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
@@ -718,6 +704,7 @@ GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=
             if (
                 volume.get("source_volume") is None
                 and volume.get("filesystem") is not None
+                and volume.get("filesystem") != "swap"
             ):
                 # Unmount filesystem
                 retcode, stdout, stderr = pvc_common.run_os_command(
@@ -27,7 +27,7 @@ from ssl import SSLContext, TLSVersion
 from distutils.util import strtobool as dustrtobool

 # Daemon version
-version = "0.9.56"
+version = "0.9.61"

 # API version
 API_VERSION = 1.0
@@ -32,6 +32,74 @@ import daemon_lib.common as pvc_common
 import daemon_lib.ceph as pvc_ceph


+# We run a total of 8 tests, to give a generalized idea of performance on the cluster:
+# 1. A sequential read test of 8GB with a 4M block size
+# 2. A sequential write test of 8GB with a 4M block size
+# 3. A random read test of 8GB with a 4M block size
+# 4. A random write test of 8GB with a 4M block size
+# 5. A random read test of 8GB with a 256k block size
+# 6. A random write test of 8GB with a 256k block size
+# 7. A random read test of 8GB with a 4k block size
+# 8. A random write test of 8GB with a 4k block size
+# Taken together, these 8 results should give a very good indication of the overall storage performance
+# for a variety of workloads.
+test_matrix = {
+    "seq_read": {
+        "direction": "read",
+        "iodepth": "64",
+        "bs": "4M",
+        "rw": "read",
+    },
+    "seq_write": {
+        "direction": "write",
+        "iodepth": "64",
+        "bs": "4M",
+        "rw": "write",
+    },
+    "rand_read_4M": {
+        "direction": "read",
+        "iodepth": "64",
+        "bs": "4M",
+        "rw": "randread",
+    },
+    "rand_write_4M": {
+        "direction": "write",
+        "iodepth": "64",
+        "bs": "4M",
+        "rw": "randwrite",
+    },
+    "rand_read_4K": {
+        "direction": "read",
+        "iodepth": "64",
+        "bs": "4K",
+        "rw": "randread",
+    },
+    "rand_write_4K": {
+        "direction": "write",
+        "iodepth": "64",
+        "bs": "4K",
+        "rw": "randwrite",
+    },
+    "rand_read_4K_lowdepth": {
+        "direction": "read",
+        "iodepth": "1",
+        "bs": "4K",
+        "rw": "randread",
+    },
+    "rand_write_4K_lowdepth": {
+        "direction": "write",
+        "iodepth": "1",
+        "bs": "4K",
+        "rw": "randwrite",
+    },
+}
+
+
+# Specify the benchmark volume name and size
+benchmark_volume_name = "pvcbenchmark"
+benchmark_volume_size = "8G"
+
+
 #
 # Exceptions (used by Celery tasks)
 #
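Each entry in the new module-level test_matrix maps a test name to the fio parameters used for that run (queue depth, block size, and read/write pattern). Note that the comment block carried over from the old inline code still mentions 256k-block tests that the shipped matrix does not define. As a rough sketch of how the matrix could be extended to cover them, the hypothetical entries below follow the same shape; they are illustrative only and not part of this changeset.

```python
# Hypothetical additions (not part of this changeset): 256k-block random tests in the
# same shape as the existing test_matrix entries.
test_matrix.update(
    {
        "rand_read_256K": {
            "direction": "read",
            "iodepth": "64",
            "bs": "256K",
            "rw": "randread",
        },
        "rand_write_256K": {
            "direction": "write",
            "iodepth": "64",
            "bs": "256K",
            "rw": "randwrite",
        },
    }
)
```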
@@ -44,7 +112,7 @@ class BenchmarkError(Exception):
         self, message, job_name=None, db_conn=None, db_cur=None, zkhandler=None
     ):
         self.message = message
-        if job_name is not None:
+        if job_name is not None and db_conn is not None and db_cur is not None:
             # Clean up our dangling result
             query = "DELETE FROM storage_benchmarks WHERE job = %s;"
             args = (job_name,)
@@ -52,6 +120,7 @@ class BenchmarkError(Exception):
             db_conn.commit()
             # Close the database connections cleanly
             close_database(db_conn, db_cur)
+        if job_name is not None and zkhandler is not None:
             zkhandler.disconnect()

     def __str__(self):
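With the added guards, BenchmarkError only attempts the database cleanup when it was handed both a connection and a cursor, and only disconnects Zookeeper when a handler was passed, so the exception can be raised safely by callers that do not hold those resources. A minimal sketch of the effect, assuming the module is importable as pvcapid.benchmark (a guess based on the other pvcapid imports in this changeset):

```python
# Sketch only; the import path is an assumption, not confirmed by this diff.
from pvcapid.benchmark import BenchmarkError

try:
    # Previously, passing a job_name without db_conn/db_cur would still attempt the
    # "DELETE FROM storage_benchmarks" cleanup and fail; with the guards it is skipped.
    raise BenchmarkError("fio run failed", job_name="benchmark-example")
except BenchmarkError as e:
    print(e)  # just the message; no database or Zookeeper cleanup is attempted
```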
@@ -116,6 +185,90 @@ def list_benchmarks(job=None):
         return {"message": "No benchmark found."}, 404


+def prepare_benchmark_volume(
+    pool, job_name=None, db_conn=None, db_cur=None, zkhandler=None
+):
+    # Create the RBD volume
+    retcode, retmsg = pvc_ceph.add_volume(
+        zkhandler, pool, benchmark_volume_name, benchmark_volume_size
+    )
+    if not retcode:
+        raise BenchmarkError(
+            'Failed to create volume "{}" on pool "{}": {}'.format(
+                benchmark_volume_name, pool, retmsg
+            ),
+            job_name=job_name,
+            db_conn=db_conn,
+            db_cur=db_cur,
+            zkhandler=zkhandler,
+        )
+    else:
+        print(retmsg)
+
+
+def cleanup_benchmark_volume(
+    pool, job_name=None, db_conn=None, db_cur=None, zkhandler=None
+):
+    # Remove the RBD volume
+    retcode, retmsg = pvc_ceph.remove_volume(zkhandler, pool, benchmark_volume_name)
+    if not retcode:
+        raise BenchmarkError(
+            'Failed to remove volume "{}" on pool "{}": {}'.format(
+                benchmark_volume_name, pool, retmsg
+            ),
+            job_name=job_name,
+            db_conn=db_conn,
+            db_cur=db_cur,
+            zkhandler=zkhandler,
+        )
+    else:
+        print(retmsg)
+
+
+def run_benchmark_job(
+    test, pool, job_name=None, db_conn=None, db_cur=None, zkhandler=None
+):
+    test_spec = test_matrix[test]
+    print("Running test '{}'".format(test))
+    fio_cmd = """
+        fio \
+            --name={test} \
+            --ioengine=rbd \
+            --pool={pool} \
+            --rbdname={volume} \
+            --output-format=json \
+            --direct=1 \
+            --randrepeat=1 \
+            --numjobs=1 \
+            --time_based \
+            --runtime=75 \
+            --group_reporting \
+            --iodepth={iodepth} \
+            --bs={bs} \
+            --readwrite={rw}
+    """.format(
+        test=test,
+        pool=pool,
+        volume=benchmark_volume_name,
+        iodepth=test_spec["iodepth"],
+        bs=test_spec["bs"],
+        rw=test_spec["rw"],
+    )
+
+    print("Running fio job: {}".format(" ".join(fio_cmd.split())))
+    retcode, stdout, stderr = pvc_common.run_os_command(fio_cmd)
+    if retcode:
+        raise BenchmarkError(
+            "Failed to run fio test: {}".format(stderr),
+            job_name=job_name,
+            db_conn=db_conn,
+            db_cur=db_cur,
+            zkhandler=zkhandler,
+        )
+
+    return loads(stdout)
+
+
 def run_benchmark(self, pool):
     # Runtime imports
     import time
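run_benchmark_job() returns the dictionary that fio emits with --output-format=json, parsed via loads(). As a sketch of how an external consumer of the now-modularized library might post-process one result, the helper below pulls the aggregate bandwidth and IOPS out of a single test; it assumes fio's standard JSON layout (a top-level "jobs" list containing "read" and "write" sections) and is not part of the changeset.

```python
# Sketch of post-processing one run_benchmark_job() result (hypothetical helper).
# Assumes fio's standard JSON schema: {"jobs": [{"read": {...}, "write": {...}}, ...]}.
def summarize_fio_result(result, direction):
    job = result["jobs"][0]        # --numjobs=1 with --group_reporting gives one entry
    section = job[direction]       # "read" or "write", matching the test's direction
    return {
        "bandwidth_kib_s": section["bw"],  # aggregate bandwidth in KiB/s
        "iops": section["iops"],           # aggregate I/O operations per second
    }

# Hypothetical usage against a results dict built the way run_benchmark() builds it:
# summary = summarize_fio_result(results["seq_read"], test_matrix["seq_read"]["direction"])
```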
@@ -172,20 +325,13 @@ def run_benchmark(self, pool):
     )
     time.sleep(1)

-    volume = "pvcbenchmark"
-
-    # Create the RBD volume
-    retcode, retmsg = pvc_ceph.add_volume(zkhandler, pool, volume, "8G")
-    if not retcode:
-        raise BenchmarkError(
-            'Failed to create volume "{}": {}'.format(volume, retmsg),
+    prepare_benchmark_volume(
+        pool,
         job_name=job_name,
         db_conn=db_conn,
         db_cur=db_cur,
         zkhandler=zkhandler,
     )
-    else:
-        print(retmsg)

     # Phase 2 - benchmark run
     self.update_state(
@@ -194,99 +340,17 @@ def run_benchmark(self, pool):
     )
     time.sleep(1)

-    # We run a total of 8 tests, to give a generalized idea of performance on the cluster:
-    # 1. A sequential read test of 8GB with a 4M block size
-    # 2. A sequential write test of 8GB with a 4M block size
-    # 3. A random read test of 8GB with a 4M block size
-    # 4. A random write test of 8GB with a 4M block size
-    # 5. A random read test of 8GB with a 256k block size
-    # 6. A random write test of 8GB with a 256k block size
-    # 7. A random read test of 8GB with a 4k block size
-    # 8. A random write test of 8GB with a 4k block size
-    # Taken together, these 8 results should give a very good indication of the overall storage performance
-    # for a variety of workloads.
-    test_matrix = {
-        "seq_read": {"direction": "read", "iodepth": "64", "bs": "4M", "rw": "read"},
-        "seq_write": {"direction": "write", "iodepth": "64", "bs": "4M", "rw": "write"},
-        "rand_read_4M": {
-            "direction": "read",
-            "iodepth": "64",
-            "bs": "4M",
-            "rw": "randread",
-        },
-        "rand_write_4M": {
-            "direction": "write",
-            "iodepth": "64",
-            "bs": "4M",
-            "rw": "randwrite",
-        },
-        "rand_read_4K": {
-            "direction": "read",
-            "iodepth": "64",
-            "bs": "4K",
-            "rw": "randread",
-        },
-        "rand_write_4K": {
-            "direction": "write",
-            "iodepth": "64",
-            "bs": "4K",
-            "rw": "randwrite",
-        },
-        "rand_read_4K_lowdepth": {
-            "direction": "read",
-            "iodepth": "1",
-            "bs": "4K",
-            "rw": "randread",
-        },
-        "rand_write_4K_lowdepth": {
-            "direction": "write",
-            "iodepth": "1",
-            "bs": "4K",
-            "rw": "randwrite",
-        },
-    }
-
     results = dict()
     for test in test_matrix:
-        print("Running test '{}'".format(test))
-        fio_cmd = """
-            fio \
-                --name={test} \
-                --ioengine=rbd \
-                --pool={pool} \
-                --rbdname={volume} \
-                --output-format=json \
-                --direct=1 \
-                --randrepeat=1 \
-                --numjobs=1 \
-                --time_based \
-                --runtime=75 \
-                --group_reporting \
-                --iodepth={iodepth} \
-                --bs={bs} \
-                --readwrite={rw}
-        """.format(
-            test=test,
-            pool=pool,
-            volume=volume,
-            iodepth=test_matrix[test]["iodepth"],
-            bs=test_matrix[test]["bs"],
-            rw=test_matrix[test]["rw"],
-        )
-
-        print("Running fio job: {}".format(" ".join(fio_cmd.split())))
-        retcode, stdout, stderr = pvc_common.run_os_command(fio_cmd)
-        if retcode:
-            raise BenchmarkError(
-                "Failed to run fio test: {}".format(stderr),
+        results[test] = run_benchmark_job(
+            test,
+            pool,
             job_name=job_name,
             db_conn=db_conn,
             db_cur=db_cur,
             zkhandler=zkhandler,
         )

-        results[test] = loads(stdout)
-
     # Phase 3 - cleanup
     self.update_state(
         state="RUNNING",
@@ -294,18 +358,13 @@ def run_benchmark(self, pool):
     )
     time.sleep(1)

-    # Remove the RBD volume
-    retcode, retmsg = pvc_ceph.remove_volume(zkhandler, pool, volume)
-    if not retcode:
-        raise BenchmarkError(
-            'Failed to remove volume "{}": {}'.format(volume, retmsg),
+    cleanup_benchmark_volume(
+        pool,
         job_name=job_name,
         db_conn=db_conn,
         db_cur=db_cur,
         zkhandler=zkhandler,
     )
-    else:
-        print(retmsg)

     print("Storing result of tests for job '{}' in database".format(job_name))
     try:
@@ -1253,7 +1253,7 @@ class API_VM_Root(Resource):
             {"name": "node"},
             {
                 "name": "selector",
-                "choices": ("mem", "memfree", "vcpus", "load", "vms", "none"),
+                "choices": ("mem", "memprov", "vcpus", "load", "vms", "none"),
                 "helptext": "A valid selector must be specified",
             },
             {"name": "autostart"},
@@ -1302,7 +1302,7 @@ class API_VM_Root(Resource):
             default: none
             enum:
               - mem
-              - memfree
+              - memprov
               - vcpus
               - load
               - vms
@@ -1400,7 +1400,7 @@ class API_VM_Element(Resource):
             {"name": "node"},
             {
                 "name": "selector",
-                "choices": ("mem", "memfree", "vcpus", "load", "vms", "none"),
+                "choices": ("mem", "memprov", "vcpus", "load", "vms", "none"),
                 "helptext": "A valid selector must be specified",
             },
             {"name": "autostart"},
@@ -1451,7 +1451,7 @@ class API_VM_Element(Resource):
             default: none
             enum:
               - mem
-              - memfree
+              - memprov
               - vcpus
               - load
               - vms
@@ -1650,7 +1650,7 @@ class API_VM_Metadata(Resource):
             {"name": "limit"},
             {
                 "name": "selector",
-                "choices": ("mem", "memfree", "vcpus", "load", "vms", "none"),
+                "choices": ("mem", "memprov", "vcpus", "load", "vms", "none"),
                 "helptext": "A valid selector must be specified",
             },
             {"name": "autostart"},
@@ -1682,7 +1682,7 @@ class API_VM_Metadata(Resource):
             description: The selector used to determine candidate nodes during migration; see 'target_selector' in the node daemon configuration reference
             enum:
               - mem
-              - memfree
+              - memprov
               - vcpus
               - load
               - vms
@@ -539,9 +539,9 @@ def get_vm_meta(zkhandler, vm):
     retdata = {
         "name": vm,
         "node_limit": domain_node_limit,
-        "node_selector": domain_node_selector,
+        "node_selector": domain_node_selector.lower(),
         "node_autostart": domain_node_autostart,
-        "migration_method": domain_migrate_method,
+        "migration_method": domain_migrate_method.lower(),
     }

     return retdata, retcode
@ -100,7 +100,7 @@ devices_scsi_controller = """ <controller type='scsi' index='0' model='virtio
|
||||
# * vm_name
|
||||
# * disk_id
|
||||
devices_disk_header = """ <disk type='network' device='disk'>
|
||||
<driver name='qemu' discard='unmap'/>
|
||||
<driver name='qemu' discard='unmap' cache='none'/>
|
||||
<target dev='{disk_id}' bus='scsi'/>
|
||||
<auth username='libvirt'>
|
||||
<secret type='ceph' uuid='{ceph_storage_secret}'/>
|
||||
|
@@ -580,7 +580,7 @@ def delete_template_network_element(name, vni):
     networks, code = list_template_network_vnis(name)
     found_vni = False
     for network in networks:
-        if network["vni"] == int(vni):
+        if network["vni"] == vni:
             found_vni = True
     if not found_vni:
         retmsg = {
@@ -679,6 +679,10 @@ def vm_networks_add(
     from random import randint
     import pvc.cli_lib.network as pvc_network

+    network_exists, _ = pvc_network.net_info(config, network)
+    if not network_exists:
+        return False, "Network {} not found on the cluster.".format(network)
+
     status, domain_information = vm_info(config, vm)
     if not status:
         return status, domain_information
@@ -2016,7 +2020,8 @@ def format_list(config, vm_list, raw):
         tag_list = getNiceTagName(domain_information)
         if len(tag_list) < 1:
             tag_list = ["N/A"]
-        vm_net_colour = ""
+
+        net_invalid_list = []
         for net_vni in net_list:
             if (
                 net_vni not in ["cluster", "storage", "upstream"]
@@ -2024,13 +2029,33 @@ def format_list(config, vm_list, raw):
                 and not re.match(r"^hostdev:.*", net_vni)
             ):
                 if int(net_vni) not in [net["vni"] for net in cluster_net_list]:
-                    vm_net_colour = ansiprint.red()
+                    net_invalid_list.append(True)
+                else:
+                    net_invalid_list.append(False)
+            else:
+                net_invalid_list.append(False)

+        net_string_list = []
+        for net_idx, net_vni in enumerate(net_list):
+            if net_invalid_list[net_idx]:
+                net_string_list.append(
+                    "{}{}{}".format(
+                        ansiprint.red(),
+                        net_vni,
+                        ansiprint.end(),
+                    )
+                )
+                # Fix the length due to the extra fake characters
+                vm_nets_length -= len(net_vni)
+                vm_nets_length += len(net_string_list[net_idx])
+            else:
+                net_string_list.append(net_vni)
+
         vm_list_output.append(
             "{bold}{vm_name: <{vm_name_length}} \
{vm_state_colour}{vm_state: <{vm_state_length}}{end_colour} \
{vm_tags: <{vm_tags_length}} \
-{vm_net_colour}{vm_networks: <{vm_nets_length}}{end_colour} \
+{vm_networks: <{vm_nets_length}} \
{vm_memory: <{vm_ram_length}} {vm_vcpu: <{vm_vcpu_length}} \
{vm_node: <{vm_node_length}} \
{vm_migrated: <{vm_migrated_length}}{end_bold}".format(
@@ -2049,8 +2074,7 @@ def format_list(config, vm_list, raw):
                 vm_name=domain_information["name"],
                 vm_state=domain_information["state"],
                 vm_tags=",".join(tag_list),
-                vm_net_colour=vm_net_colour,
-                vm_networks=",".join(net_list),
+                vm_networks=",".join(net_string_list),
                 vm_memory=domain_information["memory"],
                 vm_vcpu=domain_information["vcpu"],
                 vm_node=domain_information["node"],
@@ -807,7 +807,7 @@ def cli_vm():
     "node_selector",
     default="none",
     show_default=True,
-    type=click.Choice(["mem", "memfree", "load", "vcpus", "vms", "none"]),
+    type=click.Choice(["mem", "memprov", "load", "vcpus", "vms", "none"]),
     help='Method to determine optimal target node during autoselect; "none" will use the default for the cluster.',
 )
 @click.option(
@@ -859,15 +859,15 @@ def vm_define(
     Define a new virtual machine from Libvirt XML configuration file VMCONFIG.

     The target node selector ("--node-selector"/"-s") can be "none" to use the cluster default, or one of the following values:
-    * "mem": choose the node with the least provisioned VM memory
-    * "memfree": choose the node with the most (real) free memory
+    * "mem": choose the node with the most (real) free memory
+    * "memprov": choose the node with the least provisioned VM memory
     * "vcpus": choose the node with the least allocated VM vCPUs
     * "load": choose the node with the lowest current load average
     * "vms": choose the node with the least number of provisioned VMs

     For most clusters, "mem" should be sufficient, but others may be used based on the cluster workload and available resources. The following caveats should be considered:
-    * "mem" looks at the provisioned memory, not the allocated memory; thus, stopped or disabled VMs are counted towards a node's memory for this selector, even though their memory is not actively in use.
-    * "memfree" looks at the free memory of the node in general, ignoring the amount provisioned to VMs; if any VM's internal memory usage changes, this value would be affected. This might be preferable to "mem" on clusters with very high memory utilization versus total capacity or if many VMs are stopped/disabled.
+    * "mem" looks at the free memory of the node in general, ignoring the amount provisioned to VMs; if any VM's internal memory usage changes, this value would be affected.
+    * "memprov" looks at the provisioned memory, not the allocated memory; thus, stopped or disabled VMs are counted towards a node's memory for this selector, even though their memory is not actively in use.
     * "load" looks at the system load of the node in general, ignoring load in any particular VMs; if any VM's CPU usage changes, this value would be affected. This might be preferable on clusters with some very CPU intensive VMs.
     """
@@ -914,7 +914,7 @@ def vm_define(
     "node_selector",
     default=None,
     show_default=False,
-    type=click.Choice(["mem", "memfree", "load", "vcpus", "vms", "none"]),
+    type=click.Choice(["mem", "memprov", "load", "vcpus", "vms", "none"]),
     help='Method to determine optimal target node during autoselect; "none" will use the default for the cluster.',
 )
 @click.option(
@@ -2404,7 +2404,7 @@ def vm_list(target_node, target_state, target_tag, limit, raw, negate):
 )
 def cli_network():
     """
-    Manage the state of a VXLAN network in the PVC cluster.
+    Manage the state of a network in the PVC cluster.
     """
     pass

@@ -4134,7 +4134,7 @@ def provisioner_template_system_list(limit):
     "--node-selector",
     "node_selector",
     type=click.Choice(
-        ["mem", "memfree", "vcpus", "vms", "load", "none"], case_sensitive=False
+        ["mem", "memprov", "vcpus", "vms", "load", "none"], case_sensitive=False
     ),
     default="none",
     help='Method to determine optimal target node during autoselect; "none" will use the default for the cluster.',
@@ -4230,7 +4230,7 @@ def provisioner_template_system_add(
     "--node-selector",
     "node_selector",
     type=click.Choice(
-        ["mem", "memfree", "vcpus", "vms", "load", "none"], case_sensitive=False
+        ["mem", "memprov", "vcpus", "vms", "load", "none"], case_sensitive=False
     ),
     help='Method to determine optimal target node during autoselect; "none" will use the default for the cluster.',
 )
@@ -2,7 +2,7 @@ from setuptools import setup

 setup(
     name="pvc",
-    version="0.9.56",
+    version="0.9.61",
     packages=["pvc", "pvc.cli_lib"],
     install_requires=[
         "Click",
daemon-common/__init__.py (new empty file)
@@ -633,14 +633,14 @@ def findTargetNode(zkhandler, dom_uuid):
         search_field = None

     # If our search field is invalid, use the default
-    if search_field is None or search_field == "None":
+    if search_field is None or search_field in ["None", "none"]:
         search_field = zkhandler.read("base.config.migration_target_selector")

     # Execute the search
     if search_field == "mem":
-        return findTargetNodeMem(zkhandler, node_limit, dom_uuid)
-    if search_field == "memfree":
         return findTargetNodeMemFree(zkhandler, node_limit, dom_uuid)
+    if search_field == "memprov":
+        return findTargetNodeMemProv(zkhandler, node_limit, dom_uuid)
     if search_field == "load":
         return findTargetNodeLoad(zkhandler, node_limit, dom_uuid)
     if search_field == "vcpus":
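Together with the .lower() normalization added to define_vm(), modify_vm_metadata() and get_vm_meta() further down, this closes the case-sensitivity fault noted in v0.9.58: selector values are now written to Zookeeper lower-cased, and the fallback check accepts both "None" and "none". A small illustrative snippet (plain Python, not part of the changeset) showing why both spellings matter:

```python
# Illustrative only: str(None) is the string "None", so a selector that was never set
# and is passed through str(...).lower() ends up stored as "none", while records written
# by older daemons may still hold the literal "None".
for stored in (None, "None", "none", "Mem"):
    normalized = str(stored).lower()
    use_cluster_default = stored is None or stored in ["None", "none"]
    print(repr(stored), "->", repr(normalized), "| fall back to cluster default:", use_cluster_default)
```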
@@ -678,10 +678,28 @@ def getNodes(zkhandler, node_limit, dom_uuid):
     return valid_node_list


+#
+# via free memory
+#
+def findTargetNodeMemFree(zkhandler, node_limit, dom_uuid):
+    most_memfree = 0
+    target_node = None
+
+    node_list = getNodes(zkhandler, node_limit, dom_uuid)
+    for node in node_list:
+        memfree = int(zkhandler.read(("node.memory.free", node)))
+
+        if memfree > most_memfree:
+            most_memfree = memfree
+            target_node = node
+
+    return target_node
+
+
 #
 # via provisioned memory
 #
-def findTargetNodeMem(zkhandler, node_limit, dom_uuid):
+def findTargetNodeMemProv(zkhandler, node_limit, dom_uuid):
     most_provfree = 0
     target_node = None

@@ -700,24 +718,6 @@ def findTargetNodeMem(zkhandler, node_limit, dom_uuid):
     return target_node


-#
-# via free memory
-#
-def findTargetNodeMemFree(zkhandler, node_limit, dom_uuid):
-    most_memfree = 0
-    target_node = None
-
-    node_list = getNodes(zkhandler, node_limit, dom_uuid)
-    for node in node_list:
-        memfree = int(zkhandler.read(("node.memory.free", node)))
-
-        if memfree > most_memfree:
-            most_memfree = memfree
-            target_node = node
-
-    return target_node
-
-
 #
 # via load average
 #
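With the helpers reordered and renamed, "mem" now dispatches to findTargetNodeMemFree() (most real free memory) while the explicit "memprov" selector dispatches to the renamed findTargetNodeMemProv(). A toy sketch of the free-memory selection on made-up node data (node names and figures are hypothetical), mirroring the shape of findTargetNodeMemFree() above:

```python
# Toy illustration only; node names and free-memory figures are made up.
# Same selection shape as findTargetNodeMemFree(): pick the node with the most free memory.
node_memory_free = {
    "hv1": 48000,   # MiB free
    "hv2": 112000,
    "hv3": 75000,
}

target_node = None
most_memfree = 0
for node, memfree in node_memory_free.items():
    if memfree > most_memfree:
        most_memfree = memfree
        target_node = node

print(target_node)  # -> "hv2", the node with the most (real) free memory
```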
@@ -308,9 +308,9 @@ def define_vm(
         (("domain.console.log", dom_uuid), ""),
         (("domain.console.vnc", dom_uuid), ""),
         (("domain.meta.autostart", dom_uuid), node_autostart),
-        (("domain.meta.migrate_method", dom_uuid), migration_method),
+        (("domain.meta.migrate_method", dom_uuid), str(migration_method).lower()),
         (("domain.meta.node_limit", dom_uuid), formatted_node_limit),
-        (("domain.meta.node_selector", dom_uuid), node_selector),
+        (("domain.meta.node_selector", dom_uuid), str(node_selector).lower()),
         (("domain.meta.tags", dom_uuid), ""),
         (("domain.migrate.sync_lock", dom_uuid), ""),
     ]
@@ -447,7 +447,9 @@ def modify_vm_metadata(
         update_list.append((("domain.meta.node_limit", dom_uuid), node_limit))

     if node_selector is not None:
-        update_list.append((("domain.meta.node_selector", dom_uuid), node_selector))
+        update_list.append(
+            (("domain.meta.node_selector", dom_uuid), str(node_selector).lower())
+        )

     if node_autostart is not None:
         update_list.append((("domain.meta.autostart", dom_uuid), node_autostart))
@@ -456,7 +458,9 @@ def modify_vm_metadata(
         update_list.append((("domain.profile", dom_uuid), provisioner_profile))

     if migration_method is not None:
-        update_list.append((("domain.meta.migrate_method", dom_uuid), migration_method))
+        update_list.append(
+            (("domain.meta.migrate_method", dom_uuid), str(migration_method).lower())
+        )

     if len(update_list) < 1:
         return False, "ERROR: No updates to apply."
@@ -640,7 +644,7 @@ def rename_vm(zkhandler, domain, new_domain):

     # Verify that the VM is in a stopped state; renaming is not supported otherwise
     state = zkhandler.read(("domain.state", dom_uuid))
-    if state != "stop":
+    if state not in ["stop", "disable"]:
         return (
             False,
             'ERROR: VM "{}" is not in stopped state; VMs cannot be renamed while running.'.format(
debian/changelog (vendored, +35)

@@ -1,3 +1,38 @@
+pvc (0.9.61-0) unstable; urgency=high
+
+  * [provisioner] Fixes a bug in network comparison
+  * [api] Fixes a bug being unable to rename disabled VMs
+
+ -- Joshua M. Boniface <joshua@boniface.me>  Wed, 08 Feb 2023 10:08:05 -0500
+
+pvc (0.9.60-0) unstable; urgency=high
+
+  * [Provisioner] Cleans up several remaining bugs in the example scripts; they should all be valid now
+  * [Provisioner] Adjust default libvirt schema to disable RBD caching for a 2x+ performance boost
+
+ -- Joshua M. Boniface <joshua@boniface.me>  Tue, 06 Dec 2022 15:42:55 -0500
+
+pvc (0.9.59-0) unstable; urgency=high
+
+  * [API] Flips the mem(prov) and mem(free) selectors making mem(free) the default for "mem" and "memprov" explicit
+
+ -- Joshua M. Boniface <joshua@boniface.me>  Tue, 15 Nov 2022 15:50:15 -0500
+
+pvc (0.9.58-0) unstable; urgency=high
+
+  * [API] Fixes a bug where migration selector could have case-sensitive operational faults
+
+ -- Joshua M. Boniface <joshua@boniface.me>  Mon, 07 Nov 2022 12:27:48 -0500
+
+pvc (0.9.57-0) unstable; urgency=high
+
+  * [CLI] Removes an invalid reference to VXLAN
+  * [CLI] Improves the handling of invalid networks in VM lists and on attach
+  * [API] Modularizes the benchmarking library so it can be used externally too
+  * [Daemon Library] Adds a module tag file so it can be used externally too
+
+ -- Joshua M. Boniface <joshua@boniface.me>  Sun, 06 Nov 2022 01:39:50 -0400
+
 pvc (0.9.56-0) unstable; urgency=high

   * [API/Provisioner] Fundamentally revamps the provisioner script framework to provide more extensibility (BREAKING CHANGE)
@@ -356,15 +356,15 @@ The password for the PVC node daemon to log in to the IPMI interface.
 The default selector algorithm to use when migrating VMs away from a node; individual VMs can override this default.

 Valid `target_selector` values are:
-* `mem`: choose the node with the least provisioned VM memory
-* `memfree`: choose the node with the most (real) free memory
+* `mem`: choose the node with the most (real) free memory
+* `memprov`: choose the node with the least provisioned VM memory
 * `vcpus`: choose the node with the least allocated VM vCPUs
 * `load`: choose the node with the lowest current load average
 * `vms`: choose the node with the least number of provisioned VMs

 For most clusters, `mem` should be sufficient, but others may be used based on the cluster workload and available resources. The following caveats should be considered:
-* `mem` looks at the provisioned memory, not the allocated memory; thus, stopped or disabled VMs are counted towards a node's memory for this selector, even though their memory is not actively in use.
-* `memfree` looks at the free memory of the node in general, ignoring the amount provisioned to VMs; if any VM's internal memory usage changes, this value would be affected. This might be preferable to `mem` on clusters with very high memory utilization versus total capacity or if many VMs are stopped/disabled.
+* `mem` looks at the free memory of the node in general, ignoring the amount provisioned to VMs; if any VM's internal memory usage changes, this value would be affected.
+* `memprov` looks at the provisioned memory, not the allocated memory; thus, stopped or disabled VMs are counted towards a node's memory for this selector, even though their memory is not actively in use.
 * `load` looks at the system load of the node in general, ignoring load in any particular VMs; if any VM's CPU usage changes, this value would be affected. This might be preferable on clusters with some very CPU intensive VMs.

 #### `system` → `configuration` → `directories` → `dynamic_directory`
@@ -122,7 +122,7 @@ pvc:
       pass: Passw0rd
   # migration: Migration option configuration
   migration:
-    # target_selector: Criteria to select the ideal migration target, options: mem, memfree, load, vcpus, vms
+    # target_selector: Criteria to select the ideal migration target, options: mem, memprov, load, vcpus, vms
     target_selector: mem
   # configuration: Local system configurations
   configuration:
@@ -48,7 +48,7 @@ import re
 import json

 # Daemon version
-version = "0.9.56"
+version = "0.9.61"


 ##########################################################