Reformat code with Black code formatter
Unify the code style along PEP 8 and Black principles using the tool.
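For context, Black was presumably run over the whole tree (the exact invocation is not recorded in this commit); a minimal sketch of the same rewrite, using Black's real format_str API, looks like this:

    import black

    # format_str() applies the rewrites visible in the hunks below:
    # preferred double quotes, exploded argument lists, trailing commas.
    source = "deb_release = kwargs['deb_release']\n"
    print(black.format_str(source, mode=black.Mode()), end="")
    # Output: deb_release = kwargs["deb_release"]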
@@ -64,29 +64,35 @@ def install(**kwargs):
     # The provisioner has already mounted the disks on kwargs['temporary_directory'].
     # by this point, so we can get right to running the debootstrap after setting
     # some nicer variable names; you don't necessarily have to do this.
-    vm_name = kwargs['vm_name']
-    temporary_directory = kwargs['temporary_directory']
-    disks = kwargs['disks']
-    networks = kwargs['networks']
+    vm_name = kwargs["vm_name"]
+    temporary_directory = kwargs["temporary_directory"]
+    disks = kwargs["disks"]
+    networks = kwargs["networks"]
     # Our own required arguments. We should, though are not required to, handle
     # failures of these gracefully, should administrators forget to specify them.
     try:
-        deb_release = kwargs['deb_release']
+        deb_release = kwargs["deb_release"]
     except Exception:
         deb_release = "stable"
     try:
-        deb_mirror = kwargs['deb_mirror']
+        deb_mirror = kwargs["deb_mirror"]
     except Exception:
         deb_mirror = "http://ftp.debian.org/debian"
     try:
-        deb_packages = kwargs['deb_packages'].split(',')
+        deb_packages = kwargs["deb_packages"].split(",")
     except Exception:
-        deb_packages = ["linux-image-amd64", "grub-pc", "cloud-init", "python3-cffi-backend", "wget"]
+        deb_packages = [
+            "linux-image-amd64",
+            "grub-pc",
+            "cloud-init",
+            "python3-cffi-backend",
+            "wget",
+        ]

     # We need to know our root disk
     root_disk = None
     for disk in disks:
-        if disk['mountpoint'] == '/':
+        if disk["mountpoint"] == "/":
             root_disk = disk
     if not root_disk:
         return
@@ -95,9 +101,7 @@ def install(**kwargs):
     # good idea to include if you plan to use anything that is not part of the
     # base Debian host system, just in case the provisioner host is not properly
     # configured already.
-    os.system(
-        "apt-get install -y debootstrap"
-    )
+    os.system("apt-get install -y debootstrap")

     # Perform a deboostrap installation
     os.system(
@@ -105,16 +109,12 @@ def install(**kwargs):
             suite=deb_release,
             target=temporary_directory,
             mirror=deb_mirror,
-            pkgs=','.join(deb_packages)
+            pkgs=",".join(deb_packages),
         )
     )

     # Bind mount the devfs
-    os.system(
-        "mount --bind /dev {}/dev".format(
-            temporary_directory
-        )
-    )
+    os.system("mount --bind /dev {}/dev".format(temporary_directory))

     # Create an fstab entry for each disk
     fstab_file = "{}/etc/fstab".format(temporary_directory)
@@ -130,11 +130,11 @@ def install(**kwargs):
             options = "defaults,discard,noatime,nodiratime"

         # The root, var, and log volumes have specific values
-        if disk['mountpoint'] == "/":
-            root_disk['scsi_id'] = disk_id
+        if disk["mountpoint"] == "/":
+            root_disk["scsi_id"] = disk_id
             dump = 0
             cpass = 1
-        elif disk['mountpoint'] == '/var' or disk['mountpoint'] == '/var/log':
+        elif disk["mountpoint"] == "/var" or disk["mountpoint"] == "/var/log":
             dump = 0
             cpass = 2
         else:
@@ -142,14 +142,14 @@ def install(**kwargs):
             cpass = 0

         # Append the fstab line
-        with open(fstab_file, 'a') as fh:
+        with open(fstab_file, "a") as fh:
             data = "/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-{disk} {mountpoint} {filesystem} {options} {dump} {cpass}\n".format(
                 disk=disk_id,
-                mountpoint=disk['mountpoint'],
-                filesystem=disk['filesystem'],
+                mountpoint=disk["mountpoint"],
+                filesystem=disk["filesystem"],
                 options=options,
                 dump=dump,
-                cpass=cpass
+                cpass=cpass,
             )
             fh.write(data)

@@ -158,12 +158,14 @@ def install(**kwargs):

     # Write the hostname
     hostname_file = "{}/etc/hostname".format(temporary_directory)
-    with open(hostname_file, 'w') as fh:
+    with open(hostname_file, "w") as fh:
         fh.write("{}".format(vm_name))

     # Fix the cloud-init.target since it's broken
-    cloudinit_target_file = "{}/etc/systemd/system/cloud-init.target".format(temporary_directory)
-    with open(cloudinit_target_file, 'w') as fh:
+    cloudinit_target_file = "{}/etc/systemd/system/cloud-init.target".format(
+        temporary_directory
+    )
+    with open(cloudinit_target_file, "w") as fh:
         data = """[Install]
 WantedBy=multi-user.target
 [Unit]
@@ -176,7 +178,7 @@ After=multi-user.target
 # will always be on PCI bus ID 2, hence the name "ens2".
     # Write a DHCP stanza for ens2
     ens2_network_file = "{}/etc/network/interfaces.d/ens2".format(temporary_directory)
-    with open(ens2_network_file, 'w') as fh:
+    with open(ens2_network_file, "w") as fh:
         data = """auto ens2
 iface ens2 inet dhcp
 """
@@ -184,25 +186,31 @@ iface ens2 inet dhcp

     # Write the DHCP config for ens2
     dhclient_file = "{}/etc/dhcp/dhclient.conf".format(temporary_directory)
-    with open(dhclient_file, 'w') as fh:
-        data = """# DHCP client configuration
+    with open(dhclient_file, "w") as fh:
+        data = (
+            """# DHCP client configuration
 # Written by the PVC provisioner
 option rfc3442-classless-static-routes code 121 = array of unsigned integer 8;
 interface "ens2" {
-""" + """    send fqdn.fqdn = "{hostname}";
+"""
+            + """    send fqdn.fqdn = "{hostname}";
     send host-name = "{hostname}";
-""".format(hostname=vm_name) + """    request subnet-mask, broadcast-address, time-offset, routers,
+""".format(
+                hostname=vm_name
+            )
+            + """    request subnet-mask, broadcast-address, time-offset, routers,
     domain-name, domain-name-servers, domain-search, host-name,
     dhcp6.name-servers, dhcp6.domain-search, dhcp6.fqdn, dhcp6.sntp-servers,
     netbios-name-servers, netbios-scope, interface-mtu,
     rfc3442-classless-static-routes, ntp-servers;
 }
 """
+        )
         fh.write(data)

     # Write the GRUB configuration
     grubcfg_file = "{}/etc/default/grub".format(temporary_directory)
-    with open(grubcfg_file, 'w') as fh:
+    with open(grubcfg_file, "w") as fh:
         data = """# Written by the PVC provisioner
 GRUB_DEFAULT=0
 GRUB_TIMEOUT=1
@@ -212,35 +220,29 @@ GRUB_CMDLINE_LINUX=""
 GRUB_TERMINAL=console
 GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1"
 GRUB_DISABLE_LINUX_UUID=false
-""".format(root_disk=root_disk['scsi_id'])
+""".format(
+            root_disk=root_disk["scsi_id"]
+        )
         fh.write(data)

     # Chroot, do some in-root tasks, then exit the chroot
     with chroot_target(temporary_directory):
         # Install and update GRUB
         os.system(
-            "grub-install --force /dev/rbd/{}/{}_{}".format(root_disk['pool'], vm_name, root_disk['disk_id'])
-        )
-        os.system(
-            "update-grub"
+            "grub-install --force /dev/rbd/{}/{}_{}".format(
+                root_disk["pool"], vm_name, root_disk["disk_id"]
+            )
         )
+        os.system("update-grub")
        # Set a really dumb root password [TEMPORARY]
-        os.system(
-            "echo root:test123 | chpasswd"
-        )
+        os.system("echo root:test123 | chpasswd")
        # Enable cloud-init target on (first) boot
        # NOTE: Your user-data should handle this and disable it once done, or things get messy.
        # That cloud-init won't run without this hack seems like a bug... but even the official
        # Debian cloud images are affected, so who knows.
-        os.system(
-            "systemctl enable cloud-init.target"
-        )
+        os.system("systemctl enable cloud-init.target")

     # Unmount the bound devfs
-    os.system(
-        "umount {}/dev".format(
-            temporary_directory
-        )
-    )
+    os.system("umount {}/dev".format(temporary_directory))

     # Everything else is done via cloud-init user-data
@@ -35,9 +35,9 @@ def install(**kwargs):
     # The provisioner has already mounted the disks on kwargs['temporary_directory'].
     # by this point, so we can get right to running the debootstrap after setting
     # some nicer variable names; you don't necessarily have to do this.
-    vm_name = kwargs['vm_name']
-    temporary_directory = kwargs['temporary_directory']
-    disks = kwargs['disks']
-    networks = kwargs['networks']
+    vm_name = kwargs["vm_name"]
+    temporary_directory = kwargs["temporary_directory"]
+    disks = kwargs["disks"]
+    networks = kwargs["networks"]
     # No operation - this script just returns
     pass
@@ -28,7 +28,7 @@ from pvcapid.models import * # noqa F401,F403
 migrate = Migrate(app, db)
 manager = Manager(app)

-manager.add_command('db', MigrateCommand)
+manager.add_command("db", MigrateCommand)

-if __name__ == '__main__':
+if __name__ == "__main__":
     manager.run()
@@ -25,7 +25,7 @@ import yaml
 from distutils.util import strtobool as dustrtobool

 # Daemon version
-version = '0.9.42'
+version = "0.9.42"

 # API version
 API_VERSION = 1.0
@@ -35,6 +35,7 @@ API_VERSION = 1.0
 # Helper Functions
 ##########################################################

+
 def strtobool(stringv):
     if stringv is None:
         return False
@@ -52,54 +53,64 @@ def strtobool(stringv):

 # Parse the configuration file
 try:
-    pvcapid_config_file = os.environ['PVC_CONFIG_FILE']
+    pvcapid_config_file = os.environ["PVC_CONFIG_FILE"]
 except Exception:
-    print('Error: The "PVC_CONFIG_FILE" environment variable must be set before starting pvcapid.')
+    print(
+        'Error: The "PVC_CONFIG_FILE" environment variable must be set before starting pvcapid.'
+    )
     exit(1)

 print('Loading configuration from file "{}"'.format(pvcapid_config_file))

 # Read in the config
 try:
-    with open(pvcapid_config_file, 'r') as cfgfile:
+    with open(pvcapid_config_file, "r") as cfgfile:
         o_config = yaml.load(cfgfile, Loader=yaml.BaseLoader)
 except Exception as e:
-    print('ERROR: Failed to parse configuration file: {}'.format(e))
+    print("ERROR: Failed to parse configuration file: {}".format(e))
     exit(1)

 try:
     # Create the config object
     config = {
-        'debug': strtobool(o_config['pvc']['debug']),
-        'coordinators': o_config['pvc']['coordinators'],
-        'listen_address': o_config['pvc']['api']['listen_address'],
-        'listen_port': int(o_config['pvc']['api']['listen_port']),
-        'auth_enabled': strtobool(o_config['pvc']['api']['authentication']['enabled']),
-        'auth_secret_key': o_config['pvc']['api']['authentication']['secret_key'],
-        'auth_tokens': o_config['pvc']['api']['authentication']['tokens'],
-        'ssl_enabled': strtobool(o_config['pvc']['api']['ssl']['enabled']),
-        'ssl_key_file': o_config['pvc']['api']['ssl']['key_file'],
-        'ssl_cert_file': o_config['pvc']['api']['ssl']['cert_file'],
-        'database_host': o_config['pvc']['provisioner']['database']['host'],
-        'database_port': int(o_config['pvc']['provisioner']['database']['port']),
-        'database_name': o_config['pvc']['provisioner']['database']['name'],
-        'database_user': o_config['pvc']['provisioner']['database']['user'],
-        'database_password': o_config['pvc']['provisioner']['database']['pass'],
-        'queue_host': o_config['pvc']['provisioner']['queue']['host'],
-        'queue_port': o_config['pvc']['provisioner']['queue']['port'],
-        'queue_path': o_config['pvc']['provisioner']['queue']['path'],
-        'storage_hosts': o_config['pvc']['provisioner']['ceph_cluster']['storage_hosts'],
-        'storage_domain': o_config['pvc']['provisioner']['ceph_cluster']['storage_domain'],
-        'ceph_monitor_port': o_config['pvc']['provisioner']['ceph_cluster']['ceph_monitor_port'],
-        'ceph_storage_secret_uuid': o_config['pvc']['provisioner']['ceph_cluster']['ceph_storage_secret_uuid']
+        "debug": strtobool(o_config["pvc"]["debug"]),
+        "coordinators": o_config["pvc"]["coordinators"],
+        "listen_address": o_config["pvc"]["api"]["listen_address"],
+        "listen_port": int(o_config["pvc"]["api"]["listen_port"]),
+        "auth_enabled": strtobool(o_config["pvc"]["api"]["authentication"]["enabled"]),
+        "auth_secret_key": o_config["pvc"]["api"]["authentication"]["secret_key"],
+        "auth_tokens": o_config["pvc"]["api"]["authentication"]["tokens"],
+        "ssl_enabled": strtobool(o_config["pvc"]["api"]["ssl"]["enabled"]),
+        "ssl_key_file": o_config["pvc"]["api"]["ssl"]["key_file"],
+        "ssl_cert_file": o_config["pvc"]["api"]["ssl"]["cert_file"],
+        "database_host": o_config["pvc"]["provisioner"]["database"]["host"],
+        "database_port": int(o_config["pvc"]["provisioner"]["database"]["port"]),
+        "database_name": o_config["pvc"]["provisioner"]["database"]["name"],
+        "database_user": o_config["pvc"]["provisioner"]["database"]["user"],
+        "database_password": o_config["pvc"]["provisioner"]["database"]["pass"],
+        "queue_host": o_config["pvc"]["provisioner"]["queue"]["host"],
+        "queue_port": o_config["pvc"]["provisioner"]["queue"]["port"],
+        "queue_path": o_config["pvc"]["provisioner"]["queue"]["path"],
+        "storage_hosts": o_config["pvc"]["provisioner"]["ceph_cluster"][
+            "storage_hosts"
+        ],
+        "storage_domain": o_config["pvc"]["provisioner"]["ceph_cluster"][
+            "storage_domain"
+        ],
+        "ceph_monitor_port": o_config["pvc"]["provisioner"]["ceph_cluster"][
+            "ceph_monitor_port"
+        ],
+        "ceph_storage_secret_uuid": o_config["pvc"]["provisioner"]["ceph_cluster"][
+            "ceph_storage_secret_uuid"
+        ],
     }

     # Use coordinators as storage hosts if not explicitly specified
-    if not config['storage_hosts']:
-        config['storage_hosts'] = config['coordinators']
+    if not config["storage_hosts"]:
+        config["storage_hosts"] = config["coordinators"]

 except Exception as e:
-    print('ERROR: Failed to load configuration: {}'.format(e))
+    print("ERROR: Failed to load configuration: {}".format(e))
     exit(1)


@@ -107,31 +118,41 @@ except Exception as e:
 # Entrypoint
 ##########################################################

+
 def entrypoint():
     import pvcapid.flaskapi as pvc_api # noqa: E402

-    if config['ssl_enabled']:
-        context = (config['ssl_cert_file'], config['ssl_key_file'])
+    if config["ssl_enabled"]:
+        context = (config["ssl_cert_file"], config["ssl_key_file"])
     else:
         context = None

     # Print our startup messages
-    print('')
-    print('|----------------------------------------------------------|')
-    print('| |')
-    print('| ███████████ ▜█▙ ▟█▛ █████ █ █ █ |')
-    print('| ██ ▜█▙ ▟█▛ ██ |')
-    print('| ███████████ ▜█▙ ▟█▛ ██ |')
-    print('| ██ ▜█▙▟█▛ ███████████ |')
-    print('| |')
-    print('|----------------------------------------------------------|')
-    print('| Parallel Virtual Cluster API daemon v{0: <19} |'.format(version))
-    print('| Debug: {0: <49} |'.format(str(config['debug'])))
-    print('| API version: v{0: <42} |'.format(API_VERSION))
-    print('| Listen: {0: <48} |'.format('{}:{}'.format(config['listen_address'], config['listen_port'])))
-    print('| SSL: {0: <51} |'.format(str(config['ssl_enabled'])))
-    print('| Authentication: {0: <40} |'.format(str(config['auth_enabled'])))
-    print('|----------------------------------------------------------|')
-    print('')
+    print("")
+    print("|----------------------------------------------------------|")
+    print("| |")
+    print("| ███████████ ▜█▙ ▟█▛ █████ █ █ █ |")
+    print("| ██ ▜█▙ ▟█▛ ██ |")
+    print("| ███████████ ▜█▙ ▟█▛ ██ |")
+    print("| ██ ▜█▙▟█▛ ███████████ |")
+    print("| |")
+    print("|----------------------------------------------------------|")
+    print("| Parallel Virtual Cluster API daemon v{0: <19} |".format(version))
+    print("| Debug: {0: <49} |".format(str(config["debug"])))
+    print("| API version: v{0: <42} |".format(API_VERSION))
+    print(
+        "| Listen: {0: <48} |".format(
+            "{}:{}".format(config["listen_address"], config["listen_port"])
+        )
+    )
+    print("| SSL: {0: <51} |".format(str(config["ssl_enabled"])))
+    print("| Authentication: {0: <40} |".format(str(config["auth_enabled"])))
+    print("|----------------------------------------------------------|")
+    print("")

-    pvc_api.app.run(config['listen_address'], config['listen_port'], threaded=True, ssl_context=context)
+    pvc_api.app.run(
+        config["listen_address"],
+        config["listen_port"],
+        threaded=True,
+        ssl_context=context,
+    )
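Note in the hunks above that Black prefers double quotes but leaves a string single-quoted when flipping it would force escaping, which is why lines like print('Loading configuration from file "{}"'...) are untouched. A small sketch of that behavior (assuming Black is installed):

    import black

    # A plain string flips to double quotes:
    print(black.format_str("x = 'abc'", mode=black.Mode()), end="")
    # x = "abc"

    # A string containing double quotes keeps its single quotes:
    print(black.format_str("y = 'say \"hi\"'\n", mode=black.Mode()), end="")
    # y = 'say "hi"'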
@@ -39,7 +39,10 @@ class BenchmarkError(Exception):
     """
     An exception that results from the Benchmark job.
     """
-    def __init__(self, message, job_name=None, db_conn=None, db_cur=None, zkhandler=None):
+
+    def __init__(
+        self, message, job_name=None, db_conn=None, db_cur=None, zkhandler=None
+    ):
         self.message = message
         if job_name is not None:
             # Clean up our dangling result
@@ -54,6 +57,7 @@ class BenchmarkError(Exception):
     def __str__(self):
         return str(self.message)

+
 #
 # Common functions
 #
@@ -62,11 +66,11 @@ class BenchmarkError(Exception):
 # Database connections
 def open_database(config):
     conn = psycopg2.connect(
-        host=config['database_host'],
-        port=config['database_port'],
-        dbname=config['database_name'],
-        user=config['database_user'],
-        password=config['database_password']
+        host=config["database_host"],
+        port=config["database_port"],
+        dbname=config["database_name"],
+        user=config["database_user"],
+        password=config["database_password"],
     )
     cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
     return conn, cur
@@ -81,10 +85,10 @@ def close_database(conn, cur, failed=False):

 def list_benchmarks(job=None):
     if job is not None:
-        query = "SELECT * FROM {} WHERE job = %s;".format('storage_benchmarks')
-        args = (job, )
+        query = "SELECT * FROM {} WHERE job = %s;".format("storage_benchmarks")
+        args = (job,)
     else:
-        query = "SELECT * FROM {} ORDER BY id DESC;".format('storage_benchmarks')
+        query = "SELECT * FROM {} ORDER BY id DESC;".format("storage_benchmarks")
         args = ()

     conn, cur = open_database(config)
@@ -93,23 +97,23 @@ def list_benchmarks(job=None):
     data = list()
     for benchmark in orig_data:
         benchmark_data = dict()
-        benchmark_data['id'] = benchmark['id']
-        benchmark_data['job'] = benchmark['job']
-        benchmark_data['test_format'] = benchmark['test_format']
-        if benchmark['result'] == 'Running':
-            benchmark_data['benchmark_result'] = 'Running'
+        benchmark_data["id"] = benchmark["id"]
+        benchmark_data["job"] = benchmark["job"]
+        benchmark_data["test_format"] = benchmark["test_format"]
+        if benchmark["result"] == "Running":
+            benchmark_data["benchmark_result"] = "Running"
         else:
             try:
-                benchmark_data['benchmark_result'] = loads(benchmark['result'])
+                benchmark_data["benchmark_result"] = loads(benchmark["result"])
             except Exception:
-                benchmark_data['benchmark_result'] = {}
+                benchmark_data["benchmark_result"] = {}
         # Append the new data to our actual output structure
         data.append(benchmark_data)
     close_database(conn, cur)
     if data:
         return data, 200
     else:
-        return {'message': 'No benchmark found.'}, 404
+        return {"message": "No benchmark found."}, 404


 def run_benchmark(self, pool):
@@ -126,46 +130,68 @@ def run_benchmark(self, pool):
     try:
         db_conn, db_cur = open_database(config)
     except Exception:
-        print('FATAL - failed to connect to Postgres')
+        print("FATAL - failed to connect to Postgres")
         raise Exception

     try:
         zkhandler = ZKHandler(config)
         zkhandler.connect()
     except Exception:
-        print('FATAL - failed to connect to Zookeeper')
+        print("FATAL - failed to connect to Zookeeper")
         raise Exception

-    cur_time = datetime.now().isoformat(timespec='seconds')
-    cur_primary = zkhandler.read('base.config.primary_node')
-    job_name = '{}_{}'.format(cur_time, cur_primary)
+    cur_time = datetime.now().isoformat(timespec="seconds")
+    cur_primary = zkhandler.read("base.config.primary_node")
+    job_name = "{}_{}".format(cur_time, cur_primary)

     print("Starting storage benchmark '{}' on pool '{}'".format(job_name, pool))

     print("Storing running status for job '{}' in database".format(job_name))
     try:
         query = "INSERT INTO storage_benchmarks (job, test_format, result) VALUES (%s, %s, %s);"
-        args = (job_name, TEST_FORMAT, "Running",)
+        args = (
+            job_name,
+            TEST_FORMAT,
+            "Running",
+        )
         db_cur.execute(query, args)
         db_conn.commit()
     except Exception as e:
-        raise BenchmarkError("Failed to store running status: {}".format(e), job_name=job_name, db_conn=db_conn, db_cur=db_cur, zkhandler=zkhandler)
+        raise BenchmarkError(
+            "Failed to store running status: {}".format(e),
+            job_name=job_name,
+            db_conn=db_conn,
+            db_cur=db_cur,
+            zkhandler=zkhandler,
+        )

     # Phase 1 - volume preparation
-    self.update_state(state='RUNNING', meta={'current': 1, 'total': 3, 'status': 'Creating benchmark volume'})
+    self.update_state(
+        state="RUNNING",
+        meta={"current": 1, "total": 3, "status": "Creating benchmark volume"},
+    )
     time.sleep(1)

-    volume = 'pvcbenchmark'
+    volume = "pvcbenchmark"

     # Create the RBD volume
     retcode, retmsg = pvc_ceph.add_volume(zkhandler, pool, volume, "8G")
     if not retcode:
-        raise BenchmarkError('Failed to create volume "{}": {}'.format(volume, retmsg), job_name=job_name, db_conn=db_conn, db_cur=db_cur, zkhandler=zkhandler)
+        raise BenchmarkError(
+            'Failed to create volume "{}": {}'.format(volume, retmsg),
+            job_name=job_name,
+            db_conn=db_conn,
+            db_cur=db_cur,
+            zkhandler=zkhandler,
+        )
     else:
         print(retmsg)

     # Phase 2 - benchmark run
-    self.update_state(state='RUNNING', meta={'current': 2, 'total': 3, 'status': 'Running fio benchmarks on volume'})
+    self.update_state(
+        state="RUNNING",
+        meta={"current": 2, "total": 3, "status": "Running fio benchmarks on volume"},
+    )
     time.sleep(1)

     # We run a total of 8 tests, to give a generalized idea of performance on the cluster:
@@ -180,53 +206,43 @@ def run_benchmark(self, pool):
     # Taken together, these 8 results should give a very good indication of the overall storage performance
     # for a variety of workloads.
     test_matrix = {
-        'seq_read': {
-            'direction': 'read',
-            'iodepth': '64',
-            'bs': '4M',
-            'rw': 'read'
+        "seq_read": {"direction": "read", "iodepth": "64", "bs": "4M", "rw": "read"},
+        "seq_write": {"direction": "write", "iodepth": "64", "bs": "4M", "rw": "write"},
+        "rand_read_4M": {
+            "direction": "read",
+            "iodepth": "64",
+            "bs": "4M",
+            "rw": "randread",
         },
-        'seq_write': {
-            'direction': 'write',
-            'iodepth': '64',
-            'bs': '4M',
-            'rw': 'write'
+        "rand_write_4M": {
+            "direction": "write",
+            "iodepth": "64",
+            "bs": "4M",
+            "rw": "randwrite",
         },
-        'rand_read_4M': {
-            'direction': 'read',
-            'iodepth': '64',
-            'bs': '4M',
-            'rw': 'randread'
+        "rand_read_4K": {
+            "direction": "read",
+            "iodepth": "64",
+            "bs": "4K",
+            "rw": "randread",
        },
-        'rand_write_4M': {
-            'direction': 'write',
-            'iodepth': '64',
-            'bs': '4M',
-            'rw': 'randwrite'
+        "rand_write_4K": {
+            "direction": "write",
+            "iodepth": "64",
+            "bs": "4K",
+            "rw": "randwrite",
         },
-        'rand_read_4K': {
-            'direction': 'read',
-            'iodepth': '64',
-            'bs': '4K',
-            'rw': 'randread'
+        "rand_read_4K_lowdepth": {
+            "direction": "read",
+            "iodepth": "1",
+            "bs": "4K",
+            "rw": "randread",
         },
-        'rand_write_4K': {
-            'direction': 'write',
-            'iodepth': '64',
-            'bs': '4K',
-            'rw': 'randwrite'
-        },
-        'rand_read_4K_lowdepth': {
-            'direction': 'read',
-            'iodepth': '1',
-            'bs': '4K',
-            'rw': 'randread'
-        },
-        'rand_write_4K_lowdepth': {
-            'direction': 'write',
-            'iodepth': '1',
-            'bs': '4K',
-            'rw': 'randwrite'
+        "rand_write_4K_lowdepth": {
+            "direction": "write",
+            "iodepth": "1",
+            "bs": "4K",
+            "rw": "randwrite",
         },
     }

@@ -253,25 +269,41 @@ def run_benchmark(self, pool):
             test=test,
             pool=pool,
             volume=volume,
-            iodepth=test_matrix[test]['iodepth'],
-            bs=test_matrix[test]['bs'],
-            rw=test_matrix[test]['rw'])
+            iodepth=test_matrix[test]["iodepth"],
+            bs=test_matrix[test]["bs"],
+            rw=test_matrix[test]["rw"],
+        )

-        print("Running fio job: {}".format(' '.join(fio_cmd.split())))
+        print("Running fio job: {}".format(" ".join(fio_cmd.split())))
         retcode, stdout, stderr = pvc_common.run_os_command(fio_cmd)
         if retcode:
-            raise BenchmarkError("Failed to run fio test: {}".format(stderr), job_name=job_name, db_conn=db_conn, db_cur=db_cur, zkhandler=zkhandler)
+            raise BenchmarkError(
+                "Failed to run fio test: {}".format(stderr),
+                job_name=job_name,
+                db_conn=db_conn,
+                db_cur=db_cur,
+                zkhandler=zkhandler,
+            )

         results[test] = loads(stdout)

     # Phase 3 - cleanup
-    self.update_state(state='RUNNING', meta={'current': 3, 'total': 3, 'status': 'Cleaning up and storing results'})
+    self.update_state(
+        state="RUNNING",
+        meta={"current": 3, "total": 3, "status": "Cleaning up and storing results"},
+    )
     time.sleep(1)

     # Remove the RBD volume
     retcode, retmsg = pvc_ceph.remove_volume(zkhandler, pool, volume)
     if not retcode:
-        raise BenchmarkError('Failed to remove volume "{}": {}'.format(volume, retmsg), job_name=job_name, db_conn=db_conn, db_cur=db_cur, zkhandler=zkhandler)
+        raise BenchmarkError(
+            'Failed to remove volume "{}": {}'.format(volume, retmsg),
+            job_name=job_name,
+            db_conn=db_conn,
+            db_cur=db_cur,
+            zkhandler=zkhandler,
+        )
     else:
         print(retmsg)

@@ -282,10 +314,20 @@ def run_benchmark(self, pool):
         db_cur.execute(query, args)
         db_conn.commit()
     except Exception as e:
-        raise BenchmarkError("Failed to store test results: {}".format(e), job_name=job_name, db_conn=db_conn, db_cur=db_cur, zkhandler=zkhandler)
+        raise BenchmarkError(
+            "Failed to store test results: {}".format(e),
+            job_name=job_name,
+            db_conn=db_conn,
+            db_cur=db_cur,
+            zkhandler=zkhandler,
+        )

     close_database(db_conn, db_cur)
     zkhandler.disconnect()
     del zkhandler

-    return {'status': "Storage benchmark '{}' completed successfully.", 'current': 3, 'total': 3}
+    return {
+        "status": "Storage benchmark '{}' completed successfully.",
+        "current": 3,
+        "total": 3,
+    }
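The exploded tuples and dicts above (the args and test_matrix rewrites, for example) follow Black's "magic trailing comma": a collection that fits on one line is collapsed, but once a trailing comma is present Black keeps one element per line. A hypothetical illustration (these values are invented, not from the commit):

    # Fits on one line and has no trailing comma: Black collapses it.
    args = ("job", 1, "Running")

    # Magic trailing comma: Black keeps it exploded, as in the hunks above.
    args = (
        "job",
        1,
        "Running",
    )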
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -23,7 +23,7 @@ from pvcapid.flaskapi import db


 class DBSystemTemplate(db.Model):
-    __tablename__ = 'system_template'
+    __tablename__ = "system_template"

     id = db.Column(db.Integer, primary_key=True)
     name = db.Column(db.Text, nullable=False, unique=True)
@@ -38,7 +38,20 @@ class DBSystemTemplate(db.Model):
     migration_method = db.Column(db.Text)
     ova = db.Column(db.Integer, db.ForeignKey("ova.id"), nullable=True)

-    def __init__(self, name, vcpu_count, vram_mb, serial, vnc, vnc_bind, node_limit, node_selector, node_autostart, migration_method, ova=None):
+    def __init__(
+        self,
+        name,
+        vcpu_count,
+        vram_mb,
+        serial,
+        vnc,
+        vnc_bind,
+        node_limit,
+        node_selector,
+        node_autostart,
+        migration_method,
+        ova=None,
+    ):
         self.name = name
         self.vcpu_count = vcpu_count
         self.vram_mb = vram_mb
@@ -52,11 +65,11 @@ class DBSystemTemplate(db.Model):
         self.ova = ova

     def __repr__(self):
-        return '<id {}>'.format(self.id)
+        return "<id {}>".format(self.id)


 class DBNetworkTemplate(db.Model):
-    __tablename__ = 'network_template'
+    __tablename__ = "network_template"

     id = db.Column(db.Integer, primary_key=True)
     name = db.Column(db.Text, nullable=False, unique=True)
@@ -69,14 +82,16 @@ class DBNetworkTemplate(db.Model):
         self.ova = ova

     def __repr__(self):
-        return '<id {}>'.format(self.id)
+        return "<id {}>".format(self.id)


 class DBNetworkElement(db.Model):
-    __tablename__ = 'network'
+    __tablename__ = "network"

     id = db.Column(db.Integer, primary_key=True)
-    network_template = db.Column(db.Integer, db.ForeignKey("network_template.id"), nullable=False)
+    network_template = db.Column(
+        db.Integer, db.ForeignKey("network_template.id"), nullable=False
+    )
     vni = db.Column(db.Text, nullable=False)

     def __init__(self, network_template, vni):
@@ -84,11 +99,11 @@ class DBNetworkElement(db.Model):
         self.vni = vni

     def __repr__(self):
-        return '<id {}>'.format(self.id)
+        return "<id {}>".format(self.id)


 class DBStorageTemplate(db.Model):
-    __tablename__ = 'storage_template'
+    __tablename__ = "storage_template"

     id = db.Column(db.Integer, primary_key=True)
     name = db.Column(db.Text, nullable=False, unique=True)
@@ -99,14 +114,16 @@ class DBStorageTemplate(db.Model):
         self.ova = ova

     def __repr__(self):
-        return '<id {}>'.format(self.id)
+        return "<id {}>".format(self.id)


 class DBStorageElement(db.Model):
-    __tablename__ = 'storage'
+    __tablename__ = "storage"

     id = db.Column(db.Integer, primary_key=True)
-    storage_template = db.Column(db.Integer, db.ForeignKey("storage_template.id"), nullable=False)
+    storage_template = db.Column(
+        db.Integer, db.ForeignKey("storage_template.id"), nullable=False
+    )
     pool = db.Column(db.Text, nullable=False)
     disk_id = db.Column(db.Text, nullable=False)
     source_volume = db.Column(db.Text)
@@ -115,7 +132,17 @@ class DBStorageElement(db.Model):
     filesystem = db.Column(db.Text)
     filesystem_args = db.Column(db.Text)

-    def __init__(self, storage_template, pool, disk_id, source_volume, disk_size_gb, mountpoint, filesystem, filesystem_args):
+    def __init__(
+        self,
+        storage_template,
+        pool,
+        disk_id,
+        source_volume,
+        disk_size_gb,
+        mountpoint,
+        filesystem,
+        filesystem_args,
+    ):
         self.storage_template = storage_template
         self.pool = pool
         self.disk_id = disk_id
@@ -126,11 +153,11 @@ class DBStorageElement(db.Model):
         self.filesystem_args = filesystem_args

     def __repr__(self):
-        return '<id {}>'.format(self.id)
+        return "<id {}>".format(self.id)


 class DBUserdata(db.Model):
-    __tablename__ = 'userdata'
+    __tablename__ = "userdata"

     id = db.Column(db.Integer, primary_key=True)
     name = db.Column(db.Text, nullable=False, unique=True)
@@ -141,11 +168,11 @@ class DBUserdata(db.Model):
         self.userdata = userdata

     def __repr__(self):
-        return '<id {}>'.format(self.id)
+        return "<id {}>".format(self.id)


 class DBScript(db.Model):
-    __tablename__ = 'script'
+    __tablename__ = "script"

     id = db.Column(db.Integer, primary_key=True)
     name = db.Column(db.Text, nullable=False, unique=True)
@@ -156,11 +183,11 @@ class DBScript(db.Model):
         self.script = script

     def __repr__(self):
-        return '<id {}>'.format(self.id)
+        return "<id {}>".format(self.id)


 class DBOva(db.Model):
-    __tablename__ = 'ova'
+    __tablename__ = "ova"

     id = db.Column(db.Integer, primary_key=True)
     name = db.Column(db.Text, nullable=False, unique=True)
@@ -171,11 +198,11 @@ class DBOva(db.Model):
         self.ovf = ovf

     def __repr__(self):
-        return '<id {}>'.format(self.id)
+        return "<id {}>".format(self.id)


 class DBOvaVolume(db.Model):
-    __tablename__ = 'ova_volume'
+    __tablename__ = "ova_volume"

     id = db.Column(db.Integer, primary_key=True)
     ova = db.Column(db.Integer, db.ForeignKey("ova.id"), nullable=False)
@@ -194,11 +221,11 @@ class DBOvaVolume(db.Model):
         self.disk_size_gb = disk_size_gb

     def __repr__(self):
-        return '<id {}>'.format(self.id)
+        return "<id {}>".format(self.id)


 class DBProfile(db.Model):
-    __tablename__ = 'profile'
+    __tablename__ = "profile"

     id = db.Column(db.Integer, primary_key=True)
     name = db.Column(db.Text, nullable=False, unique=True)
@@ -211,7 +238,18 @@ class DBProfile(db.Model):
     ova = db.Column(db.Integer, db.ForeignKey("ova.id"))
     arguments = db.Column(db.Text)

-    def __init__(self, name, profile_type, system_template, network_template, storage_template, userdata, script, ova, arguments):
+    def __init__(
+        self,
+        name,
+        profile_type,
+        system_template,
+        network_template,
+        storage_template,
+        userdata,
+        script,
+        ova,
+        arguments,
+    ):
         self.name = name
         self.profile_type = profile_type
         self.system_template = system_template
@@ -223,15 +261,15 @@ class DBProfile(db.Model):
         self.arguments = arguments

     def __repr__(self):
-        return '<id {}>'.format(self.id)
+        return "<id {}>".format(self.id)


 class DBStorageBenchmarks(db.Model):
-    __tablename__ = 'storage_benchmarks'
+    __tablename__ = "storage_benchmarks"

     id = db.Column(db.Integer, primary_key=True)
     job = db.Column(db.Text, nullable=False)
-    test_format = db.Column(db.Integer, nullable=False, default=0, server_default='0')
+    test_format = db.Column(db.Integer, nullable=False, default=0, server_default="0")
     result = db.Column(db.Text, nullable=False)

     def __init__(self, job, result, test_format):
@@ -240,4 +278,4 @@ class DBStorageBenchmarks(db.Model):
         self.test_format = test_format

     def __repr__(self):
-        return '<id {}>'.format(self.id)
+        return "<id {}>".format(self.id)
@@ -47,11 +47,11 @@ import pvcapid.provisioner as provisioner
 # Database connections
 def open_database(config):
     conn = psycopg2.connect(
-        host=config['database_host'],
-        port=config['database_port'],
-        dbname=config['database_name'],
-        user=config['database_user'],
-        password=config['database_password']
+        host=config["database_host"],
+        port=config["database_port"],
+        dbname=config["database_name"],
+        user=config["database_user"],
+        password=config["database_password"],
     )
     cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
     return conn, cur
@@ -71,19 +71,19 @@ def list_ova(limit, is_fuzzy=True):
     if limit:
         if is_fuzzy:
             # Handle fuzzy vs. non-fuzzy limits
-            if not re.match(r'\^.*', limit):
-                limit = '%' + limit
+            if not re.match(r"\^.*", limit):
+                limit = "%" + limit
             else:
                 limit = limit[1:]
-            if not re.match(r'.*\$', limit):
-                limit = limit + '%'
+            if not re.match(r".*\$", limit):
+                limit = limit + "%"
             else:
                 limit = limit[:-1]

-        query = "SELECT id, name FROM {} WHERE name LIKE %s;".format('ova')
-        args = (limit, )
+        query = "SELECT id, name FROM {} WHERE name LIKE %s;".format("ova")
+        args = (limit,)
     else:
-        query = "SELECT id, name FROM {};".format('ova')
+        query = "SELECT id, name FROM {};".format("ova")
         args = ()

     conn, cur = open_database(config)
@@ -94,34 +94,36 @@ def list_ova(limit, is_fuzzy=True):
     ova_data = list()

     for ova in data:
-        ova_id = ova.get('id')
-        ova_name = ova.get('name')
+        ova_id = ova.get("id")
+        ova_name = ova.get("name")

-        query = "SELECT pool, volume_name, volume_format, disk_id, disk_size_gb FROM {} WHERE ova = %s;".format('ova_volume')
+        query = "SELECT pool, volume_name, volume_format, disk_id, disk_size_gb FROM {} WHERE ova = %s;".format(
+            "ova_volume"
+        )
         args = (ova_id,)
         conn, cur = open_database(config)
         cur.execute(query, args)
         volumes = cur.fetchall()
         close_database(conn, cur)

-        ova_data.append({'id': ova_id, 'name': ova_name, 'volumes': volumes})
+        ova_data.append({"id": ova_id, "name": ova_name, "volumes": volumes})

     if ova_data:
         return ova_data, 200
     else:
-        return {'message': 'No OVAs found.'}, 404
+        return {"message": "No OVAs found."}, 404


 @ZKConnection(config)
 def delete_ova(zkhandler, name):
     ova_data, retcode = list_ova(name, is_fuzzy=False)
     if retcode != 200:
-        retmsg = {'message': 'The OVA "{}" does not exist.'.format(name)}
+        retmsg = {"message": 'The OVA "{}" does not exist.'.format(name)}
         retcode = 400
         return retmsg, retcode

     conn, cur = open_database(config)
-    ova_id = ova_data[0].get('id')
+    ova_id = ova_data[0].get("id")
     try:
         # Get the list of volumes for this OVA
         query = "SELECT pool, volume_name FROM ova_volume WHERE ova = %s;"
@@ -131,7 +133,9 @@ def delete_ova(zkhandler, name):

         # Remove each volume for this OVA
         for volume in volumes:
-            pvc_ceph.remove_volume(zkhandler, volume.get('pool'), volume.get('volume_name'))
+            pvc_ceph.remove_volume(
+                zkhandler, volume.get("pool"), volume.get("volume_name")
+            )

         # Delete the volume entries from the database
         query = "DELETE FROM ova_volume WHERE ova = %s;"
@@ -156,7 +160,7 @@ def delete_ova(zkhandler, name):
         retmsg = {"message": 'Removed OVA image "{}".'.format(name)}
         retcode = 200
     except Exception as e:
-        retmsg = {'message': 'Failed to remove OVA "{}": {}'.format(name, e)}
+        retmsg = {"message": 'Failed to remove OVA "{}": {}'.format(name, e)}
         retcode = 400
     close_database(conn, cur)
     return retmsg, retcode
@@ -174,20 +178,22 @@ def upload_ova(zkhandler, pool, name, ova_size):
         # Unmap the OVA temporary blockdev
         retflag, retdata = pvc_ceph.unmap_volume(zkhandler, pool, "ova_{}".format(name))
         # Remove the OVA temporary blockdev
-        retflag, retdata = pvc_ceph.remove_volume(zkhandler, pool, "ova_{}".format(name))
+        retflag, retdata = pvc_ceph.remove_volume(
+            zkhandler, pool, "ova_{}".format(name)
+        )

     # Normalize the OVA size to bytes
     ova_size_bytes = pvc_ceph.format_bytes_fromhuman(ova_size)
-    ova_size = '{}B'.format(ova_size_bytes)
+    ova_size = "{}B".format(ova_size_bytes)

     # Verify that the cluster has enough space to store the OVA volumes (2x OVA size, temporarily, 1x permanently)
     pool_information = pvc_ceph.getPoolInformation(zkhandler, pool)
-    pool_free_space_bytes = int(pool_information['stats']['free_bytes'])
+    pool_free_space_bytes = int(pool_information["stats"]["free_bytes"])
     if ova_size_bytes * 2 >= pool_free_space_bytes:
         output = {
-            'message': "The cluster does not have enough free space ({}) to store the OVA volume ({}).".format(
+            "message": "The cluster does not have enough free space ({}) to store the OVA volume ({}).".format(
                 pvc_ceph.format_bytes_tohuman(pool_free_space_bytes),
-                pvc_ceph.format_bytes_tohuman(ova_size_bytes)
+                pvc_ceph.format_bytes_tohuman(ova_size_bytes),
             )
         }
         retcode = 400
@@ -195,11 +201,11 @@ def upload_ova(zkhandler, pool, name, ova_size):
         return output, retcode

     # Create a temporary OVA blockdev
-    retflag, retdata = pvc_ceph.add_volume(zkhandler, pool, "ova_{}".format(name), ova_size)
+    retflag, retdata = pvc_ceph.add_volume(
+        zkhandler, pool, "ova_{}".format(name), ova_size
+    )
     if not retflag:
-        output = {
-            'message': retdata.replace('\"', '\'')
-        }
+        output = {"message": retdata.replace('"', "'")}
         retcode = 400
         cleanup_ova_maps_and_volumes()
         return output, retcode
@@ -207,9 +213,7 @@ def upload_ova(zkhandler, pool, name, ova_size):
     # Map the temporary OVA blockdev
     retflag, retdata = pvc_ceph.map_volume(zkhandler, pool, "ova_{}".format(name))
     if not retflag:
-        output = {
-            'message': retdata.replace('\"', '\'')
-        }
+        output = {"message": retdata.replace('"', "'")}
         retcode = 400
         cleanup_ova_maps_and_volumes()
         return output, retcode
@@ -221,13 +225,14 @@ def upload_ova(zkhandler, pool, name, ova_size):
         # rather than the standard stream_factory which writes to a temporary file waiting
         # on a save() call. This will break if the API ever uploaded multiple files, but
         # this is an acceptable workaround.
-        def ova_stream_factory(total_content_length, filename, content_type, content_length=None):
-            return open(ova_blockdev, 'wb')
+        def ova_stream_factory(
+            total_content_length, filename, content_type, content_length=None
+        ):
+            return open(ova_blockdev, "wb")
+
         parse_form_data(flask.request.environ, stream_factory=ova_stream_factory)
     except Exception:
-        output = {
-            'message': "Failed to upload or write OVA file to temporary volume."
-        }
+        output = {"message": "Failed to upload or write OVA file to temporary volume."}
         retcode = 400
         cleanup_ova_maps_and_volumes()
         return output, retcode
@@ -238,15 +243,13 @@ def upload_ova(zkhandler, pool, name, ova_size):
         # Determine the files in the OVA
         members = ova_archive.getmembers()
     except tarfile.TarError:
-        output = {
-            'message': "The uploaded OVA file is not readable."
-        }
+        output = {"message": "The uploaded OVA file is not readable."}
         retcode = 400
         cleanup_ova_maps_and_volumes()
         return output, retcode

     # Parse through the members list and extract the OVF file
-    for element in set(x for x in members if re.match(r'.*\.ovf$', x.name)):
+    for element in set(x for x in members if re.match(r".*\.ovf$", x.name)):
         ovf_file = ova_archive.extractfile(element)

     # Parse the OVF file to get our VM details
@@ -261,14 +264,14 @@ def upload_ova(zkhandler, pool, name, ova_size):

     # Create and upload each disk volume
     for idx, disk in enumerate(disk_map):
-        disk_identifier = "sd{}".format(chr(ord('a') + idx))
+        disk_identifier = "sd{}".format(chr(ord("a") + idx))
         volume = "ova_{}_{}".format(name, disk_identifier)
-        dev_src = disk.get('src')
+        dev_src = disk.get("src")
         dev_size_raw = ova_archive.getmember(dev_src).size
-        vm_volume_size = disk.get('capacity')
+        vm_volume_size = disk.get("capacity")

         # Normalize the dev size to bytes
-        dev_size = '{}B'.format(pvc_ceph.format_bytes_fromhuman(dev_size_raw))
+        dev_size = "{}B".format(pvc_ceph.format_bytes_fromhuman(dev_size_raw))

         def cleanup_img_maps():
             # Unmap the temporary blockdev
@@ -277,9 +280,7 @@ def upload_ova(zkhandler, pool, name, ova_size):
         # Create the blockdev
         retflag, retdata = pvc_ceph.add_volume(zkhandler, pool, volume, dev_size)
         if not retflag:
-            output = {
-                'message': retdata.replace('\"', '\'')
-            }
+            output = {"message": retdata.replace('"', "'")}
             retcode = 400
             cleanup_img_maps()
             cleanup_ova_maps_and_volumes()
@@ -288,9 +289,7 @@ def upload_ova(zkhandler, pool, name, ova_size):
         # Map the blockdev
         retflag, retdata = pvc_ceph.map_volume(zkhandler, pool, volume)
         if not retflag:
-            output = {
-                'message': retdata.replace('\"', '\'')
-            }
+            output = {"message": retdata.replace('"', "'")}
             retcode = 400
             cleanup_img_maps()
             cleanup_ova_maps_and_volumes()
@@ -299,10 +298,10 @@ def upload_ova(zkhandler, pool, name, ova_size):

         try:
             # Open (extract) the TAR archive file and seek to byte 0
-            vmdk_file = ova_archive.extractfile(disk.get('src'))
+            vmdk_file = ova_archive.extractfile(disk.get("src"))
             vmdk_file.seek(0)
             # Open the temporary blockdev and seek to byte 0
-            blk_file = open(temp_blockdev, 'wb')
+            blk_file = open(temp_blockdev, "wb")
             blk_file.seek(0)
             # Write the contents of vmdk_file into blk_file
             blk_file.write(vmdk_file.read())
@@ -311,10 +310,12 @@ def upload_ova(zkhandler, pool, name, ova_size):
             # Close vmdk_file
             vmdk_file.close()
             # Perform an OS-level sync
-            pvc_common.run_os_command('sync')
+            pvc_common.run_os_command("sync")
         except Exception:
             output = {
-                'message': "Failed to write image file '{}' to temporary volume.".format(disk.get('src'))
+                "message": "Failed to write image file '{}' to temporary volume.".format(
+                    disk.get("src")
+                )
             }
             retcode = 400
             cleanup_img_maps()
@@ -333,27 +334,25 @@ def upload_ova(zkhandler, pool, name, ova_size):
         cur.execute(query, args)
         close_database(conn, cur)
     except Exception as e:
-        output = {
-            'message': 'Failed to create OVA entry "{}": {}'.format(name, e)
-        }
+        output = {"message": 'Failed to create OVA entry "{}": {}'.format(name, e)}
         retcode = 400
         close_database(conn, cur)
         return output, retcode

     # Get the OVA database id
     query = "SELECT id FROM ova WHERE name = %s;"
-    args = (name, )
+    args = (name,)
     conn, cur = open_database(config)
     cur.execute(query, args)
-    ova_id = cur.fetchone()['id']
+    ova_id = cur.fetchone()["id"]
     close_database(conn, cur)

     # Prepare disk entries in ova_volume
     for idx, disk in enumerate(disk_map):
-        disk_identifier = "sd{}".format(chr(ord('a') + idx))
-        volume_type = disk.get('src').split('.')[-1]
+        disk_identifier = "sd{}".format(chr(ord("a") + idx))
+        volume_type = disk.get("src").split(".")[-1]
         volume = "ova_{}_{}".format(name, disk_identifier)
-        vm_volume_size = disk.get('capacity')
+        vm_volume_size = disk.get("capacity")

         # The function always return XXXXB, so strip off the B and convert to an integer
         vm_volume_size_bytes = pvc_ceph.format_bytes_fromhuman(vm_volume_size)
@@ -368,37 +367,49 @@ def upload_ova(zkhandler, pool, name, ova_size):
         close_database(conn, cur)
     except Exception as e:
         output = {
-            'message': 'Failed to create OVA volume entry "{}": {}'.format(volume, e)
+            "message": 'Failed to create OVA volume entry "{}": {}'.format(
+                volume, e
+            )
         }
         retcode = 400
         close_database(conn, cur)
         return output, retcode

     # Prepare a system_template for the OVA
-    vcpu_count = virtual_hardware.get('vcpus')
-    vram_mb = virtual_hardware.get('vram')
-    if virtual_hardware.get('graphics-controller') == 1:
+    vcpu_count = virtual_hardware.get("vcpus")
+    vram_mb = virtual_hardware.get("vram")
+    if virtual_hardware.get("graphics-controller") == 1:
         vnc = True
         serial = False
     else:
         vnc = False
         serial = True
-    retdata, retcode = provisioner.create_template_system(name, vcpu_count, vram_mb, serial, vnc, vnc_bind=None, ova=ova_id)
+    retdata, retcode = provisioner.create_template_system(
+        name, vcpu_count, vram_mb, serial, vnc, vnc_bind=None, ova=ova_id
+    )
     if retcode != 200:
         return retdata, retcode
     system_template, retcode = provisioner.list_template_system(name, is_fuzzy=False)
     if retcode != 200:
         return retdata, retcode
-    system_template_name = system_template[0].get('name')
+    system_template_name = system_template[0].get("name")

     # Prepare a barebones profile for the OVA
-    retdata, retcode = provisioner.create_profile(name, 'ova', system_template_name, None, None, userdata=None, script=None, ova=name, arguments=None)
+    retdata, retcode = provisioner.create_profile(
+        name,
+        "ova",
+        system_template_name,
+        None,
+        None,
+        userdata=None,
+        script=None,
+        ova=name,
+        arguments=None,
+    )
     if retcode != 200:
         return retdata, retcode

-    output = {
-        'message': "Imported OVA image '{}'.".format(name)
-    }
+    output = {"message": "Imported OVA image '{}'.".format(name)}
     retcode = 200
     return output, retcode

@@ -420,7 +431,7 @@ class OVFParser(object):
         "20": "other-storage-device",
         "23": "usb-controller",
         "24": "graphics-controller",
-        "35": "sound-controller"
+        "35": "sound-controller",
     }

     def _getFilelist(self):
@@ -438,7 +449,10 @@ class OVFParser(object):
         cap_attr = "{{{schema}}}capacity".format(schema=self.OVF_SCHEMA)
         cap_units = "{{{schema}}}capacityAllocationUnits".format(schema=self.OVF_SCHEMA)
         current_list = self.xml.findall(path)
-        results = [(x.get(id_attr), x.get(ref_attr), x.get(cap_attr), x.get(cap_units)) for x in current_list]
+        results = [
+            (x.get(id_attr), x.get(ref_attr), x.get(cap_attr), x.get(cap_units))
+            for x in current_list
+        ]
         return results

     def _getAttributes(self, virtual_system, path, attribute):
@@ -451,36 +465,46 @@ class OVFParser(object):

         # Define our schemas
         envelope_tag = self.xml.find(".")
-        self.XML_SCHEMA = envelope_tag.nsmap.get('xsi')
-        self.OVF_SCHEMA = envelope_tag.nsmap.get('ovf')
-        self.RASD_SCHEMA = envelope_tag.nsmap.get('rasd')
-        self.SASD_SCHEMA = envelope_tag.nsmap.get('sasd')
-        self.VSSD_SCHEMA = envelope_tag.nsmap.get('vssd')
+        self.XML_SCHEMA = envelope_tag.nsmap.get("xsi")
+        self.OVF_SCHEMA = envelope_tag.nsmap.get("ovf")
+        self.RASD_SCHEMA = envelope_tag.nsmap.get("rasd")
+        self.SASD_SCHEMA = envelope_tag.nsmap.get("sasd")
+        self.VSSD_SCHEMA = envelope_tag.nsmap.get("vssd")

-        self.ovf_version = int(self.OVF_SCHEMA.split('/')[-1])
+        self.ovf_version = int(self.OVF_SCHEMA.split("/")[-1])

         # Get the file and disk lists
         self.filelist = self._getFilelist()
         self.disklist = self._getDisklist()

     def getVirtualSystems(self):
-        return self.xml.findall("{{{schema}}}VirtualSystem".format(schema=self.OVF_SCHEMA))
+        return self.xml.findall(
+            "{{{schema}}}VirtualSystem".format(schema=self.OVF_SCHEMA)
+        )

     def getXML(self):
-        return lxml.etree.tostring(self.xml, pretty_print=True).decode('utf8')
+        return lxml.etree.tostring(self.xml, pretty_print=True).decode("utf8")

     def getVirtualHardware(self, virtual_system):
         hardware_list = virtual_system.findall(
-            "{{{schema}}}VirtualHardwareSection/{{{schema}}}Item".format(schema=self.OVF_SCHEMA)
+            "{{{schema}}}VirtualHardwareSection/{{{schema}}}Item".format(
+                schema=self.OVF_SCHEMA
+            )
         )
         virtual_hardware = {}

         for item in hardware_list:
             try:
-                item_type = self.RASD_TYPE[item.find("{{{rasd}}}ResourceType".format(rasd=self.RASD_SCHEMA)).text]
+                item_type = self.RASD_TYPE[
+                    item.find(
+                        "{{{rasd}}}ResourceType".format(rasd=self.RASD_SCHEMA)
+                    ).text
+                ]
             except Exception:
                 continue
-            quantity = item.find("{{{rasd}}}VirtualQuantity".format(rasd=self.RASD_SCHEMA))
+            quantity = item.find(
+                "{{{rasd}}}VirtualQuantity".format(rasd=self.RASD_SCHEMA)
+            )
             if quantity is None:
                 virtual_hardware[item_type] = 1
             else:
@@ -492,11 +516,15 @@ class OVFParser(object):
         # OVF v2 uses the StorageItem field, while v1 uses the normal Item field
         if self.ovf_version < 2:
             hardware_list = virtual_system.findall(
-                "{{{schema}}}VirtualHardwareSection/{{{schema}}}Item".format(schema=self.OVF_SCHEMA)
+                "{{{schema}}}VirtualHardwareSection/{{{schema}}}Item".format(
+                    schema=self.OVF_SCHEMA
+                )
             )
         else:
             hardware_list = virtual_system.findall(
-                "{{{schema}}}VirtualHardwareSection/{{{schema}}}StorageItem".format(schema=self.OVF_SCHEMA)
+                "{{{schema}}}VirtualHardwareSection/{{{schema}}}StorageItem".format(
+                    schema=self.OVF_SCHEMA
+                )
             )
         disk_list = []

@@ -504,38 +532,56 @@ class OVFParser(object):
             item_type = None

             if self.SASD_SCHEMA is not None:
-                item_type = self.RASD_TYPE[item.find("{{{sasd}}}ResourceType".format(sasd=self.SASD_SCHEMA)).text]
+                item_type = self.RASD_TYPE[
+                    item.find(
+                        "{{{sasd}}}ResourceType".format(sasd=self.SASD_SCHEMA)
+                    ).text
+                ]
             else:
-                item_type = self.RASD_TYPE[item.find("{{{rasd}}}ResourceType".format(rasd=self.RASD_SCHEMA)).text]
+                item_type = self.RASD_TYPE[
+                    item.find(
+                        "{{{rasd}}}ResourceType".format(rasd=self.RASD_SCHEMA)
+                    ).text
+                ]

-            if item_type != 'disk':
+            if item_type != "disk":
                 continue

             hostref = None
             if self.SASD_SCHEMA is not None:
-                hostref = item.find("{{{sasd}}}HostResource".format(sasd=self.SASD_SCHEMA))
+                hostref = item.find(
+                    "{{{sasd}}}HostResource".format(sasd=self.SASD_SCHEMA)
+                )
             else:
-                hostref = item.find("{{{rasd}}}HostResource".format(rasd=self.RASD_SCHEMA))
+                hostref = item.find(
+                    "{{{rasd}}}HostResource".format(rasd=self.RASD_SCHEMA)
+                )
             if hostref is None:
                 continue
             disk_res = hostref.text

             # Determine which file this disk_res ultimately represents
-            (disk_id, disk_ref, disk_capacity, disk_capacity_unit) = [x for x in self.disklist if x[0] == disk_res.split('/')[-1]][0]
+            (disk_id, disk_ref, disk_capacity, disk_capacity_unit) = [
+                x for x in self.disklist if x[0] == disk_res.split("/")[-1]
+            ][0]
             (file_id, disk_src) = [x for x in self.filelist if x[0] == disk_ref][0]

             if disk_capacity_unit is not None:
                 # Handle the unit conversion
                 base_unit, action, multiple = disk_capacity_unit.split()
-                multiple_base, multiple_exponent = multiple.split('^')
-                disk_capacity = int(disk_capacity) * (int(multiple_base) ** int(multiple_exponent))
+                multiple_base, multiple_exponent = multiple.split("^")
+                disk_capacity = int(disk_capacity) * (
+                    int(multiple_base) ** int(multiple_exponent)
+                )

             # Append the disk with all details to the list
-            disk_list.append({
-                "id": disk_id,
-                "ref": disk_ref,
-                "capacity": disk_capacity,
-                "src": disk_src
-            })
+            disk_list.append(
+                {
+                    "id": disk_id,
+                    "ref": disk_ref,
+                    "capacity": disk_capacity,
+                    "src": disk_src,
+                }
+            )

         return disk_list
File diff suppressed because it is too large