Compare commits
283 Commits
SHA1
d2c0d868c4
d63e757c32
5d08ad9573
81141c9b93
0f299777f1
fbbdb209c3
f85c2c2a75
adfe302f71
890023cbfc
28abb018e3
d42bb74dc9
42c5f84ba7
88556f4a33
017953c2e6
82b4d3ed1b
3839040092
bae366a316
84ade53fae
72f47f216a
9bfcab5e2b
53c0d2b4f6
351076c15e
42514b9a50
611e47f338
d96a23276b
1523959074
ef762359f4
398d33778f
a6d492ed9f
11fa3b0df3
442aa4e420
3910843660
70f3fdbfb9
7cb0241a12
9fb33ed7a7
abfe0108ab
567fe8f36b
ec7b78b9b8
224c8082ef
f9e7e9884f
726501f4d4
7cc33451b9
ffaa4c033f
7a27503f1b
e7ab1bfddd
c6e34c7dc6
f749633f7c
a4b80be5ed
9d7067469a
891aeca388
aa5f8c93fd
9366977fe6
973c78b8e0
65b44f2955
7ce1bfd930
423da08f5f
45542bfd67
7bf91b1003
4fbec63bf4
b51f0a339d
fc9df76570
78dec77987
6dc6dae26c
0089ec4e17
486408753b
169e174d85
354150f757
eb06c1494e
bb7b1a2bd0
70b9caedc3
2731aa060c
18bcd39b46
d210eef200
1dcc1f6d55
887e14a4e2
e4891831ce
1967034493
921e57ca78
3cc7df63f2
3dbdd12d8f
7e2114b536
e8e65934e3
24fda8a73f
9b3ef6d610
b451c0e8e3
f9b126a106
553f96e7ef
15e78aa9f0
65add58c9a
0a01d84290
4afb288429
2b4d980685
985ad5edc0
0587bcbd67
42f2dedf6d
0d470ae5f6
5b5b7d2276
0468eeb531
0dd719a682
09c1bb6a46
e0cb4a58c3
099c58ead8
37b23c0e59
0e5c681ada
46ffe352e3
5526e13da9
ccee124c8b
02343079c0
37b83aad6a
876f2424e0
4438dd401f
142743b2c0
bafdcf9f8c
6fe74b34b2
9f86f12f1a
ad45f6097f
be405caa11
a1ba9d2eeb
8fc5299d38
37a58d35e8
d74f68c904
15e986c158
5871380e1b
2967c97f1a
4cdf1f7247
deaf138e45
654a3cb7fa
9b65d3271a
fba39cb739
598b2025e8
70b787d1fd
e1310a05f2
2ad6860dfe
cebb4bbc1a
a672e06dd2
1db73bb892
c1956072f0
ce60836c34
b5434ba744
f61d443773
da20b4493a
440821b136
b9e5b14f94
5d2031d99e
9ee5ae4826
48711000b0
82c067b591
0fab7072ac
2d507f8b42
5f9836f96d
95c59ba629
e724e73140
3cf90c46ad
7b2180b626
72a38fd437
73eb4fb457
b580760537
683c3afea6
4c7cb1a20c
90feb83eab
b91923735c
34c4690d49
3e351bb84a
331027d124
ae4f36b881
e451426c7c
611e0edd80
b413e042a6
e672d799a6
59707bad4e
9c19813808
8fe50bea77
8faa3bb53d
a130f19a19
a671d9d457
fee1c7dd6c
b3a75d8069
c3bd6b6ecc
5d58bee34f
f668412941
a0ebc0d3a7
98a7005c1b
44efd66f2c
09aeb33d13
6563053f6c
862f7ee9a8
97a560fcbe
d84e94eff4
ce9d0e9603
3aea5ae34b
3f5076d9ca
8ed602ef9c
e501345e44
d8f97d090a
082648f3b2
2df8f5d407
ca65cb66b8
616d7c43ed
4fe3a73980
26084741d0
4a52ff56b9
0a367898a0
ca5327b908
d36d8e0637
36588a3a81
c02bc0b46a
1e4350ca6f
b8852e116e
9e468d3524
11f045f100
fd80eb9e22
6ac82d6ce9
b438b9b4c2
4417bd374b
9d5f50f82a
56a9e48163
31a117e21c
57768f2583
e4e4e336b4
0caea03428
65932b20d2
1b8b32b07c
39ce704969
d2a5fe59c0
8678dedfea
0aefafa7f7
6db4df51c0
5ddf72855b
0e05ce8b07
78780039de
99f579e41a
07577a52a9
45040a5635
097f0d9be4
ca68321be3
b322841edf
4c58addead
e811c5bbfb
dd44f2f42b
24c86f2c42
db558ec91f
7c99618752
59ca296c58
c18c76f42c
a7432281a8
d975f90f29
b16e2b4925
90f965f516
d2b52c6fe6
8125aea4f3
f3de900bdb
9c7041f12c
c67fc05219
760805fec1
158ed8d3f0
574623f2a8
db09b4c983
560cb609ba
670596ed8e
bd8536d9d1
95c59c2b39
b29c69378d
ad60f4b1f1
68638d7760
4fa9878e01
602c2f9d4a
c979fed10a
1231ba19b7
1de57ab6f3
e419855911
49e5ce1176
92df125a77
7ace5b5056
eeb8879f73
37310e5455
26c2c2c295
d564671e1c
4f25c55efc
3532dcc11f
ce985234c3
83704d8677
97e318a2ca
4505b239eb
@@ -2,12 +2,14 @@ stages:
   - build
   - deploy
 
-build:
+build_releases:
   stage: build
+  before_script:
+    - git submodule update --init
   script:
     - /bin/bash build-deb.sh
-    - /usr/local/bin/deploy-package
+    - /usr/local/bin/deploy-package -C pvc
   only:
     - master
     - tags
   except:
     - branches
README.md (48 lines changed)
@@ -1,4 +1,4 @@
-# PVC - The Parallel Virtual Cluster suite
+# PVC - The Parallel Virtual Cluster system
 
 <p align="center">
 <img alt="Logo banner" src="https://git.bonifacelabs.ca/uploads/-/system/project/avatar/135/pvc_logo.png"/>
@@ -9,19 +9,45 @@
 <a href="https://parallelvirtualcluster.readthedocs.io/en/latest/?badge=latest"><img alt="Documentation Status" src="https://readthedocs.org/projects/parallelvirtualcluster/badge/?version=latest"/></a>
 </p>
 
-PVC is a suite of Python 3 tools to manage virtualized clusters. It provides a fully-functional private cloud based on four key principles:
+**NOTICE FOR GITHUB**: This repository is a read-only mirror of the PVC repositories from my personal GitLab instance. Pull requests submitted here will not be merged. Issues submitted here will however be treated as authoritative.
 
-1. Be Free Software Forever (or Bust)
-2. Be Opinionated and Efficient and Pick The Best Software
-3. Be Scalable and Redundant but Not Hyperscale
-4. Be Simple To Use, Configure, and Maintain
+PVC is a KVM+Ceph+Zookeeper-based, Free Software, scalable, redundant, self-healing, and self-managing private cloud solution designed with administrator simplicity in mind. It is built from the ground-up to be redundant at the host layer, allowing the cluster to gracefully handle the loss of nodes or their components, both due to hardware failure or due to maintenance. It is able to scale from a minimum of 3 nodes up to 12 or more nodes, while retaining performance and flexibility, allowing the administrator to build a small cluster today and grow it as needed.
 
-It is designed to be an administrator-friendly but extremely powerful and rich modern private cloud system, but without the feature bloat and complexity of tools like OpenStack. With PVC, an administrator can provision, manage, and update a cluster of dozens or more hypervisors running thousands of VMs using a simple CLI tool, HTTP API, or [eventually] web interface. PVC is based entirely on Debian GNU/Linux and Free-and-Open-Source tools, providing the glue to bootstrap, provision and manage the cluster, then getting out of the administrators' way.
+The major goal of PVC is to be administrator friendly, providing the power of Enterprise-grade private clouds like OpenStack, Nutanix, and VMWare to homelabbers, SMBs, and small ISPs, without the cost or complexity. It believes in picking the best tool for a job and abstracting it behind the cluster as a whole, freeing the administrator from the boring and time-consuming task of selecting the best component, and letting them get on with the things that really matter. Administration can be done from a simple CLI or via a RESTful API capable of building full-featured web frontends or additional applications, taking a self-documenting approach to keep the administrator learning curve as low as possible. Setup is easy and straightforward with an [ISO-based node installer](https://git.bonifacelabs.ca/parallelvirtualcluster/pvc-installer) and [Ansible role framework](https://git.bonifacelabs.ca/parallelvirtualcluster/pvc-ansible) designed to get a cluster up and running as quickly as possible. Build your cloud in an hour, grow it as you need, and never worry about it: just add physical servers.
 
+Your cloud, the best way; just add physical servers.
+
+## Getting Started
 
-[See the documentation here](https://parallelvirtualcluster.readthedocs.io/en/latest/)
+To get started with PVC, read the [Cluster Architecture document](https://parallelvirtualcluster.readthedocs.io/en/latest/architecture/cluster/), then see [Installing](https://parallelvirtualcluster.readthedocs.io/en/latest/installing) for details on setting up a set of PVC nodes, using the [PVC Ansible](https://parallelvirtualcluster.readthedocs.io/en/latest/manuals/ansible) framework to configure and bootstrap a cluster, and managing it with the [`pvc` CLI tool](https://parallelvirtualcluster.readthedocs.io/en/latest/manuals/cli) or [RESTful HTTP API](https://parallelvirtualcluster.readthedocs.io/en/latest/manuals/api). For details on the project, its motivation, and architectural details, see [the About page](https://parallelvirtualcluster.readthedocs.io/en/latest/about).
 
-[See the API reference here](https://parallelvirtualcluster.readthedocs.io/en/latest/manuals/api-reference.html)
+## Changelog
+
+#### v0.9.0
+
+Numerous small improvements and bugfixes. This release is suitable for general use and is pre-release-quality software.
+
+This release introduces an updated version scheme; all future stable releases until 1.0.0 is ready will be made under this 0.9.z naming. This does not represent semantic versioning and all changes (feature, improvement, or bugfix) will be considered for inclusion in this release train.
+
+#### v0.8
+
+Numerous improvements and bugfixes. This release is suitable for general use and is pre-release-quality software.
+
+#### v0.7
+
+Numerous improvements and bugfixes, revamped documentation. This release is suitable for general use and is beta-quality software.
+
+#### v0.6
+
+Numerous improvements and bugfixes, full implementation of the provisioner, full implementation of the API CLI client (versus direct CLI client). This release is suitable for general use and is beta-quality software.
+
+#### v0.5
+
+First public release; fully implements the VM, network, and storage managers, the HTTP API, and the pvc-ansible framework for deploying and bootstrapping a cluster. This release is suitable for general use, though it is still alpha-quality software and should be expected to change significantly until 1.0 is released.
+
+#### v0.4
+
+Full implementation of virtual management and virtual networking functionality. Partial implementation of storage functionality.
+
+#### v0.3
+
+Basic implementation of virtual management functionality.
 
-**NOTICE FOR GITHUB**: This repository is a read-only mirror of the PVC repositories. Pull requests submitted here will not be merged.
api-daemon/daemon_lib (new symbolic link, 1 line)
@@ -0,0 +1 @@
../daemon-common
api-daemon/migrations/README (new file, 1 line)
@@ -0,0 +1 @@
Generic single-database configuration.
api-daemon/migrations/alembic.ini (new file, 45 lines)
@@ -0,0 +1,45 @@
# A generic, single database configuration.

[alembic]
# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s

# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
script_location = .

# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic

[handlers]
keys = console

[formatters]
keys = generic

[logger_root]
level = WARN
handlers = console
qualname =

[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine

[logger_alembic]
level = INFO
handlers =
qualname = alembic

[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic

[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S
api-daemon/migrations/env.py (new file, 87 lines)
@@ -0,0 +1,87 @@
from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
import logging

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
logger = logging.getLogger('alembic.env')

# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from flask import current_app
config.set_main_option('sqlalchemy.url',
                       current_app.config.get('SQLALCHEMY_DATABASE_URI'))
target_metadata = current_app.extensions['migrate'].db.metadata

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.


def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well. By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.

    """
    url = config.get_main_option("sqlalchemy.url")
    context.configure(url=url)

    with context.begin_transaction():
        context.run_migrations()


def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """

    # this callback is used to prevent an auto-migration from being generated
    # when there are no changes to the schema
    # reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html
    def process_revision_directives(context, revision, directives):
        if getattr(config.cmd_opts, 'autogenerate', False):
            script = directives[0]
            if script.upgrade_ops.is_empty():
                directives[:] = []
                logger.info('No changes in schema detected.')

    engine = engine_from_config(config.get_section(config.config_ini_section),
                                prefix='sqlalchemy.',
                                poolclass=pool.NullPool)

    connection = engine.connect()
    context.configure(connection=connection,
                      target_metadata=target_metadata,
                      process_revision_directives=process_revision_directives,
                      **current_app.extensions['migrate'].configure_args)

    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()

if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
api-daemon/migrations/script.py.mako (new file, 24 lines)
@@ -0,0 +1,24 @@
"""${message}

Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}

"""
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}

# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
branch_labels = ${repr(branch_labels)}
depends_on = ${repr(depends_on)}


def upgrade():
    ${upgrades if upgrades else "pass"}


def downgrade():
    ${downgrades if downgrades else "pass"}
api-daemon/migrations/versions/2d1daa722a0a_pvc_version_0_6.py (new file, 112 lines)
@@ -0,0 +1,112 @@
"""PVC version 0.6

Revision ID: 2d1daa722a0a
Revises:
Create Date: 2020-02-15 23:14:14.733134

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = '2d1daa722a0a'
down_revision = None
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('network_template',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.Text(), nullable=False),
        sa.Column('mac_template', sa.Text(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name')
    )
    op.create_table('script',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.Text(), nullable=False),
        sa.Column('script', sa.Text(), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name')
    )
    op.create_table('storage_template',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.Text(), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name')
    )
    op.create_table('system_template',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.Text(), nullable=False),
        sa.Column('vcpu_count', sa.Integer(), nullable=False),
        sa.Column('vram_mb', sa.Integer(), nullable=False),
        sa.Column('serial', sa.Boolean(), nullable=False),
        sa.Column('vnc', sa.Boolean(), nullable=False),
        sa.Column('vnc_bind', sa.Text(), nullable=True),
        sa.Column('node_limit', sa.Text(), nullable=True),
        sa.Column('node_selector', sa.Text(), nullable=True),
        sa.Column('node_autostart', sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name')
    )
    op.create_table('userdata',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.Text(), nullable=False),
        sa.Column('userdata', sa.Text(), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name')
    )
    op.create_table('network',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('network_template', sa.Integer(), nullable=True),
        sa.Column('vni', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['network_template'], ['network_template.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('profile',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.Text(), nullable=False),
        sa.Column('system_template', sa.Integer(), nullable=True),
        sa.Column('network_template', sa.Integer(), nullable=True),
        sa.Column('storage_template', sa.Integer(), nullable=True),
        sa.Column('userdata', sa.Integer(), nullable=True),
        sa.Column('script', sa.Integer(), nullable=True),
        sa.Column('arguments', sa.Text(), nullable=True),
        sa.ForeignKeyConstraint(['network_template'], ['network_template.id'], ),
        sa.ForeignKeyConstraint(['script'], ['script.id'], ),
        sa.ForeignKeyConstraint(['storage_template'], ['storage_template.id'], ),
        sa.ForeignKeyConstraint(['system_template'], ['system_template.id'], ),
        sa.ForeignKeyConstraint(['userdata'], ['userdata.id'], ),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name')
    )
    op.create_table('storage',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('storage_template', sa.Integer(), nullable=True),
        sa.Column('pool', sa.Text(), nullable=False),
        sa.Column('disk_id', sa.Text(), nullable=False),
        sa.Column('source_volume', sa.Text(), nullable=True),
        sa.Column('disk_size_gb', sa.Integer(), nullable=True),
        sa.Column('mountpoint', sa.Text(), nullable=True),
        sa.Column('filesystem', sa.Text(), nullable=True),
        sa.Column('filesystem_args', sa.Text(), nullable=True),
        sa.ForeignKeyConstraint(['storage_template'], ['storage_template.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('storage')
    op.drop_table('profile')
    op.drop_table('network')
    op.drop_table('userdata')
    op.drop_table('system_template')
    op.drop_table('storage_template')
    op.drop_table('script')
    op.drop_table('network_template')
    # ### end Alembic commands ###
@@ -0,0 +1,33 @@
"""PVC version 0.7

Revision ID: 3bc6117ea44d
Revises: 88c8514684f7
Create Date: 2020-08-24 14:34:36.919308

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = '3bc6117ea44d'
down_revision = '88c8514684f7'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('storage_benchmarks',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('job', sa.Text(), nullable=False),
        sa.Column('result', sa.Text(), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('storage_benchmarks')
    # ### end Alembic commands ###
@@ -0,0 +1,76 @@
"""PVC version 0.7

Revision ID: 88c8514684f7
Revises: 2d1daa722a0a
Create Date: 2020-02-16 19:49:50.126265

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = '88c8514684f7'
down_revision = '2d1daa722a0a'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('ova',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.Text(), nullable=False),
        sa.Column('ovf', sa.Text(), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name')
    )
    op.create_table('ova_volume',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('ova', sa.Integer(), nullable=False),
        sa.Column('pool', sa.Text(), nullable=False),
        sa.Column('volume_name', sa.Text(), nullable=False),
        sa.Column('volume_format', sa.Text(), nullable=False),
        sa.Column('disk_id', sa.Text(), nullable=False),
        sa.Column('disk_size_gb', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['ova'], ['ova.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.alter_column('network', 'network_template',
                    existing_type=sa.INTEGER(),
                    nullable=False)
    op.add_column('network_template', sa.Column('ova', sa.Integer(), nullable=True))
    op.create_foreign_key(None, 'network_template', 'ova', ['ova'], ['id'])
    op.add_column('profile', sa.Column('ova', sa.Integer(), nullable=True))
    op.add_column('profile', sa.Column('profile_type', sa.Text(), nullable=False))
    op.create_foreign_key(None, 'profile', 'ova', ['ova'], ['id'])
    op.alter_column('storage', 'storage_template',
                    existing_type=sa.INTEGER(),
                    nullable=False)
    op.add_column('storage_template', sa.Column('ova', sa.Integer(), nullable=True))
    op.create_foreign_key(None, 'storage_template', 'ova', ['ova'], ['id'])
    op.add_column('system_template', sa.Column('ova', sa.Integer(), nullable=True))
    op.create_foreign_key(None, 'system_template', 'ova', ['ova'], ['id'])
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(None, 'system_template', type_='foreignkey')
    op.drop_column('system_template', 'ova')
    op.drop_constraint(None, 'storage_template', type_='foreignkey')
    op.drop_column('storage_template', 'ova')
    op.alter_column('storage', 'storage_template',
                    existing_type=sa.INTEGER(),
                    nullable=True)
    op.drop_constraint(None, 'profile', type_='foreignkey')
    op.drop_column('profile', 'profile_type')
    op.drop_column('profile', 'ova')
    op.drop_constraint(None, 'network_template', type_='foreignkey')
    op.drop_column('network_template', 'ova')
    op.alter_column('network', 'network_template',
                    existing_type=sa.INTEGER(),
                    nullable=True)
    op.drop_table('ova_volume')
    op.drop_table('ova')
    # ### end Alembic commands ###
@@ -109,6 +109,7 @@ def install(**kwargs):
 
         # The root, var, and log volumes have specific values
         if disk['mountpoint'] == "/":
+            root_disk['scsi_id'] = disk_id
             dump = 0
             cpass = 1
         elif disk['mountpoint'] == '/var' or disk['mountpoint'] == '/var/log':
@@ -184,12 +185,12 @@ interface "ens2" {
 GRUB_DEFAULT=0
 GRUB_TIMEOUT=1
 GRUB_DISTRIBUTOR="PVC Virtual Machine"
-GRUB_CMDLINE_LINUX_DEFAULT="root=/dev/{root_disk} console=tty0 console=ttyS0,115200n8"
+GRUB_CMDLINE_LINUX_DEFAULT="root=/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-{root_disk} console=tty0 console=ttyS0,115200n8"
 GRUB_CMDLINE_LINUX=""
 GRUB_TERMINAL=console
 GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1"
 GRUB_DISABLE_LINUX_UUID=false
-""".format(root_disk=root_disk['disk_id'])
+""".format(root_disk=root_disk['scsi_id'])
     fh.write(data)
 
     # Chroot, do some in-root tasks, then exit the chroot
api-daemon/pvc-api-db-upgrade (new executable file, 15 lines)
@@ -0,0 +1,15 @@
#!/bin/bash

# Apply PVC database migrations
# Part of the Parallel Virtual Cluster (PVC) system

export PVC_CONFIG_FILE="/etc/pvc/pvcapid.yaml"

if [[ ! -f ${PVC_CONFIG_FILE} ]]; then
    echo "Create a configuration file at ${PVC_CONFIG_FILE} before upgrading the database."
    exit 1
fi

pushd /usr/share/pvc
./pvcapid-manage.py db upgrade
popd
api-daemon/pvcapid-manage.py (new executable file, 35 lines)
@@ -0,0 +1,35 @@
#!/usr/bin/env python3

# manage.py - PVC Database management tasks
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2020 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

import os
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager

from pvcapid.flaskapi import app, db, config

migrate = Migrate(app, db)
manager = Manager(app)

manager.add_command('db', MigrateCommand)

if __name__ == '__main__':
    manager.run()
api-daemon/pvcapid-worker.service (new file, 16 lines)
@@ -0,0 +1,16 @@
# Parallel Virtual Cluster Provisioner API provisioner worker unit file

[Unit]
Description = Parallel Virtual Cluster API provisioner worker
After = network-online.target

[Service]
Type = simple
WorkingDirectory = /usr/share/pvc
Environment = PYTHONUNBUFFERED=true
Environment = PVC_CONFIG_FILE=/etc/pvc/pvcapid.yaml
ExecStart = /usr/bin/celery worker -A pvcapid.flaskapi.celery --concurrency 1 --loglevel INFO
Restart = on-failure

[Install]
WantedBy = multi-user.target
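For orientation: the `-A pvcapid.flaskapi.celery` argument above means the worker imports a Celery application object named `celery` from pvcapid/flaskapi.py, whose diff is suppressed further below. A minimal hypothetical sketch of what such an object might look like; the broker URL is an assumption based on the Redis 'queue' section of the sample pvcapid.yaml later in this diff:

    # Hypothetical sketch only: the real object lives in the suppressed
    # pvcapid/flaskapi.py diff; the broker URL is an assumption based on the
    # Redis 'queue' settings in the sample pvcapid.yaml.
    from celery import Celery
    from flask import Flask

    app = Flask('pvcapid')
    app.config['CELERY_BROKER_URL'] = 'redis://localhost:6379/0'

    # This is the object the unit file's ExecStart loads via
    # 'celery worker -A pvcapid.flaskapi.celery'.
    celery = Celery(app.name, broker=app.config['CELERY_BROKER_URL'])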
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 
-# pvcd.py - Node daemon startup stub
+# pvcapid.py - API daemon startup stub
 # Part of the Parallel Virtual Cluster (PVC) system
 #
 # Copyright (C) 2018-2020 Joshua M. Boniface <joshua@boniface.me>
@@ -20,4 +20,4 @@
 #
 ###############################################################################
 
-import pvcd.Daemon
+import pvcapid.Daemon
@@ -1,11 +1,11 @@
 ---
-# pvc-api client configuration file example
+# pvcapid configuration file example
 #
-# This configuration file specifies details for the PVC API client running on
+# This configuration file specifies details for the PVC API daemon running on
 # this machine. Default values are not supported; the values in this sample
 # configuration are considered defaults and can be used as-is.
 #
-# Copy this example to /etc/pvc/pvc-api.conf and edit to your needs
+# Copy this example to /etc/pvc/pvcapid.conf and edit to your needs
 
 pvc:
     # debug: Enable/disable API debug mode
@@ -49,12 +49,12 @@ pvc:
         host: localhost
         # port: PostgreSQL port, invariably '5432'
         port: 5432
-        # name: PostgreSQL database name, invariably 'pvcprov'
-        name: pvcprov
-        # user: PostgreSQL username, invariably 'pvcprov'
-        user: pvcprov
+        # name: PostgreSQL database name, invariably 'pvcapi'
+        name: pvcapi
+        # user: PostgreSQL username, invariably 'pvcapi'
+        user: pvcapi
         # pass: PostgreSQL user password, randomly generated
-        pass: pvcprov
+        pass: pvcapi
         # queue: Celery backend queue using the PVC Zookeeper cluster
         queue:
             # host: Redis hostname, usually 'localhost'
@@ -70,7 +70,7 @@ pvc:
     storage_hosts:
         - pvchv1
         - pvchv2
-        - pvchv2
+        - pvchv3
     # storage_domain: The storage domain name, concatenated with the coordinators list names
    # to form monitor access strings
     storage_domain: "pvc.storage"
@@ -8,8 +8,8 @@ After = network-online.target
 Type = simple
 WorkingDirectory = /usr/share/pvc
 Environment = PYTHONUNBUFFERED=true
-Environment = PVC_CONFIG_FILE=/etc/pvc/pvc-api.yaml
-ExecStart = /usr/share/pvc/pvc-api.py
+Environment = PVC_CONFIG_FILE=/etc/pvc/pvcapid.yaml
+ExecStart = /usr/share/pvc/pvcapid.py
 Restart = on-failure
 
 [Install]
api-daemon/pvcapid/Daemon.py (new executable file, 35 lines)
@@ -0,0 +1,35 @@
#!/usr/bin/env python3

# Daemon.py - PVC HTTP API daemon
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2020 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

import pvcapid.flaskapi as pvc_api

##########################################################
# Entrypoint
##########################################################

if pvc_api.config['ssl_enabled']:
    context = (pvc_api.config['ssl_cert_file'], pvc_api.config['ssl_key_file'])
else:
    context = None

print('Starting PVC API daemon at {}:{} with SSL={}, Authentication={}'.format(pvc_api.config['listen_address'], pvc_api.config['listen_port'], pvc_api.config['ssl_enabled'], pvc_api.config['auth_enabled']))
pvc_api.app.run(pvc_api.config['listen_address'], pvc_api.config['listen_port'], threaded=True, ssl_context=context)
api-daemon/pvcapid/benchmark.py (new executable file, 472 lines)
@@ -0,0 +1,472 @@
#!/usr/bin/env python3

# benchmark.py - PVC API Benchmark functions
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2020 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

import flask
import json
import psycopg2
import psycopg2.extras
import os
import re
import time
import shlex
import subprocess

from distutils.util import strtobool as dustrtobool

import daemon_lib.common as pvc_common
import daemon_lib.node as pvc_node
import daemon_lib.ceph as pvc_ceph

import pvcapid.libvirt_schema as libvirt_schema

from pvcapid.ova import list_ova

def strtobool(stringv):
    if stringv is None:
        return False
    if isinstance(stringv, bool):
        return bool(stringv)
    try:
        return bool(dustrtobool(stringv))
    except:
        return False

#
# Exceptions (used by Celery tasks)
#
class BenchmarkError(Exception):
    """
    An exception that results from the Benchmark job.
    """
    def __init__(self, message, cur_time=None, db_conn=None, db_cur=None, zk_conn=None):
        self.message = message
        if cur_time is not None:
            # Clean up our dangling result
            query = "DELETE FROM storage_benchmarks WHERE job = %s;"
            args = (cur_time,)
            db_cur.execute(query, args)
            db_conn.commit()
            # Close the database connections cleanly
            close_database(db_conn, db_cur)
            pvc_common.stopZKConnection(zk_conn)

    def __str__(self):
        return str(self.message)

#
# Common functions
#

# Database connections
def open_database(config):
    conn = psycopg2.connect(
        host=config['database_host'],
        port=config['database_port'],
        dbname=config['database_name'],
        user=config['database_user'],
        password=config['database_password']
    )
    cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
    return conn, cur

def close_database(conn, cur, failed=False):
    if not failed:
        conn.commit()
    cur.close()
    conn.close()

def list_benchmarks(job=None):
    if job is not None:
        query = "SELECT * FROM {} WHERE job = %s;".format('storage_benchmarks')
        args = (job, )
    else:
        query = "SELECT * FROM {} ORDER BY id DESC;".format('storage_benchmarks')
        args = ()

    conn, cur = open_database(config)
    cur.execute(query, args)
    orig_data = cur.fetchall()
    data = list()
    for benchmark in orig_data:
        benchmark_data = dict()
        benchmark_data['id'] = benchmark['id']
        benchmark_data['job'] = benchmark['job']
        benchmark_data['benchmark_result'] = benchmark['result']
        # Append the new data to our actual output structure
        data.append(benchmark_data)
    close_database(conn, cur)
    if data:
        return data, 200
    else:
        return { 'message': 'No benchmark found.' }, 404

def run_benchmark(self, pool):
    # Runtime imports
    import time
    import json
    from datetime import datetime

    time.sleep(2)

    cur_time = datetime.now().isoformat(timespec='seconds')

    print("Starting storage benchmark '{}' on pool '{}'".format(cur_time, pool))

    # Phase 0 - connect to databases
    try:
        db_conn, db_cur = open_database(config)
    except:
        print('FATAL - failed to connect to Postgres')
        raise Exception

    try:
        zk_conn = pvc_common.startZKConnection(config['coordinators'])
    except:
        print('FATAL - failed to connect to Zookeeper')
        raise Exception

    print("Storing running status for job '{}' in database".format(cur_time))
    try:
        query = "INSERT INTO storage_benchmarks (job, result) VALUES (%s, %s);"
        args = (cur_time, "Running",)
        db_cur.execute(query, args)
        db_conn.commit()
    except Exception as e:
        raise BenchmarkError("Failed to store running status: {}".format(e), cur_time=cur_time, db_conn=db_conn, db_cur=db_cur, zk_conn=zk_conn)

    # Phase 1 - volume preparation
    self.update_state(state='RUNNING', meta={'current': 1, 'total': 3, 'status': 'Creating benchmark volume'})
    time.sleep(1)

    volume = 'pvcbenchmark'

    # Create the RBD volume
    retcode, retmsg = pvc_ceph.add_volume(zk_conn, pool, volume, "8G")
    if not retcode:
        raise BenchmarkError('Failed to create volume "{}": {}'.format(volume, retmsg), cur_time=cur_time, db_conn=db_conn, db_cur=db_cur, zk_conn=zk_conn)
    else:
        print(retmsg)

    # Phase 2 - benchmark run
    self.update_state(state='RUNNING', meta={'current': 2, 'total': 3, 'status': 'Running fio benchmarks on volume'})
    time.sleep(1)

    # We run a total of 8 tests, to give a generalized idea of performance on the cluster:
    # 1. A sequential read test of 8GB with a 4M block size
    # 2. A sequential write test of 8GB with a 4M block size
    # 3. A random read test of 8GB with a 4M block size
    # 4. A random write test of 8GB with a 4M block size
    # 5. A random read test of 8GB with a 256k block size
    # 6. A random write test of 8GB with a 256k block size
    # 7. A random read test of 8GB with a 4k block size
    # 8. A random write test of 8GB with a 4k block size
    # Taken together, these 8 results should give a very good indication of the overall storage performance
    # for a variety of workloads.
    test_matrix = {
        'seq_read': {
            'direction': 'read',
            'bs': '4M',
            'rw': 'read'
        },
        'seq_write': {
            'direction': 'write',
            'bs': '4M',
            'rw': 'write'
        },
        'rand_read_4M': {
            'direction': 'read',
            'bs': '4M',
            'rw': 'randread'
        },
        'rand_write_4M': {
            'direction': 'write',
            'bs': '4M',
            'rw': 'randwrite'
        },
        'rand_read_256K': {
            'direction': 'read',
            'bs': '256K',
            'rw': 'randread'
        },
        'rand_write_256K': {
            'direction': 'write',
            'bs': '256K',
            'rw': 'randwrite'
        },
        'rand_read_4K': {
            'direction': 'read',
            'bs': '4K',
            'rw': 'randread'
        },
        'rand_write_4K': {
            'direction': 'write',
            'bs': '4K',
            'rw': 'randwrite'
        }
    }
    parsed_results = dict()
    for test in test_matrix:
        print("Running test '{}'".format(test))
        fio_cmd = """
            fio \
                --output-format=terse \
                --terse-version=5 \
                --ioengine=rbd \
                --pool={pool} \
                --rbdname={volume} \
                --direct=1 \
                --randrepeat=1 \
                --iodepth=64 \
                --size=8G \
                --name={test} \
                --bs={bs} \
                --readwrite={rw}
        """.format(
            pool=pool,
            volume=volume,
            test=test,
            bs=test_matrix[test]['bs'],
            rw=test_matrix[test]['rw']
        )

        retcode, stdout, stderr = pvc_common.run_os_command(fio_cmd)
        if retcode:
            raise BenchmarkError("Failed to run fio test: {}".format(stderr), cur_time=cur_time, db_conn=db_conn, db_cur=db_cur, zk_conn=zk_conn)

        # Parse the terse results to avoid storing tons of junk
        # Reference: https://fio.readthedocs.io/en/latest/fio_doc.html#terse-output
        # This is written out broken up because the man page didn't bother to do this, and I'm putting it here for posterity.
        # Example Read test (line breaks to match man ref):
        # I 5;fio-3.12;test;0;0; (5) [0, 1, 2, 3, 4]
        # R 8388608;2966268;724;2828; (4) [5, 6, 7, 8]
        #   0;0;0.000000;0.000000; (4) [9, 10, 11, 12]
        #   0;0;0.000000;0.000000; (4) [13, 14, 15, 16]
        #   0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0; (20) [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36]
        #   0;0;0.000000;0.000000; (4) [37, 38, 39, 40]
        #   2842624;3153920;100.000000%;2967142.400000;127226.797479;5; (6) [41, 42, 43, 44, 45, 46]
        #   694;770;724.400000;31.061230;5; (5) [47, 48, 49, 50, 51]
        # W 0;0;0;0; (4) [52, 53, 54, 55]
        #   0;0;0.000000;0.000000; (4) [56, 57, 58, 59]
        #   0;0;0.000000;0.000000; (4) [60, 61, 62, 63]
        #   0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0; (20) [64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83]
        #   0;0;0.000000;0.000000; (4) [84, 85, 86, 87]
        #   0;0;0.000000%;0.000000;0.000000;0; (6) [88, 89, 90, 91, 92, 93]
        #   0;0;0.000000;0.000000;0; (5) [94, 95, 96, 97, 98]
        # T 0;0;0;0; (4) [99, 100, 101, 102]
        #   0;0;0.000000;0.000000; (4) [103, 104, 105, 106]
        #   0;0;0.000000;0.000000; (4) [107, 108, 109, 110]
        #   0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0; (20) [111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130]
        #   0;0;0.000000;0.000000; (4) [131, 132, 133, 134]
        #   0;0;0.000000%;0.000000;0.000000;0; (6) [135, 136, 137, 138, 139, 140]
        #   0;0;0.000000;0.000000;0; (5) [141, 142, 143, 144, 145]
        # C 0.495225%;0.000000%;2083;0;13; (5) [146, 147, 148, 149, 150]
        # D 0.1%;0.1%;0.2%;0.4%;0.8%;1.6%;96.9%; (7) [151, 152, 153, 154, 155, 156, 157]
        # U 0.00%;0.00%;0.00%;0.00%;0.00%;0.00%;0.00%;0.00%;0.00%;0.00%; (10) [158, 159, 160, 161, 162, 163, 164, 165, 166, 167]
        # M 0.00%;0.00%;0.00%;0.00%;0.00%;0.00%;0.00%;0.00%;0.00%;0.00%;0.00%;0.00%; (12) [168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179]
        # B dm-0;0;110;0;0;0;4;4;0.15%; (9) [180, 181, 182, 183, 184, 185, 186, 187, 188]
        #   slaves;0;118;0;28;0;23;0;0.00%; (9) [189, 190, 191, 192, 193, 194, 195, 196, 197]
        #   sde;0;118;0;28;0;23;0;0.00% (9) [198, 199, 200, 201, 202, 203, 204, 205, 206]
        # Example Write test:
        # I 5;fio-3.12;test;0;0; (5)
        # R 0;0;0;0; (4)
        #   0;0;0.000000;0.000000; (4)
        #   0;0;0.000000;0.000000; (4)
        #   0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0; (20)
        #   0;0;0.000000;0.000000; (4)
        #   0;0;0.000000%;0.000000;0.000000;0; (6)
        #   0;0;0.000000;0.000000;0; (5)
        # W 8388608;1137438;277;7375; (4)
        #   0;0;0.000000;0.000000; (4)
        #   0;0;0.000000;0.000000; (4)
        #   0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0; (20)
        #   0;0;0.000000;0.000000; (4)
        #   704512;1400832;99.029573%;1126400.000000;175720.860374;14; (6)
        #   172;342;275.000000;42.900601;14; (5)
        # T 0;0;0;0; (4)
        #   0;0;0.000000;0.000000; (4)
        #   0;0;0.000000;0.000000; (4)
        #   0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0;0%=0; (20)
        #   0;0;0.000000;0.000000; (4)
        #   0;0;0.000000%;0.000000;0.000000;0; (6)
        #   0;0;0.000000;0.000000;0; (5)
        # C 12.950909%;1.912124%;746;0;95883; (5)
        # D 0.1%;0.1%;0.2%;0.4%;0.8%;1.6%;96.9%; (7)
        # U 0.00%;0.00%;0.00%;0.00%;0.00%;0.00%;0.00%;0.00%;0.00%;0.00%; (10)
        # M 0.00%;0.00%;0.00%;0.00%;0.00%;0.00%;0.00%;0.00%;0.00%;0.00%;0.00%;0.00%; (12)
        # B dm-0;0;196;0;0;0;12;12;0.16%; (9)
        #   slaves;0;207;0;95;0;39;16;0.21%; (9)
        #   sde;0;207;0;95;0;39;16;0.21% (9)
        results = stdout.split(';')
        if test_matrix[test]['direction'] == 'read':
            # Stats
            #   5: Total IO (KiB)
            #   6: bandwidth (KiB/sec)
            #   7: IOPS
            #   8: runtime (msec)
            # Total latency
            #   37: min
            #   38: max
            #   39: mean
            #   40: stdev
            # Bandwidth
            #   41: min
            #   42: max
            #   44: mean
            #   45: stdev
            #   46: # samples
            # IOPS
            #   47: min
            #   48: max
            #   49: mean
            #   50: stdev
            #   51: # samples
            # CPU
            #   146: user
            #   147: system
            #   148: ctx switches
            #   149: maj faults
            #   150: min faults
            parsed_results[test] = {
                "overall": {
                    "iosize": results[5],
                    "bandwidth": results[6],
                    "iops": results[7],
                    "runtime": results[8]
                },
                "latency": {
                    "min": results[37],
                    "max": results[38],
                    "mean": results[39],
                    "stdev": results[40]
                },
                "bandwidth": {
                    "min": results[41],
                    "max": results[42],
                    "mean": results[44],
                    "stdev": results[45],
                    "numsamples": results[46],
                },
                "iops": {
                    "min": results[47],
                    "max": results[48],
                    "mean": results[49],
                    "stdev": results[50],
                    "numsamples": results[51]
                },
                "cpu": {
                    "user": results[146],
                    "system": results[147],
                    "ctxsw": results[148],
                    "majfault": results[149],
                    "minfault": results[150]
                }
            }

        if test_matrix[test]['direction'] == 'write':
            # Stats
            #   52: Total IO (KiB)
            #   53: bandwidth (KiB/sec)
            #   54: IOPS
            #   55: runtime (msec)
            # Total latency
            #   84: min
            #   85: max
            #   86: mean
            #   87: stdev
            # Bandwidth
            #   88: min
            #   89: max
            #   91: mean
            #   92: stdev
            #   93: # samples
            # IOPS
            #   94: min
            #   95: max
            #   96: mean
            #   97: stdev
            #   98: # samples
            # CPU
            #   146: user
            #   147: system
            #   148: ctx switches
            #   149: maj faults
            #   150: min faults
            parsed_results[test] = {
                "overall": {
                    "iosize": results[52],
                    "bandwidth": results[53],
                    "iops": results[54],
                    "runtime": results[55]
                },
                "latency": {
                    "min": results[84],
                    "max": results[85],
                    "mean": results[86],
                    "stdev": results[87]
                },
                "bandwidth": {
                    "min": results[88],
                    "max": results[89],
                    "mean": results[91],
                    "stdev": results[92],
                    "numsamples": results[93],
                },
                "iops": {
                    "min": results[94],
                    "max": results[95],
                    "mean": results[96],
                    "stdev": results[97],
                    "numsamples": results[98]
                },
                "cpu": {
                    "user": results[146],
                    "system": results[147],
                    "ctxsw": results[148],
                    "majfault": results[149],
                    "minfault": results[150]
                }
            }

    # Phase 3 - cleanup
    self.update_state(state='RUNNING', meta={'current': 3, 'total': 3, 'status': 'Cleaning up and storing results'})
    time.sleep(1)

    # Remove the RBD volume
    retcode, retmsg = pvc_ceph.remove_volume(zk_conn, pool, volume)
    if not retcode:
        raise BenchmarkError('Failed to remove volume "{}": {}'.format(volume, retmsg), cur_time=cur_time, db_conn=db_conn, db_cur=db_cur, zk_conn=zk_conn)
    else:
        print(retmsg)

    print("Storing result of tests for job '{}' in database".format(cur_time))
    try:
        query = "UPDATE storage_benchmarks SET result = %s WHERE job = %s;"
        args = (json.dumps(parsed_results), cur_time)
        db_cur.execute(query, args)
        db_conn.commit()
    except Exception as e:
        raise BenchmarkError("Failed to store test results: {}".format(e), cur_time=cur_time, db_conn=db_conn, db_cur=db_cur, zk_conn=zk_conn)

    close_database(db_conn, db_cur)
    pvc_common.stopZKConnection(zk_conn)
    return { 'status': "Storage benchmark '{}' completed successfully.", 'current': 3, 'total': 3 }
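run_benchmark() takes a `self` argument and calls `self.update_state()`, which implies it executes as a bound Celery task under the worker unit shown earlier. A hypothetical sketch of that binding; the actual registration lives in the suppressed pvcapid/flaskapi.py diff, and the task name here is illustrative:

    # Hypothetical sketch: binding run_benchmark as a bound Celery task, so
    # that 'self' provides update_state() for the RUNNING progress updates.
    from pvcapid.flaskapi import celery
    import pvcapid.benchmark as benchmark

    @celery.task(bind=True)
    def run_storage_benchmark(self, pool):
        return benchmark.run_benchmark(self, pool)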
(File diff suppressed because it is too large.)
@ -1,6 +1,6 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# pvcapi_helper.py - PVC HTTP API functions
|
||||
# helper.py - PVC HTTP API helper functions
|
||||
# Part of the Parallel Virtual Cluster (PVC) system
|
||||
#
|
||||
# Copyright (C) 2018-2020 Joshua M. Boniface <joshua@boniface.me>
|
||||
@ -24,14 +24,26 @@ import flask
|
||||
import json
|
||||
import lxml.etree as etree
|
||||
|
||||
from distutils.util import strtobool
|
||||
from distutils.util import strtobool as dustrtobool
|
||||
|
||||
import client_lib.common as pvc_common
|
||||
import client_lib.cluster as pvc_cluster
|
||||
import client_lib.node as pvc_node
|
||||
import client_lib.vm as pvc_vm
|
||||
import client_lib.network as pvc_network
|
||||
import client_lib.ceph as pvc_ceph
|
||||
from werkzeug.formparser import parse_form_data
|
||||
|
||||
import daemon_lib.common as pvc_common
|
||||
import daemon_lib.cluster as pvc_cluster
|
||||
import daemon_lib.node as pvc_node
|
||||
import daemon_lib.vm as pvc_vm
|
||||
import daemon_lib.network as pvc_network
|
||||
import daemon_lib.ceph as pvc_ceph
|
||||
|
||||
def strtobool(stringv):
|
||||
if stringv is None:
|
||||
return False
|
||||
if isinstance(stringv, bool):
|
||||
return bool(stringv)
|
||||
try:
|
||||
return bool(dustrtobool(stringv))
|
||||
except:
|
||||
return False
|
||||
|
||||
#
|
||||
# Initialization function
|
||||
@ -104,12 +116,12 @@ def cluster_maintenance(maint_state='false'):
|
||||
#
|
||||
# Node functions
|
||||
#
|
||||
def node_list(limit=None, is_fuzzy=True):
|
||||
def node_list(limit=None, daemon_state=None, coordinator_state=None, domain_state=None, is_fuzzy=True):
|
||||
"""
|
||||
Return a list of nodes with limit LIMIT.
|
||||
"""
|
||||
zk_conn = pvc_common.startZKConnection(config['coordinators'])
|
||||
-    retflag, retdata = pvc_node.get_list(zk_conn, limit, is_fuzzy=is_fuzzy)
+    retflag, retdata = pvc_node.get_list(zk_conn, limit, daemon_state=daemon_state, coordinator_state=coordinator_state, domain_state=domain_state, is_fuzzy=is_fuzzy)
    pvc_common.stopZKConnection(zk_conn)

    if retflag:
@@ -428,7 +440,7 @@ def vm_define(xml, node, limit, selector, autostart):
        xml_data = etree.fromstring(xml)
        new_cfg = etree.tostring(xml_data, pretty_print=True).decode('utf8')
    except Exception as e:
-        return {'message': 'Error: XML is malformed or incorrect: {}'.format(e)}, 400
+        return { 'message': 'XML is malformed or incorrect: {}'.format(e) }, 400

    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_vm.define_vm(zk_conn, new_cfg, node, limit, selector, autostart, profile=None)
@@ -510,7 +522,7 @@ def vm_modify(name, restart, xml):
        xml_data = etree.fromstring(xml)
        new_cfg = etree.tostring(xml_data, pretty_print=True).decode('utf8')
    except Exception as e:
-        return {'message': 'Error: XML is malformed or incorrect: {}'.format(e)}, 400
+        return { 'message': 'XML is malformed or incorrect: {}'.format(e) }, 400
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_vm.modify_vm(zk_conn, name, restart, new_cfg)
    pvc_common.stopZKConnection(zk_conn)
@@ -579,12 +591,12 @@ def vm_start(name):
    }
    return output, retcode

-def vm_restart(name):
+def vm_restart(name, wait):
    """
    Restart a VM in the PVC cluster.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
-    retflag, retdata = pvc_vm.restart_vm(zk_conn, name)
+    retflag, retdata = pvc_vm.restart_vm(zk_conn, name, wait)
    pvc_common.stopZKConnection(zk_conn)

    if retflag:
@@ -597,12 +609,12 @@ def vm_restart(name):
    }
    return output, retcode

-def vm_shutdown(name):
+def vm_shutdown(name, wait):
    """
    Shutdown a VM in the PVC cluster.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
-    retflag, retdata = pvc_vm.shutdown_vm(zk_conn, name)
+    retflag, retdata = pvc_vm.shutdown_vm(zk_conn, name, wait)
    pvc_common.stopZKConnection(zk_conn)

    if retflag:
@@ -651,12 +663,12 @@ def vm_disable(name):
    }
    return output, retcode

-def vm_move(name, node):
+def vm_move(name, node, wait, force_live):
    """
    Move a VM to another node.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
-    retflag, retdata = pvc_vm.move_vm(zk_conn, name, node)
+    retflag, retdata = pvc_vm.move_vm(zk_conn, name, node, wait, force_live)
    pvc_common.stopZKConnection(zk_conn)

    if retflag:
@@ -669,12 +681,12 @@ def vm_move(name, node):
    }
    return output, retcode

-def vm_migrate(name, node, flag_force):
+def vm_migrate(name, node, flag_force, wait, force_live):
    """
    Temporarily migrate a VM to another node.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
-    retflag, retdata = pvc_vm.migrate_vm(zk_conn, name, node, flag_force)
+    retflag, retdata = pvc_vm.migrate_vm(zk_conn, name, node, flag_force, wait, force_live)
    pvc_common.stopZKConnection(zk_conn)

    if retflag:
@@ -687,12 +699,12 @@ def vm_migrate(name, node, flag_force):
    }
    return output, retcode

-def vm_unmigrate(name):
+def vm_unmigrate(name, wait, force_live):
    """
    Unmigrate a migrated VM.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
-    retflag, retdata = pvc_vm.unmigrate_vm(zk_conn, name)
+    retflag, retdata = pvc_vm.unmigrate_vm(zk_conn, name, wait, force_live)
    pvc_common.stopZKConnection(zk_conn)

    if retflag:
@@ -974,12 +986,12 @@ def ceph_status():

    return retdata, retcode

-def ceph_radosdf():
+def ceph_util():
    """
    Get the current Ceph cluster utilization.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
-    retflag, retdata = pvc_ceph.get_radosdf(zk_conn)
+    retflag, retdata = pvc_ceph.get_util(zk_conn)
    pvc_common.stopZKConnection(zk_conn)

    if retflag:
@@ -1327,6 +1339,150 @@ def ceph_volume_remove(pool, name):
    }
    return output, retcode

def ceph_volume_upload(pool, volume, img_type):
    """
    Upload a raw file via HTTP POST to a PVC Ceph volume.
    """
    # Determine the image conversion options
    if img_type not in ['raw', 'vmdk', 'qcow2', 'qed', 'vdi', 'vpc']:
        output = {
            "message": "Image type '{}' is not valid.".format(img_type)
        }
        retcode = 400
        return output, retcode

    # Get the size of the target block device
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retcode, retdata = pvc_ceph.get_list_volume(zk_conn, pool, volume, is_fuzzy=False)
    pvc_common.stopZKConnection(zk_conn)
    # If there's no target, return failure
    if not retcode or len(retdata) < 1:
        output = {
            "message": "Target volume '{}' does not exist in pool '{}'.".format(volume, pool)
        }
        retcode = 400
        return output, retcode
    dev_size = retdata[0]['stats']['size']

    def cleanup_maps_and_volumes():
        zk_conn = pvc_common.startZKConnection(config['coordinators'])
        # Unmap the target blockdev
        retflag, retdata = pvc_ceph.unmap_volume(zk_conn, pool, volume)
        # Unmap the temporary blockdev
        retflag, retdata = pvc_ceph.unmap_volume(zk_conn, pool, "{}_tmp".format(volume))
        # Remove the temporary blockdev
        retflag, retdata = pvc_ceph.remove_volume(zk_conn, pool, "{}_tmp".format(volume))
        pvc_common.stopZKConnection(zk_conn)

    # Raw images are written directly to the target blockdev; other formats are staged on a temporary blockdev first
    if img_type == 'raw':
        # Map the target blockdev
        zk_conn = pvc_common.startZKConnection(config['coordinators'])
        retflag, retdata = pvc_ceph.map_volume(zk_conn, pool, volume)
        pvc_common.stopZKConnection(zk_conn)
        if not retflag:
            output = {
                'message': retdata.replace('\"', '\'')
            }
            retcode = 400
            cleanup_maps_and_volumes()
            return output, retcode
        dest_blockdev = retdata

        # Save the data to the blockdev directly
        try:
            data.save(dest_blockdev)
        except:
            output = {
                'message': "Failed to write image file to volume."
            }
            retcode = 400
            cleanup_maps_and_volumes()
            return output, retcode

        output = {
            'message': "Wrote uploaded file to volume '{}' in pool '{}'.".format(volume, pool)
        }
        retcode = 200
        cleanup_maps_and_volumes()
        return output, retcode

    # Otherwise, stage the image on a temporary blockdev and convert it to raw
    else:
        # Create a temporary blockdev
        zk_conn = pvc_common.startZKConnection(config['coordinators'])
        retflag, retdata = pvc_ceph.add_volume(zk_conn, pool, "{}_tmp".format(volume), dev_size)
        pvc_common.stopZKConnection(zk_conn)
        if not retflag:
            output = {
                'message': retdata.replace('\"', '\'')
            }
            retcode = 400
            cleanup_maps_and_volumes()
            return output, retcode

        # Map the temporary target blockdev
        zk_conn = pvc_common.startZKConnection(config['coordinators'])
        retflag, retdata = pvc_ceph.map_volume(zk_conn, pool, "{}_tmp".format(volume))
        pvc_common.stopZKConnection(zk_conn)
        if not retflag:
            output = {
                'message': retdata.replace('\"', '\'')
            }
            retcode = 400
            cleanup_maps_and_volumes()
            return output, retcode
        temp_blockdev = retdata

        # Map the target blockdev
        zk_conn = pvc_common.startZKConnection(config['coordinators'])
        retflag, retdata = pvc_ceph.map_volume(zk_conn, pool, volume)
        pvc_common.stopZKConnection(zk_conn)
        if not retflag:
            output = {
                'message': retdata.replace('\"', '\'')
            }
            retcode = 400
            cleanup_maps_and_volumes()
            return output, retcode
        dest_blockdev = retdata

        # Save the data to the temporary blockdev directly
        try:
            # This sets up a custom stream_factory that writes directly into the temp_blockdev,
            # rather than the standard stream_factory which writes to a temporary file waiting
            # on a save() call. This will break if the API ever uploads multiple files, but
            # this is an acceptable workaround.
            def ova_stream_factory(total_content_length, filename, content_type, content_length=None):
                return open(temp_blockdev, 'wb')
            parse_form_data(flask.request.environ, stream_factory=ova_stream_factory)
        except:
            output = {
                'message': "Failed to upload or write image file to temporary volume."
            }
            retcode = 400
            cleanup_maps_and_volumes()
            return output, retcode

        # Convert from the temporary to destination format on the blockdevs
        retcode, stdout, stderr = pvc_common.run_os_command(
            'qemu-img convert -C -f {} -O raw {} {}'.format(img_type, temp_blockdev, dest_blockdev)
        )
        if retcode:
            output = {
                'message': "Failed to convert image format from '{}' to 'raw': {}".format(img_type, stderr)
            }
            retcode = 400
            cleanup_maps_and_volumes()
            return output, retcode

        output = {
            'message': "Converted and wrote uploaded file to volume '{}' in pool '{}'.".format(volume, pool)
        }
        retcode = 200
        cleanup_maps_and_volumes()
        return output, retcode

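Editor's note: the endpoint implemented above consumes a standard multipart form upload, so any HTTP client can drive it. A minimal client-side sketch with the requests library; the API address and port, the pool and volume names, and the absence of authentication are all hypothetical placeholders here, not part of PVC:

# Sketch: push a qcow2 image to the volume upload endpoint.
# Host, port, pool, and volume below are made-up examples.
import requests

api = 'http://pvc.local:7370/api/v1'
pool, volume = 'vms', 'test-disk'

with open('image.qcow2', 'rb') as fh:
    resp = requests.post(
        '{}/storage/ceph/volume/{}/{}/upload'.format(api, pool, volume),
        params={'image_format': 'qcow2'},
        files={'file': ('image.qcow2', fh, 'application/octet-stream')},
    )
print(resp.status_code, resp.json().get('message', ''))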
def ceph_volume_snapshot_list(pool=None, volume=None, limit=None, is_fuzzy=True):
    """
    Get the list of RBD volume snapshots in the Ceph storage cluster.
@@ -53,6 +53,7 @@ libvirt_header = """<domain type='kvm'>
  <on_reboot>restart</on_reboot>
  <on_crash>restart</on_crash>
  <devices>
+    <console type='pty'/>
"""

# File footer, closing devices and domain elements
@@ -75,7 +76,6 @@ devices_default = """ <emulator>/usr/bin/kvm</emulator>
devices_serial = """ <serial type='pty'>
      <log file='/var/log/libvirt/{vm_name}.log' append='on'/>
    </serial>
-    <console type='pty'/>
"""

# VNC device
api-daemon/pvcapid/models.py (new executable file, 229 lines)
@@ -0,0 +1,229 @@
#!/usr/bin/env python3

# models.py - PVC Database models
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2020 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

from pvcapid.flaskapi import app, db

class DBSystemTemplate(db.Model):
    __tablename__ = 'system_template'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text, nullable=False, unique=True)
    vcpu_count = db.Column(db.Integer, nullable=False)
    vram_mb = db.Column(db.Integer, nullable=False)
    serial = db.Column(db.Boolean, nullable=False)
    vnc = db.Column(db.Boolean, nullable=False)
    vnc_bind = db.Column(db.Text)
    node_limit = db.Column(db.Text)
    node_selector = db.Column(db.Text)
    node_autostart = db.Column(db.Boolean, nullable=False)
    ova = db.Column(db.Integer, db.ForeignKey("ova.id"), nullable=True)

    def __init__(self, name, vcpu_count, vram_mb, serial, vnc, vnc_bind, node_limit, node_selector, node_autostart, ova=None):
        self.name = name
        self.vcpu_count = vcpu_count
        self.vram_mb = vram_mb
        self.serial = serial
        self.vnc = vnc
        self.vnc_bind = vnc_bind
        self.node_limit = node_limit
        self.node_selector = node_selector
        self.node_autostart = node_autostart
        self.ova = ova

    def __repr__(self):
        return '<id {}>'.format(self.id)

class DBNetworkTemplate(db.Model):
    __tablename__ = 'network_template'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text, nullable=False, unique=True)
    mac_template = db.Column(db.Text)
    ova = db.Column(db.Integer, db.ForeignKey("ova.id"), nullable=True)

    def __init__(self, name, mac_template, ova=None):
        self.name = name
        self.mac_template = mac_template
        self.ova = ova

    def __repr__(self):
        return '<id {}>'.format(self.id)

class DBNetworkElement(db.Model):
    __tablename__ = 'network'

    id = db.Column(db.Integer, primary_key=True)
    network_template = db.Column(db.Integer, db.ForeignKey("network_template.id"), nullable=False)
    vni = db.Column(db.Integer, nullable=False)

    def __init__(self, network_template, vni):
        self.network_template = network_template
        self.vni = vni

    def __repr__(self):
        return '<id {}>'.format(self.id)

class DBStorageTemplate(db.Model):
    __tablename__ = 'storage_template'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text, nullable=False, unique=True)
    ova = db.Column(db.Integer, db.ForeignKey("ova.id"), nullable=True)

    def __init__(self, name, ova=None):
        self.name = name
        self.ova = ova

    def __repr__(self):
        return '<id {}>'.format(self.id)

class DBStorageElement(db.Model):
    __tablename__ = 'storage'

    id = db.Column(db.Integer, primary_key=True)
    storage_template = db.Column(db.Integer, db.ForeignKey("storage_template.id"), nullable=False)
    pool = db.Column(db.Text, nullable=False)
    disk_id = db.Column(db.Text, nullable=False)
    source_volume = db.Column(db.Text)
    disk_size_gb = db.Column(db.Integer)
    mountpoint = db.Column(db.Text)
    filesystem = db.Column(db.Text)
    filesystem_args = db.Column(db.Text)

    def __init__(self, storage_template, pool, disk_id, source_volume, disk_size_gb, mountpoint, filesystem, filesystem_args):
        self.storage_template = storage_template
        self.pool = pool
        self.disk_id = disk_id
        self.source_volume = source_volume
        self.disk_size_gb = disk_size_gb
        self.mountpoint = mountpoint
        self.filesystem = filesystem
        self.filesystem_args = filesystem_args

    def __repr__(self):
        return '<id {}>'.format(self.id)

class DBUserdata(db.Model):
    __tablename__ = 'userdata'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text, nullable=False, unique=True)
    userdata = db.Column(db.Text, nullable=False)

    def __init__(self, name, userdata):
        self.name = name
        self.userdata = userdata

    def __repr__(self):
        return '<id {}>'.format(self.id)

class DBScript(db.Model):
    __tablename__ = 'script'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text, nullable=False, unique=True)
    script = db.Column(db.Text, nullable=False)

    def __init__(self, name, script):
        self.name = name
        self.script = script

    def __repr__(self):
        return '<id {}>'.format(self.id)

class DBOva(db.Model):
    __tablename__ = 'ova'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text, nullable=False, unique=True)
    ovf = db.Column(db.Text, nullable=False)

    def __init__(self, name, ovf):
        self.name = name
        self.ovf = ovf

    def __repr__(self):
        return '<id {}>'.format(self.id)

class DBOvaVolume(db.Model):
    __tablename__ = 'ova_volume'

    id = db.Column(db.Integer, primary_key=True)
    ova = db.Column(db.Integer, db.ForeignKey("ova.id"), nullable=False)
    pool = db.Column(db.Text, nullable=False)
    volume_name = db.Column(db.Text, nullable=False)
    volume_format = db.Column(db.Text, nullable=False)
    disk_id = db.Column(db.Text, nullable=False)
    disk_size_gb = db.Column(db.Integer, nullable=False)

    def __init__(self, ova, pool, volume_name, volume_format, disk_id, disk_size_gb):
        self.ova = ova
        self.pool = pool
        self.volume_name = volume_name
        self.volume_format = volume_format
        self.disk_id = disk_id
        self.disk_size_gb = disk_size_gb

    def __repr__(self):
        return '<id {}>'.format(self.id)

class DBProfile(db.Model):
    __tablename__ = 'profile'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text, nullable=False, unique=True)
    profile_type = db.Column(db.Text, nullable=False)
    system_template = db.Column(db.Integer, db.ForeignKey("system_template.id"))
    network_template = db.Column(db.Integer, db.ForeignKey("network_template.id"))
    storage_template = db.Column(db.Integer, db.ForeignKey("storage_template.id"))
    userdata = db.Column(db.Integer, db.ForeignKey("userdata.id"))
    script = db.Column(db.Integer, db.ForeignKey("script.id"))
    ova = db.Column(db.Integer, db.ForeignKey("ova.id"))
    arguments = db.Column(db.Text)

    def __init__(self, name, profile_type, system_template, network_template, storage_template, userdata, script, ova, arguments):
        self.name = name
        self.profile_type = profile_type
        self.system_template = system_template
        self.network_template = network_template
        self.storage_template = storage_template
        self.userdata = userdata
        self.script = script
        self.ova = ova
        self.arguments = arguments

    def __repr__(self):
        return '<id {}>'.format(self.id)

class DBStorageBenchmarks(db.Model):
    __tablename__ = 'storage_benchmarks'

    id = db.Column(db.Integer, primary_key=True)
    job = db.Column(db.Text, nullable=False)
    result = db.Column(db.Text, nullable=False)

    def __init__(self, job, result):
        self.job = job
        self.result = result

    def __repr__(self):
        return '<id {}>'.format(self.id)
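Editor's note: these are plain Flask-SQLAlchemy declarative models, so they can be exercised directly from a Python shell for debugging. A minimal sketch, assuming the pvcapid Flask app is importable and configured; PVC provisions this schema through its own packaging, so the db.create_all() call here is illustrative only, as are the template values:

# Sketch: create missing tables and insert an example system template.
from pvcapid.flaskapi import app, db
from pvcapid.models import DBSystemTemplate

with app.app_context():
    db.create_all()  # illustrative; PVC normally manages the schema itself
    # Signature: name, vcpu_count, vram_mb, serial, vnc, vnc_bind,
    #            node_limit, node_selector, node_autostart, ova=None
    tpl = DBSystemTemplate('small', 2, 2048, True, False, None, None, None, False)
    db.session.add(tpl)
    db.session.commit()
    print(DBSystemTemplate.query.filter_by(name='small').first())  # '<id N>'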
api-daemon/pvcapid/ova.py (new executable file, 556 lines)
@@ -0,0 +1,556 @@
#!/usr/bin/env python3

# ova.py - PVC OVA parser library
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2020 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

import flask
import json
import psycopg2
import psycopg2.extras
import os
import re
import time
import math
import tarfile
import shutil
import shlex
import subprocess

import lxml.etree

from werkzeug.formparser import parse_form_data

import daemon_lib.common as pvc_common
import daemon_lib.node as pvc_node
import daemon_lib.vm as pvc_vm
import daemon_lib.network as pvc_network
import daemon_lib.ceph as pvc_ceph

import pvcapid.libvirt_schema as libvirt_schema
import pvcapid.provisioner as provisioner

#
# Common functions
#

# Database connections
def open_database(config):
    conn = psycopg2.connect(
        host=config['database_host'],
        port=config['database_port'],
        dbname=config['database_name'],
        user=config['database_user'],
        password=config['database_password']
    )
    cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
    return conn, cur

def close_database(conn, cur, failed=False):
    if not failed:
        conn.commit()
    cur.close()
    conn.close()

#
# OVA functions
#
def list_ova(limit, is_fuzzy=True):
    if limit:
        if is_fuzzy:
            # Handle fuzzy vs. non-fuzzy limits
            if not re.match('\^.*', limit):
                limit = '%' + limit
            else:
                limit = limit[1:]
            if not re.match('.*\$', limit):
                limit = limit + '%'
            else:
                limit = limit[:-1]

        query = "SELECT id, name FROM {} WHERE name LIKE %s;".format('ova')
        args = (limit, )
    else:
        query = "SELECT id, name FROM {};".format('ova')
        args = ()

    conn, cur = open_database(config)
    cur.execute(query, args)
    data = cur.fetchall()
    close_database(conn, cur)

    ova_data = list()

    for ova in data:
        ova_id = ova.get('id')
        ova_name = ova.get('name')

        query = "SELECT pool, volume_name, volume_format, disk_id, disk_size_gb FROM {} WHERE ova = %s;".format('ova_volume')
        args = (ova_id,)
        conn, cur = open_database(config)
        cur.execute(query, args)
        volumes = cur.fetchall()
        close_database(conn, cur)

        ova_data.append({'id': ova_id, 'name': ova_name, 'volumes': volumes})

    if ova_data:
        return ova_data, 200
    else:
        return { 'message': 'No OVAs found.' }, 404
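Editor's note: the fuzzy-limit handling in list_ova() above translates the CLI's regex-style anchors into SQL LIKE wildcards: an unanchored term is wrapped in '%', while a leading '^' or trailing '$' pins the match to that end. A standalone sketch of the same translation; the helper name is illustrative, not part of PVC:

import re

def limit_to_sql_like(limit, is_fuzzy=True):
    # Mirror the translation used by list_ova() above:
    # unanchored text becomes %text%, ^/$ anchors strip to exact ends.
    if is_fuzzy:
        if not re.match('\^.*', limit):
            limit = '%' + limit
        else:
            limit = limit[1:]
        if not re.match('.*\$', limit):
            limit = limit + '%'
        else:
            limit = limit[:-1]
    return limit

print(limit_to_sql_like('debian'))    # '%debian%'  (substring match)
print(limit_to_sql_like('^debian'))   # 'debian%'   (prefix match)
print(limit_to_sql_like('^debian$'))  # 'debian'    (exact match)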
def delete_ova(name):
    ova_data, retcode = list_ova(name, is_fuzzy=False)
    if retcode != 200:
        retmsg = { 'message': 'The OVA "{}" does not exist.'.format(name) }
        retcode = 400
        return retmsg, retcode

    conn, cur = open_database(config)
    ova_id = ova_data[0].get('id')
    try:
        # Get the list of volumes for this OVA
        query = "SELECT pool, volume_name FROM ova_volume WHERE ova = %s;"
        args = (ova_id,)
        cur.execute(query, args)
        volumes = cur.fetchall()

        # Remove each volume for this OVA
        zk_conn = pvc_common.startZKConnection(config['coordinators'])
        for volume in volumes:
            pvc_ceph.remove_volume(zk_conn, volume.get('pool'), volume.get('volume_name'))

        # Delete the volume entries from the database
        query = "DELETE FROM ova_volume WHERE ova = %s;"
        args = (ova_id,)
        cur.execute(query, args)

        # Delete the profile entries from the database
        query = "DELETE FROM profile WHERE ova = %s;"
        args = (ova_id,)
        cur.execute(query, args)

        # Delete the system_template entries from the database
        query = "DELETE FROM system_template WHERE ova = %s;"
        args = (ova_id,)
        cur.execute(query, args)

        # Delete the OVA entry from the database
        query = "DELETE FROM ova WHERE id = %s;"
        args = (ova_id,)
        cur.execute(query, args)

        retmsg = { "message": 'Removed OVA image "{}".'.format(name) }
        retcode = 200
    except Exception as e:
        retmsg = { 'message': 'Failed to remove OVA "{}": {}'.format(name, e) }
        retcode = 400
    close_database(conn, cur)
    return retmsg, retcode
def upload_ova(pool, name, ova_size):
    ova_archive = None

    # Cleanup function
    def cleanup_ova_maps_and_volumes():
        # Close the OVA archive
        if ova_archive:
            ova_archive.close()
        zk_conn = pvc_common.startZKConnection(config['coordinators'])
        # Unmap the OVA temporary blockdev
        retflag, retdata = pvc_ceph.unmap_volume(zk_conn, pool, "ova_{}".format(name))
        # Remove the OVA temporary blockdev
        retflag, retdata = pvc_ceph.remove_volume(zk_conn, pool, "ova_{}".format(name))
        pvc_common.stopZKConnection(zk_conn)

    # Normalize the OVA size to bytes
    ova_size_bytes = int(pvc_ceph.format_bytes_fromhuman(ova_size)[:-1])
    ova_size = pvc_ceph.format_bytes_fromhuman(ova_size)

    # Verify that the cluster has enough space to store the OVA volumes (2x OVA size temporarily, 1x permanently)
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    pool_information = pvc_ceph.getPoolInformation(zk_conn, pool)
    pvc_common.stopZKConnection(zk_conn)
    pool_free_space_bytes = int(pool_information['stats']['free_bytes'])
    if ova_size_bytes * 2 >= pool_free_space_bytes:
        output = {
            'message': "The cluster does not have enough free space ({}) to store the OVA volume ({}).".format(
                pvc_ceph.format_bytes_tohuman(pool_free_space_bytes),
                pvc_ceph.format_bytes_tohuman(ova_size_bytes)
            )
        }
        retcode = 400
        cleanup_ova_maps_and_volumes()
        return output, retcode

    # Create a temporary OVA blockdev
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_ceph.add_volume(zk_conn, pool, "ova_{}".format(name), ova_size)
    pvc_common.stopZKConnection(zk_conn)
    if not retflag:
        output = {
            'message': retdata.replace('\"', '\'')
        }
        retcode = 400
        cleanup_ova_maps_and_volumes()
        return output, retcode

    # Map the temporary OVA blockdev
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_ceph.map_volume(zk_conn, pool, "ova_{}".format(name))
    pvc_common.stopZKConnection(zk_conn)
    if not retflag:
        output = {
            'message': retdata.replace('\"', '\'')
        }
        retcode = 400
        cleanup_ova_maps_and_volumes()
        return output, retcode
    ova_blockdev = retdata

    # Save the OVA data to the temporary blockdev directly
    try:
        # This sets up a custom stream_factory that writes directly into the ova_blockdev,
        # rather than the standard stream_factory which writes to a temporary file waiting
        # on a save() call. This will break if the API ever uploads multiple files, but
        # this is an acceptable workaround.
        def ova_stream_factory(total_content_length, filename, content_type, content_length=None):
            return open(ova_blockdev, 'wb')
        parse_form_data(flask.request.environ, stream_factory=ova_stream_factory)
    except:
        output = {
            'message': "Failed to upload or write OVA file to temporary volume."
        }
        retcode = 400
        cleanup_ova_maps_and_volumes()
        return output, retcode

    try:
        # Set up the TAR reader for the OVA temporary blockdev
        ova_archive = tarfile.open(name=ova_blockdev)
        # Determine the files in the OVA
        members = ova_archive.getmembers()
    except tarfile.TarError:
        output = {
            'message': "The uploaded OVA file is not readable."
        }
        retcode = 400
        cleanup_ova_maps_and_volumes()
        return output, retcode

    # Parse through the members list and extract the OVF file
    for element in set(x for x in members if re.match('.*\.ovf$', x.name)):
        ovf_file = ova_archive.extractfile(element)

    # Parse the OVF file to get our VM details
    ovf_parser = OVFParser(ovf_file)
    ovf_xml_raw = ovf_parser.getXML()
    virtual_system = ovf_parser.getVirtualSystems()[0]
    virtual_hardware = ovf_parser.getVirtualHardware(virtual_system)
    disk_map = ovf_parser.getDiskMap(virtual_system)

    # Close the OVF file
    ovf_file.close()

    # Create and upload each disk volume
    for idx, disk in enumerate(disk_map):
        disk_identifier = "sd{}".format(chr(ord('a') + idx))
        volume = "ova_{}_{}".format(name, disk_identifier)
        dev_src = disk.get('src')
        dev_type = dev_src.split('.')[-1]
        dev_size_raw = ova_archive.getmember(dev_src).size
        vm_volume_size = disk.get('capacity')

        # Normalize the dev size to bytes
        dev_size_bytes = int(pvc_ceph.format_bytes_fromhuman(dev_size_raw)[:-1])
        dev_size = pvc_ceph.format_bytes_fromhuman(dev_size_raw)

        def cleanup_img_maps():
            zk_conn = pvc_common.startZKConnection(config['coordinators'])
            # Unmap the temporary blockdev
            retflag, retdata = pvc_ceph.unmap_volume(zk_conn, pool, volume)
            pvc_common.stopZKConnection(zk_conn)

        # Create the blockdev
        zk_conn = pvc_common.startZKConnection(config['coordinators'])
        retflag, retdata = pvc_ceph.add_volume(zk_conn, pool, volume, dev_size)
        pvc_common.stopZKConnection(zk_conn)
        if not retflag:
            output = {
                'message': retdata.replace('\"', '\'')
            }
            retcode = 400
            cleanup_img_maps()
            cleanup_ova_maps_and_volumes()
            return output, retcode

        # Map the blockdev
        zk_conn = pvc_common.startZKConnection(config['coordinators'])
        retflag, retdata = pvc_ceph.map_volume(zk_conn, pool, volume)
        pvc_common.stopZKConnection(zk_conn)
        if not retflag:
            output = {
                'message': retdata.replace('\"', '\'')
            }
            retcode = 400
            cleanup_img_maps()
            cleanup_ova_maps_and_volumes()
            return output, retcode
        temp_blockdev = retdata

        try:
            # Open (extract) the TAR archive file and seek to byte 0
            vmdk_file = ova_archive.extractfile(disk.get('src'))
            vmdk_file.seek(0)
            # Open the temporary blockdev and seek to byte 0
            blk_file = open(temp_blockdev, 'wb')
            blk_file.seek(0)
            # Write the contents of vmdk_file into blk_file
            bytes_written = blk_file.write(vmdk_file.read())
            # Close blk_file (and flush the buffers)
            blk_file.close()
            # Close vmdk_file
            vmdk_file.close()
            # Perform an OS-level sync
            pvc_common.run_os_command('sync')
        except:
            output = {
                'message': "Failed to write image file '{}' to temporary volume.".format(disk.get('src'))
            }
            retcode = 400
            cleanup_img_maps()
            cleanup_ova_maps_and_volumes()
            return output, retcode

        cleanup_img_maps()

    cleanup_ova_maps_and_volumes()

    # Prepare the database entries
    query = "INSERT INTO ova (name, ovf) VALUES (%s, %s);"
    args = (name, ovf_xml_raw)
    conn, cur = open_database(config)
    try:
        cur.execute(query, args)
        close_database(conn, cur)
    except Exception as e:
        output = {
            'message': 'Failed to create OVA entry "{}": {}'.format(name, e)
        }
        retcode = 400
        close_database(conn, cur)
        return output, retcode

    # Get the OVA database id
    query = "SELECT id FROM ova WHERE name = %s;"
    args = (name, )
    conn, cur = open_database(config)
    cur.execute(query, args)
    ova_id = cur.fetchone()['id']
    close_database(conn, cur)

    # Prepare disk entries in ova_volume
    for idx, disk in enumerate(disk_map):
        disk_identifier = "sd{}".format(chr(ord('a') + idx))
        volume_type = disk.get('src').split('.')[-1]
        volume = "ova_{}_{}".format(name, disk_identifier)
        vm_volume_size = disk.get('capacity')

        # The function always returns XXXXB, so strip off the B and convert to an integer
        vm_volume_size_bytes = int(pvc_ceph.format_bytes_fromhuman(vm_volume_size)[:-1])
        vm_volume_size_gb = math.ceil(vm_volume_size_bytes / 1024 / 1024 / 1024)

        query = "INSERT INTO ova_volume (ova, pool, volume_name, volume_format, disk_id, disk_size_gb) VALUES (%s, %s, %s, %s, %s, %s);"
        args = (ova_id, pool, volume, volume_type, disk_identifier, vm_volume_size_gb)

        conn, cur = open_database(config)
        try:
            cur.execute(query, args)
            close_database(conn, cur)
        except Exception as e:
            output = {
                'message': 'Failed to create OVA volume entry "{}": {}'.format(volume, e)
            }
            retcode = 400
            close_database(conn, cur)
            return output, retcode

    # Prepare a system_template for the OVA
    vcpu_count = virtual_hardware.get('vcpus')
    vram_mb = virtual_hardware.get('vram')
    if virtual_hardware.get('graphics-controller') == 1:
        vnc = True
        serial = False
    else:
        vnc = False
        serial = True
    retdata, retcode = provisioner.create_template_system(name, vcpu_count, vram_mb, serial, vnc, vnc_bind=None, ova=ova_id)
    if retcode != 200:
        return retdata, retcode
    system_template, retcode = provisioner.list_template_system(name, is_fuzzy=False)
    if retcode != 200:
        return retdata, retcode
    system_template_name = system_template[0].get('name')

    # Prepare a barebones profile for the OVA
    retdata, retcode = provisioner.create_profile(name, 'ova', system_template_name, None, None, userdata=None, script=None, ova=name, arguments=None)
    if retcode != 200:
        return retdata, retcode

    output = {
        'message': "Imported OVA image '{}'.".format(name)
    }
    retcode = 200
    return output, retcode
#
# OVF parser
#
class OVFParser(object):
    RASD_TYPE = {
        "1": "vmci",
        "3": "vcpus",
        "4": "vram",
        "5": "ide-controller",
        "6": "scsi-controller",
        "10": "ethernet-adapter",
        "15": "cdrom",
        "17": "disk",
        "20": "other-storage-device",
        "23": "usb-controller",
        "24": "graphics-controller",
        "35": "sound-controller"
    }

    def _getFilelist(self):
        path = "{{{schema}}}References/{{{schema}}}File".format(schema=self.OVF_SCHEMA)
        id_attr = "{{{schema}}}id".format(schema=self.OVF_SCHEMA)
        href_attr = "{{{schema}}}href".format(schema=self.OVF_SCHEMA)
        current_list = self.xml.findall(path)
        results = [(x.get(id_attr), x.get(href_attr)) for x in current_list]
        return results

    def _getDisklist(self):
        path = "{{{schema}}}DiskSection/{{{schema}}}Disk".format(schema=self.OVF_SCHEMA)
        id_attr = "{{{schema}}}diskId".format(schema=self.OVF_SCHEMA)
        ref_attr = "{{{schema}}}fileRef".format(schema=self.OVF_SCHEMA)
        cap_attr = "{{{schema}}}capacity".format(schema=self.OVF_SCHEMA)
        cap_units = "{{{schema}}}capacityAllocationUnits".format(schema=self.OVF_SCHEMA)
        current_list = self.xml.findall(path)
        results = [(x.get(id_attr), x.get(ref_attr), x.get(cap_attr), x.get(cap_units)) for x in current_list]
        return results

    def _getAttributes(self, virtual_system, path, attribute):
        current_list = virtual_system.findall(path)
        results = [x.get(attribute) for x in current_list]
        return results

    def __init__(self, ovf_file):
        self.xml = lxml.etree.parse(ovf_file)

        # Define our schemas
        envelope_tag = self.xml.find(".")
        self.XML_SCHEMA = envelope_tag.nsmap.get('xsi')
        self.OVF_SCHEMA = envelope_tag.nsmap.get('ovf')
        self.RASD_SCHEMA = envelope_tag.nsmap.get('rasd')
        self.SASD_SCHEMA = envelope_tag.nsmap.get('sasd')
        self.VSSD_SCHEMA = envelope_tag.nsmap.get('vssd')

        self.ovf_version = int(self.OVF_SCHEMA.split('/')[-1])

        # Get the file and disk lists
        self.filelist = self._getFilelist()
        self.disklist = self._getDisklist()

    def getVirtualSystems(self):
        return self.xml.findall("{{{schema}}}VirtualSystem".format(schema=self.OVF_SCHEMA))

    def getXML(self):
        return lxml.etree.tostring(self.xml, pretty_print=True).decode('utf8')

    def getVirtualHardware(self, virtual_system):
        hardware_list = virtual_system.findall(
            "{{{schema}}}VirtualHardwareSection/{{{schema}}}Item".format(schema=self.OVF_SCHEMA)
        )
        virtual_hardware = {}

        for item in hardware_list:
            try:
                item_type = self.RASD_TYPE[item.find("{{{rasd}}}ResourceType".format(rasd=self.RASD_SCHEMA)).text]
            except:
                continue
            quantity = item.find("{{{rasd}}}VirtualQuantity".format(rasd=self.RASD_SCHEMA))
            if quantity is None:
                virtual_hardware[item_type] = 1
            else:
                virtual_hardware[item_type] = quantity.text

        return virtual_hardware

    def getDiskMap(self, virtual_system):
        # OVF v2 uses the StorageItem field, while v1 uses the normal Item field
        if self.ovf_version < 2:
            hardware_list = virtual_system.findall(
                "{{{schema}}}VirtualHardwareSection/{{{schema}}}Item".format(schema=self.OVF_SCHEMA)
            )
        else:
            hardware_list = virtual_system.findall(
                "{{{schema}}}VirtualHardwareSection/{{{schema}}}StorageItem".format(schema=self.OVF_SCHEMA)
            )
        disk_list = []

        for item in hardware_list:
            item_type = None

            if self.SASD_SCHEMA is not None:
                item_type = self.RASD_TYPE[item.find("{{{sasd}}}ResourceType".format(sasd=self.SASD_SCHEMA)).text]
            else:
                item_type = self.RASD_TYPE[item.find("{{{rasd}}}ResourceType".format(rasd=self.RASD_SCHEMA)).text]

            if item_type != 'disk':
                continue

            hostref = None
            if self.SASD_SCHEMA is not None:
                hostref = item.find("{{{sasd}}}HostResource".format(sasd=self.SASD_SCHEMA))
            else:
                hostref = item.find("{{{rasd}}}HostResource".format(rasd=self.RASD_SCHEMA))
            if hostref is None:
                continue
            disk_res = hostref.text

            # Determine which file this disk_res ultimately represents
            (disk_id, disk_ref, disk_capacity, disk_capacity_unit) = [x for x in self.disklist if x[0] == disk_res.split('/')[-1]][0]
            (file_id, disk_src) = [x for x in self.filelist if x[0] == disk_ref][0]

            if disk_capacity_unit is not None:
                # Handle the unit conversion
                base_unit, action, multiple = disk_capacity_unit.split()
                multiple_base, multiple_exponent = multiple.split('^')
                disk_capacity = int(disk_capacity) * ( int(multiple_base) ** int(multiple_exponent) )

            # Append the disk with all details to the list
            disk_list.append({
                "id": disk_id,
                "ref": disk_ref,
                "capacity": disk_capacity,
                "src": disk_src
            })

        return disk_list
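Editor's note: the unit-conversion branch in getDiskMap() deserves a worked example. OVF stores a disk's capacity as a bare number plus an optional capacityAllocationUnits string such as 'byte * 2^30', which the parser splits on whitespace and multiplies out. A standalone sketch of the same arithmetic; the helper name is illustrative:

def ovf_capacity_to_bytes(capacity, capacity_unit):
    # e.g. capacity='20', capacity_unit='byte * 2^30' -> 20 GiB in bytes
    if capacity_unit is None:
        return int(capacity)
    base_unit, action, multiple = capacity_unit.split()
    multiple_base, multiple_exponent = multiple.split('^')
    return int(capacity) * (int(multiple_base) ** int(multiple_exponent))

print(ovf_capacity_to_bytes('20', 'byte * 2^30'))  # 21474836480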
(File diff suppressed because it is too large.)
@@ -13,26 +13,35 @@ else
fi

HOSTS=( ${@} )
-echo "${HOSTS[@]}"
+echo "> Deploying to host(s): ${HOSTS[@]}"

# Build the packages
-$SUDO ./build-deb.sh
+echo -n "Building packages... "
+version="$( ./build-unstable-deb.sh 2>/dev/null )"
+echo "done. Package version ${version}."

# Install the client(s) locally
-$SUDO dpkg -i ../pvc-client*.deb
+echo -n "Installing client packages locally... "
+$SUDO dpkg -i ../pvc-client*_${version}*.deb &>/dev/null
+echo "done."

for HOST in ${HOSTS[@]}; do
-    echo "****"
-    echo "Deploying to host ${HOST}"
-    echo "****"
-    ssh $HOST $SUDO rm -rf /tmp/pvc
-    ssh $HOST mkdir /tmp/pvc
-    scp ../*.deb $HOST:/tmp/pvc/
-    ssh $HOST $SUDO dpkg -i /tmp/pvc/*.deb
-    ssh $HOST $SUDO systemctl restart pvcd
-    ssh $HOST rm -rf /tmp/pvc
-    echo "****"
-    echo "Waiting 10s for host ${HOST} to stabilize"
-    echo "****"
-    sleep 10
+    echo "> Deploying packages to host ${HOST}"
+    echo -n "Copying packages... "
+    ssh $HOST $SUDO rm -rf /tmp/pvc &>/dev/null
+    ssh $HOST mkdir /tmp/pvc &>/dev/null
+    scp ../pvc-*_${version}*.deb $HOST:/tmp/pvc/ &>/dev/null
+    echo "done."
+    echo -n "Installing packages... "
+    ssh $HOST $SUDO dpkg -i /tmp/pvc/{pvc-client-cli,pvc-daemon-common,pvc-daemon-api,pvc-daemon-node}*.deb &>/dev/null
+    ssh $HOST rm -rf /tmp/pvc &>/dev/null
+    echo "done."
+    echo -n "Restarting PVC daemons... "
+    ssh $HOST $SUDO systemctl restart pvcapid &>/dev/null
+    ssh $HOST $SUDO systemctl restart pvcapid-worker &>/dev/null
+    ssh $HOST $SUDO systemctl restart pvcnoded &>/dev/null
+    echo "done."
+    echo -n "Waiting 15s for host to stabilize... "
+    sleep 15
+    echo "done."
done
@@ -1,5 +1,5 @@
#!/bin/sh
-ver="0.6"
+ver="$( head -1 debian/changelog | awk -F'[()-]' '{ print $2 }' )"
git pull
rm ../pvc_*
dh_make -p pvc_${ver} --createorig --single --yes
build-unstable-deb.sh (new executable file, 33 lines)
@@ -0,0 +1,33 @@
#!/bin/sh
set -o xtrace
exec 3>&1
exec 1>&2
# Ensure we're up to date
git pull --rebase
# Update the version to a sensible git revision for easy visualization
base_ver="$( head -1 debian/changelog | awk -F'[()-]' '{ print $2 }' )"
new_ver="${base_ver}~git-$(git rev-parse --short HEAD)"
echo ${new_ver} >&3
# Back up the existing changelog and Daemon.py files
tmpdir=$( mktemp -d )
cp -a debian/changelog node-daemon/pvcnoded/Daemon.py ${tmpdir}/
# Replace the "base" version with the git revision version
sed -i "s/version = '${base_ver}'/version = '${new_ver}'/" node-daemon/pvcnoded/Daemon.py
sed -i "s/${base_ver}-0/${new_ver}/" debian/changelog
cat <<EOF > debian/changelog
pvc (${new_ver}) unstable; urgency=medium

  * Unstable revision for commit $(git rev-parse --short HEAD)

 -- Joshua Boniface <joshua@boniface.me>  $( date -R )
EOF
# Build source tarball
dh_make -p pvc_${new_ver} --createorig --single --yes
# Build packages
dpkg-buildpackage -us -uc
# Restore original changelog and Daemon.py files
cp -a ${tmpdir}/changelog debian/changelog
cp -a ${tmpdir}/Daemon.py node-daemon/pvcnoded/Daemon.py
# Clean up
rm -r ${tmpdir}
dh_clean
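Editor's note: the exec 3>&1 / exec 1>&2 pair above means the script's only stdout output is the computed version string, which deploy-to-cluster.sh captures with command substitution while the xtrace noise goes to stderr. For illustration, the same version computation expressed in Python, assumed to run from the repository root like the script does:

# Sketch: derive the unstable version string (base~git-<short sha>).
import re
import subprocess

with open('debian/changelog') as fh:
    first = fh.readline()
base_ver = re.search(r'\(([^)-]+)', first).group(1)  # e.g. 'pvc (0.7-0) ...' -> '0.7'
sha = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'], text=True).strip()
print('{}~git-{}'.format(base_ver, sha))  # e.g. '0.7~git-abc1234' (example value)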
@@ -1 +0,0 @@
-../client-common
@@ -1,11 +0,0 @@
-CREATE TABLE system_template (id SERIAL PRIMARY KEY, name TEXT NOT NULL UNIQUE, vcpu_count INT NOT NULL, vram_mb INT NOT NULL, serial BOOL NOT NULL, vnc BOOL NOT NULL, vnc_bind TEXT, node_limit TEXT, node_selector TEXT, node_autostart BOOL NOT NULL);
-CREATE TABLE network_template (id SERIAL PRIMARY KEY, name TEXT NOT NULL UNIQUE, mac_template TEXT);
-CREATE TABLE network (id SERIAL PRIMARY KEY, network_template INT REFERENCES network_template(id), vni INT NOT NULL);
-CREATE TABLE storage_template (id SERIAL PRIMARY KEY, name TEXT NOT NULL UNIQUE);
-CREATE TABLE storage (id SERIAL PRIMARY KEY, storage_template INT REFERENCES storage_template(id), pool TEXT NOT NULL, disk_id TEXT NOT NULL, source_volume TEXT, disk_size_gb INT, mountpoint TEXT, filesystem TEXT, filesystem_args TEXT);
-CREATE TABLE userdata (id SERIAL PRIMARY KEY, name TEXT NOT NULL UNIQUE, userdata TEXT NOT NULL);
-CREATE TABLE script (id SERIAL PRIMARY KEY, name TEXT NOT NULL UNIQUE, script TEXT NOT NULL);
-CREATE TABLE profile (id SERIAL PRIMARY KEY, name TEXT NOT NULL UNIQUE, system_template INT REFERENCES system_template(id), network_template INT REFERENCES network_template(id), storage_template INT REFERENCES storage_template(id), userdata INT REFERENCES userdata(id), script INT REFERENCES script(id), arguments text);
-
-INSERT INTO userdata (name, userdata) VALUES ('empty', '');
-INSERT INTO script (name, script) VALUES ('empty', '');
@@ -1,16 +0,0 @@
-# Parallel Virtual Cluster Provisioner client worker unit file
-
-[Unit]
-Description = Parallel Virtual Cluster Provisioner worker
-After = network-online.target
-
-[Service]
-Type = simple
-WorkingDirectory = /usr/share/pvc
-Environment = PYTHONUNBUFFERED=true
-Environment = PVC_CONFIG_FILE=/etc/pvc/pvc-api.yaml
-ExecStart = /usr/bin/celery worker -A pvc-api.celery --concurrency 1 --loglevel INFO
-Restart = on-failure
-
-[Install]
-WantedBy = multi-user.target
@@ -25,8 +25,10 @@ import json
import time
import math

+from requests_toolbelt.multipart.encoder import MultipartEncoder, MultipartEncoderMonitor
+
import cli_lib.ansiprint as ansiprint
-from cli_lib.common import call_api
+from cli_lib.common import UploadProgressBar, call_api

#
# Supplemental functions
@@ -112,7 +114,7 @@ def ceph_status(config):
    if response.status_code == 200:
        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

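Editor's note: the change from response.json()['message'] to response.json().get('message', '') recurs throughout the hunks below. The point is defensive access: a malformed or empty error body now yields an empty string instead of raising KeyError. In isolation:

body = {}  # an error response that carries no 'message' field
# body['message'] would raise KeyError; .get() degrades gracefully:
print(body.get('message', ''))  # prints an empty string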
def ceph_util(config):
    """
@@ -127,7 +129,7 @@ def ceph_util(config):
    if response.status_code == 200:
        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

def format_raw_output(status_data):
    ainformation = list()
@@ -153,7 +155,7 @@ def ceph_osd_info(config, osd):
    if response.status_code == 200:
        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

def ceph_osd_list(config, limit):
    """
@@ -172,7 +174,7 @@ def ceph_osd_list(config, limit):
    if response.status_code == 200:
        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

def ceph_osd_add(config, node, device, weight):
    """
@@ -194,7 +196,7 @@ def ceph_osd_add(config, node, device, weight):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

def ceph_osd_remove(config, osdid):
    """
@@ -214,7 +216,7 @@ def ceph_osd_remove(config, osdid):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

def ceph_osd_state(config, osdid, state):
    """
@@ -234,7 +236,7 @@ def ceph_osd_state(config, osdid, state):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

def ceph_osd_option(config, option, action):
    """
@@ -255,7 +257,7 @@ def ceph_osd_option(config, option, action):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

def getOutputColoursOSD(osd_information):
    # Set the UP status
@@ -315,11 +317,17 @@ def format_list_osd(osd_list):
        osd_information['stats']['size'] = osd_information['stats']['kb'] * 1024
        for datatype in 'size', 'wr_data', 'rd_data':
            databytes = osd_information['stats'][datatype]
-            databytes_formatted = format_bytes_tohuman(int(databytes))
+            if isinstance(databytes, int):
+                databytes_formatted = format_bytes_tohuman(databytes)
+            else:
+                databytes_formatted = databytes
            osd_information['stats'][datatype] = databytes_formatted
        for datatype in 'wr_ops', 'rd_ops':
            dataops = osd_information['stats'][datatype]
-            dataops_formatted = format_ops_tohuman(int(dataops))
+            if isinstance(dataops, int):
+                dataops_formatted = format_ops_tohuman(dataops)
+            else:
+                dataops_formatted = dataops
            osd_information['stats'][datatype] = dataops_formatted

    # Set the OSD ID length
@@ -443,7 +451,7 @@ Wr: {osd_wrops: <{osd_wrops_length}} \
    )
    )

-    for osd_information in osd_list:
+    for osd_information in sorted(osd_list, key = lambda x: int(x['id'])):
        try:
            # If this happens, the node hasn't checked in fully yet, so just ignore it
            if osd_information['stats']['node'] == '|':
@@ -514,7 +522,7 @@ Wr: {osd_wrops: <{osd_wrops_length}} \
    )
    )

-    return '\n'.join(sorted(osd_list_output))
+    return '\n'.join(osd_list_output)


#
@@ -533,7 +541,7 @@ def ceph_pool_info(config, pool):
    if response.status_code == 200:
        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

def ceph_pool_list(config, limit):
    """
@@ -552,7 +560,7 @@ def ceph_pool_list(config, limit):
    if response.status_code == 200:
        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

def ceph_pool_add(config, pool, pgs, replcfg):
    """
@@ -574,7 +582,7 @@ def ceph_pool_add(config, pool, pgs, replcfg):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

def ceph_pool_remove(config, pool):
    """
@@ -594,7 +602,7 @@ def ceph_pool_remove(config, pool):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

def format_list_pool(pool_list):
    # Handle empty list
@@ -744,7 +752,7 @@ Wr: {pool_write_ops: <{pool_write_ops_length}} \
    )
    )

-    for pool_information in pool_list:
+    for pool_information in sorted(pool_list, key = lambda x: int(x['stats']['id'])):
        # Format the output header
        pool_list_output.append('{bold}\
{pool_id: <{pool_id_length}} \
@@ -792,7 +800,8 @@ Wr: {pool_write_ops: <{pool_write_ops_length}} \
    )
    )

-    return '\n'.join(sorted(pool_list_output))
+    return '\n'.join(pool_list_output)


#
# Volume functions
@@ -810,7 +819,7 @@ def ceph_volume_info(config, pool, volume):
    if response.status_code == 200:
        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

def ceph_volume_list(config, limit, pool):
    """
@@ -831,7 +840,7 @@ def ceph_volume_list(config, limit, pool):
    if response.status_code == 200:
        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

def ceph_volume_add(config, pool, volume, size):
    """
@@ -853,7 +862,42 @@ def ceph_volume_add(config, pool, volume, size):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

def ceph_volume_upload(config, pool, volume, image_format, image_file):
    """
    Upload a disk image to a Ceph volume

    API endpoint: POST /api/v1/storage/ceph/volume/{pool}/{volume}/upload
    API arguments: image_format={image_format}
    API schema: {"message":"{data}"}
    """
    import click

    bar = UploadProgressBar(image_file, end_message="Parsing file on remote side...", end_nl=False)
    upload_data = MultipartEncoder(
        fields={ 'file': ('filename', open(image_file, 'rb'), 'application/octet-stream')}
    )
    upload_monitor = MultipartEncoderMonitor(upload_data, bar.update)

    headers = {
        "Content-Type": upload_monitor.content_type
    }
    params = {
        'image_format': image_format
    }

    response = call_api(config, 'post', '/storage/ceph/volume/{}/{}/upload'.format(pool, volume), headers=headers, params=params, data=upload_monitor)

    click.echo("done.")
    click.echo()

    if response.status_code == 200:
        retstatus = True
    else:
        retstatus = False

    return retstatus, response.json().get('message', '')

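Editor's note: MultipartEncoderMonitor wraps the encoder and fires a callback as each chunk is read off disk, which is what drives the progress bar while streaming the image instead of buffering it all in memory. A standalone sketch of the same pattern with plain requests; the URL and file name are placeholders:

# Sketch: streaming multipart upload with a progress callback.
import requests
from requests_toolbelt.multipart.encoder import MultipartEncoder, MultipartEncoderMonitor

encoder = MultipartEncoder(
    fields={'file': ('disk.img', open('disk.img', 'rb'), 'application/octet-stream')}
)
monitor = MultipartEncoderMonitor(
    encoder, lambda m: print('\r{}/{} bytes'.format(m.bytes_read, encoder.len), end='')
)
requests.post(
    'http://pvc.local:7370/upload',  # placeholder URL
    data=monitor,
    headers={'Content-Type': monitor.content_type},
)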
def ceph_volume_remove(config, pool, volume):
    """
@@ -870,7 +914,7 @@ def ceph_volume_remove(config, pool, volume):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

def ceph_volume_modify(config, pool, volume, new_name=None, new_size=None):
    """
@@ -894,7 +938,7 @@ def ceph_volume_modify(config, pool, volume, new_name=None, new_size=None):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

def ceph_volume_clone(config, pool, volume, new_volume):
    """
@@ -914,7 +958,7 @@ def ceph_volume_clone(config, pool, volume, new_volume):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

def format_list_volume(volume_list):
    # Handle empty list
@@ -1047,7 +1091,7 @@ def ceph_snapshot_info(config, pool, volume, snapshot):
    if response.status_code == 200:
        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

def ceph_snapshot_list(config, limit, volume, pool):
    """
@@ -1070,7 +1114,7 @@ def ceph_snapshot_list(config, limit, volume, pool):
    if response.status_code == 200:
        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

def ceph_snapshot_add(config, pool, volume, snapshot):
    """
@@ -1092,7 +1136,7 @@ def ceph_snapshot_add(config, pool, volume, snapshot):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

def ceph_snapshot_remove(config, pool, volume, snapshot):
    """
@@ -1109,7 +1153,7 @@ def ceph_snapshot_remove(config, pool, volume, snapshot):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

def ceph_snapshot_modify(config, pool, volume, snapshot, new_name=None):
    """
@@ -1131,7 +1175,7 @@ def ceph_snapshot_modify(config, pool, volume, snapshot, new_name=None):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

def format_list_snapshot(snapshot_list):
    # Handle empty list
@@ -1147,10 +1191,10 @@ def format_list_snapshot(snapshot_list):
    snapshot_volume_length = 7
    snapshot_pool_length = 5

-    for snapshot in snapshot_list:
-        snapshot_name = snapshot['snapshot']
-        snapshot_volume = snapshot['volume']
-        snapshot_pool = snapshot['pool']
+    for snapshot_information in snapshot_list:
+        snapshot_name = snapshot_information['snapshot']
+        snapshot_volume = snapshot_information['volume']
+        snapshot_pool = snapshot_information['pool']

        # Set the Snapshot name length
        _snapshot_name_length = len(snapshot_name) + 1
@@ -1184,10 +1228,10 @@
    )
    )

-    for snapshot in snapshot_list:
-        snapshot_name = snapshot['snapshot']
-        snapshot_volume = snapshot['volume']
-        snapshot_pool = snapshot['pool']
+    for snapshot_information in snapshot_list:
+        snapshot_name = snapshot_information['snapshot']
+        snapshot_volume = snapshot_information['volume']
+        snapshot_pool = snapshot_information['pool']
        snapshot_list_output.append('{bold}\
{snapshot_name: <{snapshot_name_length}} \
{snapshot_volume: <{snapshot_volume_length}} \
@@ -1205,3 +1249,457 @@ def format_list_snapshot(snapshot_list):
    )

    return '\n'.join(sorted(snapshot_list_output))

#
# Benchmark functions
#
def ceph_benchmark_run(config, pool):
    """
    Run a storage benchmark against {pool}

    API endpoint: POST /api/v1/storage/ceph/benchmark
    API arguments: pool={pool}
    API schema: {message}
    """
    params = {
        'pool': pool
    }
    response = call_api(config, 'post', '/storage/ceph/benchmark', params=params)

    if response.status_code == 202:
        retvalue = True
        retdata = 'Task ID: {}'.format(response.json()['task_id'])
    else:
        retvalue = False
        retdata = response.json().get('message', '')

    return retvalue, retdata

def ceph_benchmark_list(config, job):
    """
    View results of one or more previous benchmark runs

    API endpoint: GET /api/v1/storage/ceph/benchmark
    API arguments: job={job}
    API schema: {results}
    """
    if job is not None:
        params = {
            'job': job
        }
    else:
        params = {}

    response = call_api(config, 'get', '/storage/ceph/benchmark', params=params)

    if response.status_code == 200:
        retvalue = True
        retdata = response.json()
    else:
        retvalue = False
        retdata = response.json().get('message', '')

    return retvalue, retdata
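Editor's note: because the benchmark endpoint answers 202 Accepted with a Celery task ID, results must be fetched later through ceph_benchmark_list(). A hedged usage sketch; config is the CLI's own API configuration object and the pool name is an example:

# Sketch: kick off a benchmark, then fetch stored results afterwards.
from cli_lib.ceph import ceph_benchmark_run, ceph_benchmark_list

ok, msg = ceph_benchmark_run(config, 'vms')
print(msg)  # e.g. 'Task ID: <celery-uuid>'

# Later, once the job has finished:
ok, results = ceph_benchmark_list(config, job=None)  # None lists all runs
for bench in results:
    state = 'running' if bench['benchmark_result'] == 'Running' else 'complete'
    print(bench['job'], state)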
def format_list_benchmark(config, benchmark_information):
    benchmark_list_output = []

    benchmark_id_length = 3
    benchmark_job_length = 20
    benchmark_bandwidth_length = dict()
    benchmark_iops_length = dict()

    # For this output, show only the Sequential (seq_read and seq_write) and
    # 4k Random (rand_read_4K and rand_write_4K) results, one set per test result.
    for test in [ "seq_read", "seq_write", "rand_read_4K", "rand_write_4K" ]:
        benchmark_bandwidth_length[test] = 7
        benchmark_iops_length[test] = 6

    for benchmark in benchmark_information:
        benchmark_job = benchmark['job']
        _benchmark_job_length = len(benchmark_job)
        if _benchmark_job_length > benchmark_job_length:
            benchmark_job_length = _benchmark_job_length

        if benchmark['benchmark_result'] == 'Running':
            continue
        benchmark_data = json.loads(benchmark['benchmark_result'])

        benchmark_bandwidth = dict()
        benchmark_iops = dict()
        for test in [ "seq_read", "seq_write", "rand_read_4K", "rand_write_4K" ]:
            benchmark_bandwidth[test] = format_bytes_tohuman(int(benchmark_data[test]['overall']['bandwidth']) * 1024)
            benchmark_iops[test] = format_ops_tohuman(int(benchmark_data[test]['overall']['iops']))

            _benchmark_bandwidth_length = len(benchmark_bandwidth[test]) + 1
            if _benchmark_bandwidth_length > benchmark_bandwidth_length[test]:
                benchmark_bandwidth_length[test] = _benchmark_bandwidth_length

            _benchmark_iops_length = len(benchmark_iops[test]) + 1
            if _benchmark_iops_length > benchmark_iops_length[test]:
                benchmark_iops_length[test] = _benchmark_iops_length

    # Format the output header line 1
    benchmark_list_output.append('{bold}\
{benchmark_job: <{benchmark_job_length}} \
{seq_header: <{seq_header_length}} \
{rand_header: <{rand_header_length}} \
{end_bold}'.format(
        bold=ansiprint.bold(),
        end_bold=ansiprint.end(),
        benchmark_job_length=benchmark_job_length,
        seq_header_length=benchmark_bandwidth_length['seq_read'] + benchmark_bandwidth_length['seq_write'] + benchmark_iops_length['seq_read'] + benchmark_iops_length['seq_write'] + 3,
        rand_header_length=benchmark_bandwidth_length['rand_read_4K'] + benchmark_bandwidth_length['rand_write_4K'] + benchmark_iops_length['rand_read_4K'] + benchmark_iops_length['rand_write_4K'] + 2,
        benchmark_job='Benchmark Job',
        seq_header='Sequential (4M blocks):',
        rand_header='Random (4K blocks):'
    )
    )

    benchmark_list_output.append('{bold}\
{benchmark_job: <{benchmark_job_length}} \
{seq_benchmark_bandwidth: <{seq_benchmark_bandwidth_length}} \
{seq_benchmark_iops: <{seq_benchmark_iops_length}} \
{rand_benchmark_bandwidth: <{rand_benchmark_bandwidth_length}} \
{rand_benchmark_iops: <{rand_benchmark_iops_length}} \
{end_bold}'.format(
        bold=ansiprint.bold(),
        end_bold=ansiprint.end(),
        benchmark_job_length=benchmark_job_length,
        seq_benchmark_bandwidth_length=benchmark_bandwidth_length['seq_read'] + benchmark_bandwidth_length['seq_write'] + 2,
        seq_benchmark_iops_length=benchmark_iops_length['seq_read'] + benchmark_iops_length['seq_write'],
        rand_benchmark_bandwidth_length=benchmark_bandwidth_length['rand_read_4K'] + benchmark_bandwidth_length['rand_write_4K'] + 1,
        rand_benchmark_iops_length=benchmark_iops_length['rand_read_4K'] + benchmark_iops_length['rand_write_4K'],
        benchmark_job='',
        seq_benchmark_bandwidth='R/W Bandwidth/s',
        seq_benchmark_iops='R/W IOPS',
        rand_benchmark_bandwidth='R/W Bandwidth/s',
        rand_benchmark_iops='R/W IOPS'
    )
    )

    for benchmark in benchmark_information:
        benchmark_job = benchmark['job']

        if benchmark['benchmark_result'] == 'Running':
            seq_benchmark_bandwidth = 'Running'
            seq_benchmark_iops = 'Running'
            rand_benchmark_bandwidth = 'Running'
            rand_benchmark_iops = 'Running'
        else:
            benchmark_bandwidth = dict()
||||
benchmark_iops = dict()
|
||||
for test in [ "seq_read", "seq_write", "rand_read_4K", "rand_write_4K" ]:
|
||||
benchmark_data = json.loads(benchmark['benchmark_result'])
|
||||
benchmark_bandwidth[test] = format_bytes_tohuman(int(benchmark_data[test]['overall']['bandwidth']) * 1024)
|
||||
benchmark_iops[test] = format_ops_tohuman(int(benchmark_data[test]['overall']['iops']))
|
||||
|
||||
seq_benchmark_bandwidth = "{} / {}".format(benchmark_bandwidth['seq_read'], benchmark_bandwidth['seq_write'])
|
||||
seq_benchmark_iops = "{} / {}".format(benchmark_iops['seq_read'], benchmark_iops['seq_write'])
|
||||
rand_benchmark_bandwidth = "{} / {}".format(benchmark_bandwidth['rand_read_4K'], benchmark_bandwidth['rand_write_4K'])
|
||||
rand_benchmark_iops = "{} / {}".format(benchmark_iops['rand_read_4K'], benchmark_iops['rand_write_4K'])
|
||||
|
||||
|
||||
benchmark_list_output.append('{bold}\
|
||||
{benchmark_job: <{benchmark_job_length}} \
|
||||
{seq_benchmark_bandwidth: <{seq_benchmark_bandwidth_length}} \
|
||||
{seq_benchmark_iops: <{seq_benchmark_iops_length}} \
|
||||
{rand_benchmark_bandwidth: <{rand_benchmark_bandwidth_length}} \
|
||||
{rand_benchmark_iops: <{rand_benchmark_iops_length}} \
|
||||
{end_bold}'.format(
|
||||
bold='',
|
||||
end_bold='',
|
||||
benchmark_job_length=benchmark_job_length,
|
||||
seq_benchmark_bandwidth_length=benchmark_bandwidth_length['seq_read'] + benchmark_bandwidth_length['seq_write'] + 2,
|
||||
seq_benchmark_iops_length=benchmark_iops_length['seq_read'] + benchmark_iops_length['seq_write'],
|
||||
rand_benchmark_bandwidth_length=benchmark_bandwidth_length['rand_read_4K'] + benchmark_bandwidth_length['rand_write_4K'] + 1,
|
||||
rand_benchmark_iops_length=benchmark_iops_length['rand_read_4K'] + benchmark_iops_length['rand_write_4K'],
|
||||
benchmark_job=benchmark_job,
|
||||
seq_benchmark_bandwidth=seq_benchmark_bandwidth,
|
||||
seq_benchmark_iops=seq_benchmark_iops,
|
||||
rand_benchmark_bandwidth=rand_benchmark_bandwidth,
|
||||
rand_benchmark_iops=rand_benchmark_iops
|
||||
)
|
||||
)
|
||||
|
||||
return '\n'.join(benchmark_list_output)
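The formatter above uses the same two-pass layout strategy as every table in this changeset: one pass over the data to grow per-column widths from their header minimums, then a second pass to emit padded rows. A minimal, self-contained sketch of the technique (the rows and column names are illustrative only):

# Two-pass fixed-width table: widths start at the header length and
# grow to fit the data, then rows are emitted with left-aligned padding.
rows = [{'job': 'pool1-20210101', 'iops': '12.3K'}, {'job': 'x', 'iops': '980'}]

widths = {'job': len('Job'), 'iops': len('IOPS')}
for row in rows:
    for col in widths:
        widths[col] = max(widths[col], len(row[col]))

template = '{job: <{jw}}  {iops: <{iw}}'
print(template.format(job='Job', iops='IOPS', jw=widths['job'], iw=widths['iops']))
for row in rows:
    print(template.format(job=row['job'], iops=row['iops'], jw=widths['job'], iw=widths['iops']))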

def format_info_benchmark(config, benchmark_information):
    # Load information from benchmark output
    benchmark_id = benchmark_information[0]['id']
    benchmark_job = benchmark_information[0]['job']

    if benchmark_information[0]['benchmark_result'] == "Running":
        return "Benchmark test is still running."

    benchmark_details = json.loads(benchmark_information[0]['benchmark_result'])

    # Format a nice output; do this line-by-line then concat the elements at the end
    ainformation = []
    ainformation.append('{}Storage Benchmark details:{}'.format(ansiprint.bold(), ansiprint.end()))

    nice_test_name_map = {
        "seq_read": "Sequential Read (4M blocks)",
        "seq_write": "Sequential Write (4M blocks)",
        "rand_read_4M": "Random Read (4M blocks)",
        "rand_write_4M": "Random Write (4M blocks)",
        "rand_read_256K": "Random Read (256K blocks)",
        "rand_write_256K": "Random Write (256K blocks)",
        "rand_read_4K": "Random Read (4K blocks)",
        "rand_write_4K": "Random Write (4K blocks)"
    }

    test_name_length = 30
    overall_label_length = 12
    overall_column_length = 8
    bandwidth_label_length = 9
    bandwidth_column_length = 10
    iops_column_length = 6
    latency_column_length = 8
    cpuutil_label_length = 11
    cpuutil_column_length = 9

    for test in benchmark_details:
        _test_name_length = len(nice_test_name_map[test])
        if _test_name_length > test_name_length:
            test_name_length = _test_name_length

        for element in benchmark_details[test]['overall']:
            _element_length = len(benchmark_details[test]['overall'][element])
            if _element_length > overall_column_length:
                overall_column_length = _element_length

        for element in benchmark_details[test]['bandwidth']:
            try:
                _element_length = len(format_bytes_tohuman(int(float(benchmark_details[test]['bandwidth'][element]))))
            except Exception:
                _element_length = len(benchmark_details[test]['bandwidth'][element])
            if _element_length > bandwidth_column_length:
                bandwidth_column_length = _element_length

        for element in benchmark_details[test]['iops']:
            try:
                _element_length = len(format_ops_tohuman(int(float(benchmark_details[test]['iops'][element]))))
            except Exception:
                _element_length = len(benchmark_details[test]['iops'][element])
            if _element_length > iops_column_length:
                iops_column_length = _element_length

        for element in benchmark_details[test]['latency']:
            _element_length = len(benchmark_details[test]['latency'][element])
            if _element_length > latency_column_length:
                latency_column_length = _element_length

        for element in benchmark_details[test]['cpu']:
            _element_length = len(benchmark_details[test]['cpu'][element])
            if _element_length > cpuutil_column_length:
                cpuutil_column_length = _element_length

    for test in benchmark_details:
        ainformation.append('')

        test_details = benchmark_details[test]

        # Top row (Headers)
        ainformation.append('{bold}\
{test_name: <{test_name_length}} \
{overall_label: <{overall_label_length}} \
{overall: <{overall_length}} \
{bandwidth_label: <{bandwidth_label_length}} \
{bandwidth: <{bandwidth_length}} \
{iops: <{iops_length}} \
{latency: <{latency_length}} \
{cpuutil_label: <{cpuutil_label_length}} \
{cpuutil: <{cpuutil_length}} \
{end_bold}'.format(
            bold=ansiprint.bold(),
            end_bold=ansiprint.end(),
            test_name='Test:',
            test_name_length=test_name_length,
            overall_label='',
            overall_label_length=overall_label_length,
            overall="General",
            overall_length=overall_column_length,
            bandwidth_label='',
            bandwidth_label_length=bandwidth_label_length,
            bandwidth="Bandwidth",
            bandwidth_length=bandwidth_column_length,
            iops="IOPS",
            iops_length=iops_column_length,
            latency="Latency (μs)",
            latency_length=latency_column_length,
            cpuutil_label='',
            cpuutil_label_length=cpuutil_label_length,
            cpuutil="CPU Util",
            cpuutil_length=cpuutil_column_length
        ))
        # Second row (Test, Size, Min, User)
        ainformation.append('{bold}\
{test_name: <{test_name_length}} \
{overall_label: >{overall_label_length}} \
{overall: <{overall_length}} \
{bandwidth_label: >{bandwidth_label_length}} \
{bandwidth: <{bandwidth_length}} \
{iops: <{iops_length}} \
{latency: <{latency_length}} \
{cpuutil_label: >{cpuutil_label_length}} \
{cpuutil: <{cpuutil_length}} \
{end_bold}'.format(
            bold='',
            end_bold='',
            test_name=nice_test_name_map[test],
            test_name_length=test_name_length,
            overall_label='Test Size:',
            overall_label_length=overall_label_length,
            overall=format_bytes_tohuman(int(test_details['overall']['iosize']) * 1024),
            overall_length=overall_column_length,
            bandwidth_label='Min:',
            bandwidth_label_length=bandwidth_label_length,
            bandwidth=format_bytes_tohuman(int(test_details['bandwidth']['min']) * 1024),
            bandwidth_length=bandwidth_column_length,
            iops=format_ops_tohuman(int(test_details['iops']['min'])),
            iops_length=iops_column_length,
            latency=test_details['latency']['min'],
            latency_length=latency_column_length,
            cpuutil_label='User:',
            cpuutil_label_length=cpuutil_label_length,
            cpuutil=test_details['cpu']['user'],
            cpuutil_length=cpuutil_column_length
        ))
        # Third row (blank, BW/s, Max, System)
        ainformation.append('{bold}\
{test_name: <{test_name_length}} \
{overall_label: >{overall_label_length}} \
{overall: <{overall_length}} \
{bandwidth_label: >{bandwidth_label_length}} \
{bandwidth: <{bandwidth_length}} \
{iops: <{iops_length}} \
{latency: <{latency_length}} \
{cpuutil_label: >{cpuutil_label_length}} \
{cpuutil: <{cpuutil_length}} \
{end_bold}'.format(
            bold='',
            end_bold='',
            test_name='',
            test_name_length=test_name_length,
            overall_label='Bandwidth/s:',
            overall_label_length=overall_label_length,
            overall=format_bytes_tohuman(int(test_details['overall']['bandwidth']) * 1024),
            overall_length=overall_column_length,
            bandwidth_label='Max:',
            bandwidth_label_length=bandwidth_label_length,
            bandwidth=format_bytes_tohuman(int(test_details['bandwidth']['max']) * 1024),
            bandwidth_length=bandwidth_column_length,
            iops=format_ops_tohuman(int(test_details['iops']['max'])),
            iops_length=iops_column_length,
            latency=test_details['latency']['max'],
            latency_length=latency_column_length,
            cpuutil_label='System:',
            cpuutil_label_length=cpuutil_label_length,
            cpuutil=test_details['cpu']['system'],
            cpuutil_length=cpuutil_column_length
        ))
        # Fourth row (blank, IOPS, Mean, CtxSw)
        ainformation.append('{bold}\
{test_name: <{test_name_length}} \
{overall_label: >{overall_label_length}} \
{overall: <{overall_length}} \
{bandwidth_label: >{bandwidth_label_length}} \
{bandwidth: <{bandwidth_length}} \
{iops: <{iops_length}} \
{latency: <{latency_length}} \
{cpuutil_label: >{cpuutil_label_length}} \
{cpuutil: <{cpuutil_length}} \
{end_bold}'.format(
            bold='',
            end_bold='',
            test_name='',
            test_name_length=test_name_length,
            overall_label='IOPS:',
            overall_label_length=overall_label_length,
            overall=format_ops_tohuman(int(test_details['overall']['iops'])),
            overall_length=overall_column_length,
            bandwidth_label='Mean:',
            bandwidth_label_length=bandwidth_label_length,
            bandwidth=format_bytes_tohuman(int(float(test_details['bandwidth']['mean'])) * 1024),
            bandwidth_length=bandwidth_column_length,
            iops=format_ops_tohuman(int(float(test_details['iops']['mean']))),
            iops_length=iops_column_length,
            latency=test_details['latency']['mean'],
            latency_length=latency_column_length,
            cpuutil_label='CtxSw:',
            cpuutil_label_length=cpuutil_label_length,
            cpuutil=test_details['cpu']['ctxsw'],
            cpuutil_length=cpuutil_column_length
        ))
        # Fifth row (blank, Runtime, StdDev, MajFault)
        ainformation.append('{bold}\
{test_name: <{test_name_length}} \
{overall_label: >{overall_label_length}} \
{overall: <{overall_length}} \
{bandwidth_label: >{bandwidth_label_length}} \
{bandwidth: <{bandwidth_length}} \
{iops: <{iops_length}} \
{latency: <{latency_length}} \
{cpuutil_label: >{cpuutil_label_length}} \
{cpuutil: <{cpuutil_length}} \
{end_bold}'.format(
            bold='',
            end_bold='',
            test_name='',
            test_name_length=test_name_length,
            overall_label='Runtime (s):',
            overall_label_length=overall_label_length,
            overall=int(test_details['overall']['runtime']) / 1000.0,
            overall_length=overall_column_length,
            bandwidth_label='StdDev:',
            bandwidth_label_length=bandwidth_label_length,
            bandwidth=format_bytes_tohuman(int(float(test_details['bandwidth']['stdev'])) * 1024),
            bandwidth_length=bandwidth_column_length,
            iops=format_ops_tohuman(int(float(test_details['iops']['stdev']))),
            iops_length=iops_column_length,
            latency=test_details['latency']['stdev'],
            latency_length=latency_column_length,
            cpuutil_label='MajFault:',
            cpuutil_label_length=cpuutil_label_length,
            cpuutil=test_details['cpu']['majfault'],
            cpuutil_length=cpuutil_column_length
        ))
        # Sixth row (blank, blank, Samples, MinFault)
        ainformation.append('{bold}\
{test_name: <{test_name_length}} \
{overall_label: >{overall_label_length}} \
{overall: <{overall_length}} \
{bandwidth_label: >{bandwidth_label_length}} \
{bandwidth: <{bandwidth_length}} \
{iops: <{iops_length}} \
{latency: <{latency_length}} \
{cpuutil_label: >{cpuutil_label_length}} \
{cpuutil: <{cpuutil_length}} \
{end_bold}'.format(
            bold='',
            end_bold='',
            test_name='',
            test_name_length=test_name_length,
            overall_label='',
            overall_label_length=overall_label_length,
            overall='',
            overall_length=overall_column_length,
            bandwidth_label='Samples:',
            bandwidth_label_length=bandwidth_label_length,
            bandwidth=test_details['bandwidth']['numsamples'],
            bandwidth_length=bandwidth_column_length,
            iops=test_details['iops']['numsamples'],
            iops_length=iops_column_length,
            latency='',
            latency_length=latency_column_length,
            cpuutil_label='MinFault:',
            cpuutil_label_length=cpuutil_label_length,
            cpuutil=test_details['cpu']['minfault'],
            cpuutil_length=cpuutil_column_length
        ))

    ainformation.append('')

    return '\n'.join(ainformation)
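All of these row templates rely on Python's nested format specifiers, where the field width is itself a format argument. A short reference example:

# Nested format specifiers: the width of a field can be supplied as a
# keyword argument, which is how every table in this changeset aligns
# its columns ('<' left-aligns, '>' right-aligns).
print('{name: <{width}}|'.format(name='seq_read', width=12))
# -> 'seq_read    |'
print('{name: >{width}}|'.format(name='seq_read', width=12))
# -> '    seq_read|'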

@@ -33,14 +33,14 @@ def initialize(config):
    API arguments:
    API schema: {json_data_object}
    """
-    response = call_api(config, 'get', '/initialize')
+    response = call_api(config, 'post', '/initialize')

    if response.status_code == 200:
        retstatus = True
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

def maintenance_mode(config, state):
    """
@@ -60,7 +60,7 @@ def maintenance_mode(config, state):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

def get_info(config):
    """
@@ -75,7 +75,7 @@ def get_info(config):
    if response.status_code == 200:
        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

def format_info(cluster_information, oformat):
    if oformat == 'json':
@@ -92,10 +92,29 @@ def format_info(cluster_information, oformat):
    else:
        health_colour = ansiprint.yellow()

    if cluster_information['storage_health'] == 'Optimal':
        storage_health_colour = ansiprint.green()
    elif cluster_information['storage_health'] == 'Maintenance':
        storage_health_colour = ansiprint.blue()
    else:
        storage_health_colour = ansiprint.yellow()

    ainformation = []
    ainformation.append('{}PVC cluster status:{}'.format(ansiprint.bold(), ansiprint.end()))
    ainformation.append('')
    ainformation.append('{}Cluster health:{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), health_colour, cluster_information['health'], ansiprint.end()))
    if cluster_information['health_msg']:
        for line in cluster_information['health_msg']:
            ainformation.append(
                ' > {}'.format(line)
            )
    ainformation.append('{}Storage health:{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), storage_health_colour, cluster_information['storage_health'], ansiprint.end()))
    if cluster_information['storage_health_msg']:
        for line in cluster_information['storage_health_msg']:
            ainformation.append(
                ' > {}'.format(line)
            )
    ainformation.append('')
    ainformation.append('{}Primary node:{} {}'.format(ansiprint.purple(), ansiprint.end(), cluster_information['primary_node']))
    ainformation.append('{}Cluster upstream IP:{} {}'.format(ansiprint.purple(), ansiprint.end(), cluster_information['upstream_ip']))
    ainformation.append('')

@@ -20,10 +20,99 @@
#
###############################################################################

import os
import io
import math
import time
import requests
import click
from urllib3 import disable_warnings

-def call_api(config, operation, request_uri, params=None, data=None):
def format_bytes(size_bytes):
    byte_unit_matrix = {
        'B': 1,
        'K': 1024,
        'M': 1024*1024,
        'G': 1024*1024*1024,
        'T': 1024*1024*1024*1024,
        'P': 1024*1024*1024*1024*1024
    }
    human_bytes = '0B'
    for unit in sorted(byte_unit_matrix, key=byte_unit_matrix.get):
        formatted_bytes = int(math.ceil(size_bytes / byte_unit_matrix[unit]))
        if formatted_bytes < 10000:
            human_bytes = '{}{}'.format(formatted_bytes, unit)
            break
    return human_bytes

def format_metric(integer):
    integer_unit_matrix = {
        '': 1,
        'K': 1000,
        'M': 1000*1000,
        'B': 1000*1000*1000,
        'T': 1000*1000*1000*1000,
        'Q': 1000*1000*1000*1000*1000
    }
    human_integer = '0'
    for unit in sorted(integer_unit_matrix, key=integer_unit_matrix.get):
        formatted_integer = int(math.ceil(integer / integer_unit_matrix[unit]))
        if formatted_integer < 10000:
            human_integer = '{}{}'.format(formatted_integer, unit)
            break
    return human_integer
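Both helpers walk their unit table from smallest to largest and stop at the first unit that keeps the value under 10000, so output is at most four digits plus a suffix. Expected behaviour, traced from the definitions above:

# Expected output of the two humanisation helpers defined above:
print(format_bytes(1024))          # '1024B' (still under the 10000 cutoff)
print(format_bytes(123456789))     # '118M'  (ceil(123456789 / 1048576))
print(format_metric(9999))         # '9999'
print(format_metric(1234567))      # '1235K' (ceil(1234567 / 1000))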

class UploadProgressBar(object):
    def __init__(self, filename, end_message='', end_nl=True):
        file_size = os.path.getsize(filename)
        file_size_human = format_bytes(file_size)
        click.echo("Uploading file (total size {})...".format(file_size_human))

        self.length = file_size
        self.time_last = int(round(time.time() * 1000)) - 1000
        self.bytes_last = 0
        self.bytes_diff = 0
        self.is_end = False

        self.end_message = end_message
        self.end_nl = end_nl
        if not self.end_nl:
            self.end_suffix = ' '
        else:
            self.end_suffix = ''

        self.bar = click.progressbar(length=self.length, show_eta=True)

    def update(self, monitor):
        bytes_cur = monitor.bytes_read
        self.bytes_diff += bytes_cur - self.bytes_last
        if self.bytes_last == bytes_cur:
            self.is_end = True
        self.bytes_last = bytes_cur

        time_cur = int(round(time.time() * 1000))
        if (time_cur - 1000) > self.time_last:
            self.time_last = time_cur
            self.bar.update(self.bytes_diff)
            self.bytes_diff = 0

        if self.is_end:
            self.bar.update(self.bytes_diff)
            self.bytes_diff = 0
            click.echo()
            click.echo()
            if self.end_message:
                click.echo(self.end_message + self.end_suffix, nl=self.end_nl)

class ErrorResponse(requests.Response):
    def __init__(self, json_data, status_code):
        self.json_data = json_data
        self.status_code = status_code

    def json(self):
        return self.json_data
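ErrorResponse mimics just enough of requests.Response (a status_code plus a json() method) that callers can treat a connection failure exactly like an HTTP error, instead of the old behaviour of exiting the process inside call_api. A sketch of the uniform calling pattern this enables (mirroring the pattern used throughout these files):

# Uniform error handling enabled by ErrorResponse: callers no longer
# need to special-case "API unreachable" separately from HTTP errors.
def get_nodes(config):
    response = call_api(config, 'get', '/node')
    if response.status_code == 200:
        return True, response.json()
    # Works identically for a real 4xx/5xx response and for the synthetic
    # 500 ErrorResponse built when the connection itself fails.
    return False, response.json().get('message', '')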

+def call_api(config, operation, request_uri, headers={}, params=None, data=None, files=None):
    # Craft the URI
    uri = '{}://{}{}{}'.format(
        config['api_scheme'],
@@ -34,50 +123,56 @@ def call_api(config, operation, request_uri, params=None, data=None):

    # Craft the authentication header if required
    if config['api_key']:
-        headers = {'X-Api-Key': config['api_key']}
-    else:
-        headers = None
+        headers['X-Api-Key'] = config['api_key']

    # Determine the request type and hit the API
    disable_warnings()
    try:
        if operation == 'get':
            response = requests.get(
                uri,
                headers=headers,
                params=params,
-                data=data
+                data=data,
+                verify=config['verify_ssl']
            )
        if operation == 'post':
            response = requests.post(
                uri,
                headers=headers,
                params=params,
-                data=data
+                data=data,
+                files=files,
+                verify=config['verify_ssl']
            )
        if operation == 'put':
            response = requests.put(
                uri,
                headers=headers,
                params=params,
-                data=data
+                data=data,
+                files=files,
+                verify=config['verify_ssl']
            )
        if operation == 'patch':
            response = requests.patch(
                uri,
                headers=headers,
                params=params,
-                data=data
+                data=data,
+                verify=config['verify_ssl']
            )
        if operation == 'delete':
            response = requests.delete(
                uri,
                headers=headers,
                params=params,
-                data=data
+                data=data,
+                verify=config['verify_ssl']
            )
    except Exception as e:
-        click.echo('Failed to connect to the API: {}'.format(e))
-        exit(1)
+        message = 'Failed to connect to the API: {}'.format(e)
+        response = ErrorResponse({'message': message}, 500)

    # Display debug output
    if config['debug']:
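For reference, call_api reads several keys from the config dict; judging from this hunk, at minimum api_scheme, api_key, verify_ssl, and debug, plus host/prefix components for the URI. A hypothetical client config (the host and prefix key names are assumptions, since that part of the URI construction is truncated above; this is not an official schema):

# Hypothetical config dict consumed by call_api(), inferred from the keys
# referenced in this changeset; key names marked below are assumptions.
config = {
    'api_scheme': 'https',
    'api_host': 'pvc.local:7370',   # assumption: actual key name may differ
    'api_prefix': '/api/v1',        # assumption: actual key name may differ
    'api_key': 'secret-api-key',
    'verify_ssl': False,            # pairs with disable_warnings() above
    'debug': False,
}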

@@ -68,7 +68,7 @@ def net_info(config, net):
    if response.status_code == 200:
        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

def net_list(config, limit):
    """
@@ -87,7 +87,7 @@ def net_list(config, limit):
    if response.status_code == 200:
        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

def net_add(config, vni, description, nettype, domain, name_servers, ip4_network, ip4_gateway, ip6_network, ip6_gateway, dhcp4_flag, dhcp4_start, dhcp4_end):
    """
@@ -118,7 +118,7 @@ def net_add(config, vni, description, nettype, domain, name_servers, ip4_network
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

def net_modify(config, net, description, domain, name_servers, ip4_network, ip4_gateway, ip6_network, ip6_gateway, dhcp4_flag, dhcp4_start, dhcp4_end):
    """
@@ -157,7 +157,7 @@ def net_modify(config, net, description, domain, name_servers, ip4_network, ip4_
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

def net_remove(config, net):
    """
@@ -174,7 +174,7 @@ def net_remove(config, net):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

#
# DHCP lease functions
@@ -192,7 +192,7 @@ def net_dhcp_info(config, net, mac):
    if response.status_code == 200:
        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

def net_dhcp_list(config, net, limit, only_static=False):
    """
@@ -205,15 +205,18 @@ def net_dhcp_list(config, net, limit, only_static=False):
    params = dict()
    if limit:
        params['limit'] = limit

    if only_static:
        params['static'] = True
    else:
        params['static'] = False

    response = call_api(config, 'get', '/network/{net}/lease'.format(net=net), params=params)

    if response.status_code == 200:
        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

def net_dhcp_add(config, net, ipaddr, macaddr, hostname):
    """
@@ -235,7 +238,7 @@ def net_dhcp_add(config, net, ipaddr, macaddr, hostname):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

def net_dhcp_remove(config, net, mac):
    """
@@ -252,7 +255,7 @@ def net_dhcp_remove(config, net, mac):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

#
# ACL functions
@@ -270,7 +273,7 @@ def net_acl_info(config, net, description):
    if response.status_code == 200:
        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

def net_acl_list(config, net, limit, direction):
    """
@@ -291,7 +294,7 @@ def net_acl_list(config, net, limit, direction):
    if response.status_code == 200:
        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

def net_acl_add(config, net, direction, description, rule, order):
    """
@@ -315,7 +318,7 @@ def net_acl_add(config, net, direction, description, rule, order):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

def net_acl_remove(config, net, description):
    """
@@ -332,7 +335,7 @@ def net_acl_remove(config, net, description):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

#

@@ -44,7 +44,7 @@ def node_coordinator_state(config, node, action):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

def node_domain_state(config, node, action, wait):
    """
@@ -65,7 +65,7 @@ def node_domain_state(config, node, action, wait):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

def node_info(config, node):
    """
@@ -80,9 +80,9 @@ def node_info(config, node):
    if response.status_code == 200:
        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

-def node_list(config, limit):
+def node_list(config, limit, target_daemon_state, target_coordinator_state, target_domain_state):
    """
    Get list information about nodes (limited by {limit})

@@ -93,13 +93,19 @@ def node_list(config, limit):
    params = dict()
    if limit:
        params['limit'] = limit
    if target_daemon_state:
        params['daemon_state'] = target_daemon_state
    if target_coordinator_state:
        params['coordinator_state'] = target_coordinator_state
    if target_domain_state:
        params['domain_state'] = target_domain_state

    response = call_api(config, 'get', '/node', params=params)

    if response.status_code == 200:
        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')
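The new state filters are passed straight through as query parameters, so a caller can combine them freely. A hypothetical use (the state names follow PVC's usual daemon/coordinator vocabulary and are illustrative, not exhaustive):

# Hypothetical use of the extended node_list(): fetch only nodes whose
# daemon is in state 'run' and which are the primary coordinator.
def list_primary_run_nodes(config):
    retvalue, nodes = node_list(
        config,
        limit=None,
        target_daemon_state='run',
        target_coordinator_state='primary',
        target_domain_state=None,
    )
    return format_list(nodes, raw=False) if retvalue else nodes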

#
# Output display functions
@@ -130,42 +136,59 @@ def getOutputColours(node_information):
    else:
        domain_state_colour = ansiprint.blue()

-    return daemon_state_colour, coordinator_state_colour, domain_state_colour
+    if node_information['memory']['allocated'] > node_information['memory']['total']:
+        mem_allocated_colour = ansiprint.yellow()
+    else:
+        mem_allocated_colour = ''
+
+    if node_information['memory']['provisioned'] > node_information['memory']['total']:
+        mem_provisioned_colour = ansiprint.yellow()
+    else:
+        mem_provisioned_colour = ''
+
+    return daemon_state_colour, coordinator_state_colour, domain_state_colour, mem_allocated_colour, mem_provisioned_colour

def format_info(node_information, long_output):
-    daemon_state_colour, coordinator_state_colour, domain_state_colour = getOutputColours(node_information)
+    daemon_state_colour, coordinator_state_colour, domain_state_colour, mem_allocated_colour, mem_provisioned_colour = getOutputColours(node_information)

    # Format a nice output; do this line-by-line then concat the elements at the end
    ainformation = []
    # Basic information
-    ainformation.append('{}Name:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['name']))
-    ainformation.append('{}Daemon State:{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), daemon_state_colour, node_information['daemon_state'], ansiprint.end()))
-    ainformation.append('{}Coordinator State:{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), coordinator_state_colour, node_information['coordinator_state'], ansiprint.end()))
-    ainformation.append('{}Domain State:{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), domain_state_colour, node_information['domain_state'], ansiprint.end()))
-    ainformation.append('{}Active VM Count:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['domains_count']))
+    ainformation.append('{}Name:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['name']))
+    ainformation.append('{}Daemon State:{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), daemon_state_colour, node_information['daemon_state'], ansiprint.end()))
+    ainformation.append('{}Coordinator State:{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), coordinator_state_colour, node_information['coordinator_state'], ansiprint.end()))
+    ainformation.append('{}Domain State:{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), domain_state_colour, node_information['domain_state'], ansiprint.end()))
+    ainformation.append('{}Active VM Count:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['domains_count']))
    if long_output:
        ainformation.append('')
-        ainformation.append('{}Architecture:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['arch']))
-        ainformation.append('{}Operating System:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['os']))
-        ainformation.append('{}Kernel Version:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['kernel']))
+        ainformation.append('{}Architecture:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['arch']))
+        ainformation.append('{}Operating System:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['os']))
+        ainformation.append('{}Kernel Version:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['kernel']))
    ainformation.append('')
-    ainformation.append('{}Host CPUs:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['vcpu']['total']))
-    ainformation.append('{}vCPUs:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['vcpu']['allocated']))
-    ainformation.append('{}Load:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['load']))
-    ainformation.append('{}Total RAM (MiB):{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['total']))
-    ainformation.append('{}Used RAM (MiB):{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['used']))
-    ainformation.append('{}Free RAM (MiB):{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['free']))
-    ainformation.append('{}Allocated RAM (MiB):{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['allocated']))
+    ainformation.append('{}Host CPUs:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['vcpu']['total']))
+    ainformation.append('{}vCPUs:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['vcpu']['allocated']))
+    ainformation.append('{}Load:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['load']))
+    ainformation.append('{}Total RAM (MiB):{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['total']))
+    ainformation.append('{}Used RAM (MiB):{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['used']))
+    ainformation.append('{}Free RAM (MiB):{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['free']))
+    ainformation.append('{}Allocated RAM (MiB):{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), mem_allocated_colour, node_information['memory']['allocated'], ansiprint.end()))
+    ainformation.append('{}Provisioned RAM (MiB):{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), mem_provisioned_colour, node_information['memory']['provisioned'], ansiprint.end()))

    # Join it all together
    ainformation.append('')
    return '\n'.join(ainformation)

-def format_list(node_list):
+def format_list(node_list, raw):
    # Handle single-element lists
    if not isinstance(node_list, list):
        node_list = [ node_list ]

    if raw:
        ainformation = list()
        for node in sorted(item['name'] for item in node_list):
            ainformation.append(node)
        return '\n'.join(ainformation)

    node_list_output = []

    # Determine optimal column widths
@@ -179,7 +202,8 @@ def format_list(node_list):
    mem_total_length = 6
    mem_used_length = 5
    mem_free_length = 5
-    mem_alloc_length = 4
+    mem_alloc_length = 6
+    mem_prov_length = 5
    for node_information in node_list:
        # node_name column
        _node_name_length = len(node_information['name']) + 1
@@ -226,12 +250,17 @@ def format_list(node_list):
        if _mem_alloc_length > mem_alloc_length:
            mem_alloc_length = _mem_alloc_length

        # mem_prov column
        _mem_prov_length = len(str(node_information['memory']['provisioned'])) + 1
        if _mem_prov_length > mem_prov_length:
            mem_prov_length = _mem_prov_length

    # Format the string (header)
    node_list_output.append(
        '{bold}{node_name: <{node_name_length}} \
St: {daemon_state_colour}{node_daemon_state: <{daemon_state_length}}{end_colour} {coordinator_state_colour}{node_coordinator_state: <{coordinator_state_length}}{end_colour} {domain_state_colour}{node_domain_state: <{domain_state_length}}{end_colour} \
Res: {node_domains_count: <{domains_count_length}} {node_cpu_count: <{cpu_count_length}} {node_load: <{load_length}} \
-Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length}} {node_mem_free: <{mem_free_length}} {node_mem_allocated: <{mem_alloc_length}}{end_bold}'.format(
+Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length}} {node_mem_free: <{mem_free_length}} {node_mem_allocated: <{mem_alloc_length}} {node_mem_provisioned: <{mem_prov_length}}{end_bold}'.format(
            node_name_length=node_name_length,
            daemon_state_length=daemon_state_length,
            coordinator_state_length=coordinator_state_length,
@@ -243,6 +272,7 @@ Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length
            mem_used_length=mem_used_length,
            mem_free_length=mem_free_length,
            mem_alloc_length=mem_alloc_length,
            mem_prov_length=mem_prov_length,
            bold=ansiprint.bold(),
            end_bold=ansiprint.end(),
            daemon_state_colour='',
@@ -259,18 +289,19 @@ Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length
            node_mem_total='Total',
            node_mem_used='Used',
            node_mem_free='Free',
-            node_mem_allocated='VMs'
+            node_mem_allocated='Alloc',
+            node_mem_provisioned='Prov'
        )
    )

    # Format the string (elements)
    for node_information in node_list:
-        daemon_state_colour, coordinator_state_colour, domain_state_colour = getOutputColours(node_information)
+        daemon_state_colour, coordinator_state_colour, domain_state_colour, mem_allocated_colour, mem_provisioned_colour = getOutputColours(node_information)
        node_list_output.append(
            '{bold}{node_name: <{node_name_length}} \
{daemon_state_colour}{node_daemon_state: <{daemon_state_length}}{end_colour} {coordinator_state_colour}{node_coordinator_state: <{coordinator_state_length}}{end_colour} {domain_state_colour}{node_domain_state: <{domain_state_length}}{end_colour} \
{node_domains_count: <{domains_count_length}} {node_cpu_count: <{cpu_count_length}} {node_load: <{load_length}} \
-{node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length}} {node_mem_free: <{mem_free_length}} {node_mem_allocated: <{mem_alloc_length}}{end_bold}'.format(
+{node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length}} {node_mem_free: <{mem_free_length}} {mem_allocated_colour}{node_mem_allocated: <{mem_alloc_length}}{end_colour} {mem_provisioned_colour}{node_mem_provisioned: <{mem_prov_length}}{end_colour}{end_bold}'.format(
                node_name_length=node_name_length,
                daemon_state_length=daemon_state_length,
                coordinator_state_length=coordinator_state_length,
@@ -282,11 +313,14 @@ Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length
                mem_used_length=mem_used_length,
                mem_free_length=mem_free_length,
                mem_alloc_length=mem_alloc_length,
                mem_prov_length=mem_prov_length,
                bold='',
                end_bold='',
                daemon_state_colour=daemon_state_colour,
                coordinator_state_colour=coordinator_state_colour,
                domain_state_colour=domain_state_colour,
                mem_allocated_colour=mem_allocated_colour,
                mem_provisioned_colour=mem_provisioned_colour,
                end_colour=ansiprint.end(),
                node_name=node_information['name'],
                node_daemon_state=node_information['daemon_state'],
@@ -298,7 +332,8 @@ Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length
                node_mem_total=node_information['memory']['total'],
                node_mem_used=node_information['memory']['used'],
                node_mem_free=node_information['memory']['free'],
-                node_mem_allocated=node_information['memory']['allocated']
+                node_mem_allocated=node_information['memory']['allocated'],
+                node_mem_provisioned=node_information['memory']['provisioned']
            )
        )

@@ -25,8 +25,10 @@ import re
import subprocess
import ast

from requests_toolbelt.multipart.encoder import MultipartEncoder, MultipartEncoderMonitor

import cli_lib.ansiprint as ansiprint
-from cli_lib.common import call_api
+from cli_lib.common import UploadProgressBar, call_api

#
# Primary functions
@@ -44,7 +46,7 @@ def template_info(config, template, template_type):
    if response.status_code == 200:
        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

def template_list(config, limit, template_type=None):
    """
@@ -66,7 +68,7 @@ def template_list(config, limit, template_type=None):
    if response.status_code == 200:
        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

def template_add(config, params, template_type=None):
    """
@@ -83,9 +85,26 @@ def template_add(config, params, template_type=None):
    else:
        retvalue = False

-    return retvalue, response.json()['message']
+    return retvalue, response.json().get('message', '')

-def template_remove(config, name, template_type=None):
+def template_modify(config, params, name, template_type):
    """
    Modify an existing template of {template_type} with {params}

    API endpoint: PUT /api/v1/provisioner/template/{template_type}/{name}
    API_arguments: args
    API schema: {message}
    """
    response = call_api(config, 'put', '/provisioner/template/{template_type}/{name}'.format(template_type=template_type, name=name), params=params)

    if response.status_code == 200:
        retvalue = True
    else:
        retvalue = False

    return retvalue, response.json().get('message', '')

+def template_remove(config, name, template_type):
    """
    Remove template {name} of {template_type}

@@ -100,7 +119,7 @@ def template_remove(config, name, template_type=None):
    else:
        retvalue = False

-    return retvalue, response.json()['message']
+    return retvalue, response.json().get('message', '')

def template_element_add(config, name, element_id, params, element_type=None, template_type=None):
    """
@@ -117,7 +136,7 @@ def template_element_add(config, name, element_id, params, element_type=None, te
    else:
        retvalue = False

-    return retvalue, response.json()['message']
+    return retvalue, response.json().get('message', '')

def template_element_remove(config, name, element_id, element_type=None, template_type=None):
    """
@@ -134,7 +153,7 @@ def template_element_remove(config, name, element_id, element_type=None, templat
    else:
        retvalue = False

-    return retvalue, response.json()['message']
+    return retvalue, response.json().get('message', '')

def userdata_info(config, userdata):
    """
@@ -149,7 +168,7 @@ def userdata_info(config, userdata):
    if response.status_code == 200:
        return True, response.json()[0]
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

def userdata_list(config, limit):
    """
@@ -168,7 +187,22 @@ def userdata_list(config, limit):
    if response.status_code == 200:
        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

def userdata_show(config, name):
    """
    Get information about userdata name

    API endpoint: GET /api/v1/provisioner/userdata/{name}
    API arguments:
    API schema: [{json_data_object},{json_data_object},etc.]
    """
    response = call_api(config, 'get', '/provisioner/userdata/{}'.format(name))

    if response.status_code == 200:
        return True, response.json()[0]['userdata']
    else:
        return False, response.json().get('message', '')

def userdata_add(config, params):
    """
@@ -194,7 +228,7 @@ def userdata_add(config, params):
    else:
        retvalue = False

-    return retvalue, response.json()['message']
+    return retvalue, response.json().get('message', '')

def userdata_modify(config, name, params):
    """
@@ -219,7 +253,7 @@ def userdata_modify(config, name, params):
    else:
        retvalue = False

-    return retvalue, response.json()['message']
+    return retvalue, response.json().get('message', '')

def userdata_remove(config, name):
    """
@@ -236,7 +270,7 @@ def userdata_remove(config, name):
    else:
        retvalue = False

-    return retvalue, response.json()['message']
+    return retvalue, response.json().get('message', '')

def script_info(config, script):
    """
@@ -251,7 +285,7 @@ def script_info(config, script):
    if response.status_code == 200:
        return True, response.json()[0]
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

def script_list(config, limit):
    """
@@ -270,7 +304,22 @@ def script_list(config, limit):
    if response.status_code == 200:
        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

def script_show(config, name):
    """
    Get information about script name

    API endpoint: GET /api/v1/provisioner/script/{name}
    API arguments:
    API schema: [{json_data_object},{json_data_object},etc.]
    """
    response = call_api(config, 'get', '/provisioner/script/{}'.format(name))

    if response.status_code == 200:
        return True, response.json()[0]['script']
    else:
        return False, response.json().get('message', '')

def script_add(config, params):
    """
@@ -296,7 +345,7 @@ def script_add(config, params):
    else:
        retvalue = False

-    return retvalue, response.json()['message']
+    return retvalue, response.json().get('message', '')

def script_modify(config, name, params):
    """
@@ -321,7 +370,7 @@ def script_modify(config, name, params):
    else:
        retvalue = False

-    return retvalue, response.json()['message']
+    return retvalue, response.json().get('message', '')

def script_remove(config, name):
    """
@@ -338,7 +387,90 @@ def script_remove(config, name):
    else:
        retvalue = False

-    return retvalue, response.json()['message']
+    return retvalue, response.json().get('message', '')

def ova_info(config, name):
    """
    Get information about OVA image {name}

    API endpoint: GET /api/v1/provisioner/ova/{name}
    API arguments:
    API schema: {json_data_object}
    """
    response = call_api(config, 'get', '/provisioner/ova/{name}'.format(name=name))

    if response.status_code == 200:
        return True, response.json()[0]
    else:
        return False, response.json().get('message', '')

def ova_list(config, limit):
    """
    Get list information about OVA images (limited by {limit})

    API endpoint: GET /api/v1/provisioner/ova
    API arguments: limit={limit}
    API schema: [{json_data_object},{json_data_object},etc.]
    """
    params = dict()
    if limit:
        params['limit'] = limit

    response = call_api(config, 'get', '/provisioner/ova', params=params)

    if response.status_code == 200:
        return True, response.json()
    else:
        return False, response.json().get('message', '')

def ova_upload(config, name, ova_file, params):
    """
    Upload an OVA image to the cluster

    API endpoint: POST /api/v1/provisioner/ova/{name}
    API arguments: pool={pool}, ova_size={ova_size}
    API schema: {"message":"{data}"}
    """
    import click

    bar = UploadProgressBar(ova_file, end_message="Parsing file on remote side...", end_nl=False)
    upload_data = MultipartEncoder(
        fields={'file': ('filename', open(ova_file, 'rb'), 'application/octet-stream')}
    )
    upload_monitor = MultipartEncoderMonitor(upload_data, bar.update)

    headers = {
        "Content-Type": upload_monitor.content_type
    }

    response = call_api(config, 'post', '/provisioner/ova/{}'.format(name), headers=headers, params=params, data=upload_monitor)

    click.echo("done.")
    click.echo()

    if response.status_code == 200:
        retstatus = True
    else:
        retstatus = False

    return retstatus, response.json().get('message', '')
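The upload streams the file through MultipartEncoderMonitor, so the progress bar ticks as bytes are read from disk rather than buffering the whole OVA in memory. A hypothetical invocation (the pool name, size, and paths are illustrative; 'pool' and 'ova_size' mirror the API arguments named in the docstring above):

# Hypothetical caller of ova_upload().
def upload_debian_ova(config):
    params = {
        'pool': 'vms',
        'ova_size': '10G',
    }
    return ova_upload(config, 'debian-10', '/tmp/debian-10.ova', params)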

def ova_remove(config, name):
    """
    Remove OVA image {name}

    API endpoint: DELETE /api/v1/provisioner/ova/{name}
    API_arguments:
    API schema: {message}
    """
    response = call_api(config, 'delete', '/provisioner/ova/{name}'.format(name=name))

    if response.status_code == 200:
        retvalue = True
    else:
        retvalue = False

    return retvalue, response.json().get('message', '')

def profile_info(config, profile):
    """
@@ -353,7 +485,7 @@ def profile_info(config, profile):
    if response.status_code == 200:
        return True, response.json()[0]
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

def profile_list(config, limit):
    """
@@ -372,7 +504,7 @@ def profile_list(config, limit):
    if response.status_code == 200:
        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

def profile_add(config, params):
    """
@@ -389,7 +521,7 @@ def profile_add(config, params):
    else:
        retvalue = False

-    return retvalue, response.json()['message']
+    return retvalue, response.json().get('message', '')

def profile_modify(config, name, params):
    """
@@ -406,7 +538,7 @@ def profile_modify(config, name, params):
    else:
        retvalue = False

-    return retvalue, response.json()['message']
+    return retvalue, response.json().get('message', '')

def profile_remove(config, name):
    """
@@ -423,21 +555,22 @@ def profile_remove(config, name):
    else:
        retvalue = False

-    return retvalue, response.json()['message']
+    return retvalue, response.json().get('message', '')

-def vm_create(config, name, profile, wait_flag, define_flag, start_flag):
+def vm_create(config, name, profile, wait_flag, define_flag, start_flag, script_args):
    """
    Create a new VM named {name} with profile {profile}

    API endpoint: POST /api/v1/provisioner/create
-    API_arguments: name={name}, profile={profile}
+    API_arguments: name={name}, profile={profile}, arg={script_args}
    API schema: {message}
    """
    params = {
        'name': name,
        'profile': profile,
        'start_vm': start_flag,
-        'define_vm': define_flag
+        'define_vm': define_flag,
+        'arg': script_args
    }
    response = call_api(config, 'post', '/provisioner/create', params=params)

@@ -450,7 +583,7 @@ def vm_create(config, name, profile, wait_flag, define_flag, start_flag):
        retdata = response.json()['task_id']
    else:
        retvalue = False
-        retdata = response.json()['message']
+        retdata = response.json().get('message', '')

    return retvalue, retdata
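script_args travels through as the repeatable `arg` parameter, so provisioning scripts can receive arbitrary strings at creation time. A hypothetical call (the key=value strings are opaque to the CLI and interpreted only by the provisioning script on the server side):

# Hypothetical call passing through the new script_args parameter.
def create_test_vm(config):
    return vm_create(
        config,
        name='test1',
        profile='default',
        wait_flag=False,
        define_flag=True,
        start_flag=True,
        script_args=['deb_release=buster', 'deb_mirror=ftp.debian.org'],
    )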
|
||||
|
||||
@ -498,7 +631,7 @@ def task_status(config, task_id=None, is_watching=False):
|
||||
)
|
||||
else:
|
||||
retvalue = False
|
||||
retdata = response.json()['message']
|
||||
retdata = response.json().get('message', '')
|
||||
else:
|
||||
retvalue = True
|
||||
task_data_raw = response.json()
|
||||
@ -1069,15 +1202,139 @@ def format_list_script(script_data, lines=None):
|
||||
|
||||
return '\n'.join([script_list_output_header] + script_list_output)
|
||||
|
||||
def format_list_ova(ova_data):
    if isinstance(ova_data, dict):
        ova_data = [ ova_data ]

    ova_list_output = []

    # Determine optimal column widths
    ova_name_length = 5
    ova_id_length = 3
    ova_disk_id_length = 8
    ova_disk_size_length = 10
    ova_disk_pool_length = 5
    ova_disk_volume_format_length = 7
    ova_disk_volume_name_length = 13

    for ova in ova_data:
        # ova_name column
        _ova_name_length = len(str(ova['name'])) + 1
        if _ova_name_length > ova_name_length:
            ova_name_length = _ova_name_length
        # ova_id column
        _ova_id_length = len(str(ova['id'])) + 1
        if _ova_id_length > ova_id_length:
            ova_id_length = _ova_id_length

        for disk in ova['volumes']:
            # ova_disk_id column
            _ova_disk_id_length = len(str(disk['disk_id'])) + 1
            if _ova_disk_id_length > ova_disk_id_length:
                ova_disk_id_length = _ova_disk_id_length
            # ova_disk_size column
            _ova_disk_size_length = len(str(disk['disk_size_gb'])) + 1
            if _ova_disk_size_length > ova_disk_size_length:
                ova_disk_size_length = _ova_disk_size_length
            # ova_disk_pool column
            _ova_disk_pool_length = len(str(disk['pool'])) + 1
            if _ova_disk_pool_length > ova_disk_pool_length:
                ova_disk_pool_length = _ova_disk_pool_length
            # ova_disk_volume_format column
            _ova_disk_volume_format_length = len(str(disk['volume_format'])) + 1
            if _ova_disk_volume_format_length > ova_disk_volume_format_length:
                ova_disk_volume_format_length = _ova_disk_volume_format_length
            # ova_disk_volume_name column
            _ova_disk_volume_name_length = len(str(disk['volume_name'])) + 1
            if _ova_disk_volume_name_length > ova_disk_volume_name_length:
                ova_disk_volume_name_length = _ova_disk_volume_name_length

    # Format the string (header)
    ova_list_output_header = '{bold}{ova_name: <{ova_name_length}} {ova_id: <{ova_id_length}} \
{ova_disk_id: <{ova_disk_id_length}} \
{ova_disk_size: <{ova_disk_size_length}} \
{ova_disk_pool: <{ova_disk_pool_length}} \
{ova_disk_volume_format: <{ova_disk_volume_format_length}} \
{ova_disk_volume_name: <{ova_disk_volume_name_length}}{end_bold}'.format(
        ova_name_length=ova_name_length,
        ova_id_length=ova_id_length,
        ova_disk_id_length=ova_disk_id_length,
        ova_disk_pool_length=ova_disk_pool_length,
        ova_disk_size_length=ova_disk_size_length,
        ova_disk_volume_format_length=ova_disk_volume_format_length,
        ova_disk_volume_name_length=ova_disk_volume_name_length,
        bold=ansiprint.bold(),
        end_bold=ansiprint.end(),
        ova_name='Name',
        ova_id='ID',
        ova_disk_id='Disk ID',
        ova_disk_size='Size [GB]',
        ova_disk_pool='Pool',
        ova_disk_volume_format='Format',
        ova_disk_volume_name='Source Volume',
    )

    # Format the string (elements)
    for ova in sorted(ova_data, key=lambda i: i.get('name', None)):
        ova_list_output.append(
            '{bold}{ova_name: <{ova_name_length}} {ova_id: <{ova_id_length}}{end_bold}'.format(
                ova_name_length=ova_name_length,
                ova_id_length=ova_id_length,
                bold='',
                end_bold='',
                ova_name=str(ova['name']),
                ova_id=str(ova['id'])
            )
        )
        for disk in sorted(ova['volumes'], key=lambda i: i.get('disk_id', None)):
            ova_list_output.append(
                '{bold}{ova_name: <{ova_name_length}} {ova_id: <{ova_id_length}} \
{ova_disk_id: <{ova_disk_id_length}} \
{ova_disk_size: <{ova_disk_size_length}} \
{ova_disk_pool: <{ova_disk_pool_length}} \
{ova_disk_volume_format: <{ova_disk_volume_format_length}} \
{ova_disk_volume_name: <{ova_disk_volume_name_length}}{end_bold}'.format(
                    ova_name_length=ova_name_length,
                    ova_id_length=ova_id_length,
                    ova_disk_id_length=ova_disk_id_length,
                    ova_disk_size_length=ova_disk_size_length,
                    ova_disk_pool_length=ova_disk_pool_length,
                    ova_disk_volume_format_length=ova_disk_volume_format_length,
                    ova_disk_volume_name_length=ova_disk_volume_name_length,
                    bold='',
                    end_bold='',
                    ova_name='',
                    ova_id='',
                    ova_disk_id=str(disk['disk_id']),
                    ova_disk_size=str(disk['disk_size_gb']),
                    ova_disk_pool=str(disk['pool']),
                    ova_disk_volume_format=str(disk['volume_format']),
                    ova_disk_volume_name=str(disk['volume_name']),
                )
            )

    return '\n'.join([ova_list_output_header] + ova_list_output)
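
The running-maximum loops above are this module's recurring column-sizing idiom; the same computation can be written with `max()` over a comprehension, sketched here on made-up data:

    # Equivalent column sizing with max(); the data is illustrative only.
    ova_data = [
        {'name': 'debian-template', 'id': 1},
        {'name': 'ws2019', 'id': 12},
    ]
    # The floors of 5 and 3 match the header widths ('Name', 'ID') plus padding.
    ova_name_length = max([5] + [len(str(o['name'])) + 1 for o in ova_data])
    ova_id_length = max([3] + [len(str(o['id'])) + 1 for o in ova_data])
    print(ova_name_length, ova_id_length)  # 16 3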

def format_list_profile(profile_data):
    if isinstance(profile_data, dict):
        profile_data = [ profile_data ]

+    # Format the profile "source" from the type and, if applicable, OVA profile name
+    for profile in profile_data:
+        profile_type = profile['type']
+        if 'ova' in profile_type:
+            # Set the source to the name of the OVA:
+            profile['source'] = 'OVA {}'.format(profile['ova'])
+        else:
+            # Set the source to be the type
+            profile['source'] = profile_type

    profile_list_output = []

    # Determine optimal column widths
    profile_name_length = 5
    profile_id_length = 3
+    profile_source_length = 7

    profile_system_template_length = 7
    profile_network_template_length = 8
@@ -1094,6 +1351,10 @@ def format_list_profile(profile_data):
        _profile_id_length = len(str(profile['id'])) + 1
        if _profile_id_length > profile_id_length:
            profile_id_length = _profile_id_length
+        # profile_source column
+        _profile_source_length = len(str(profile['source'])) + 1
+        if _profile_source_length > profile_source_length:
+            profile_source_length = _profile_source_length
        # profile_system_template column
        _profile_system_template_length = len(str(profile['system_template'])) + 1
        if _profile_system_template_length > profile_system_template_length:
@@ -1116,7 +1377,7 @@ def format_list_profile(profile_data):
            profile_script_length = _profile_script_length

    # Format the string (header)
-    profile_list_output_header = '{bold}{profile_name: <{profile_name_length}} {profile_id: <{profile_id_length}} \
+    profile_list_output_header = '{bold}{profile_name: <{profile_name_length}} {profile_id: <{profile_id_length}} {profile_source: <{profile_source_length}} \
Templates: {profile_system_template: <{profile_system_template_length}} \
{profile_network_template: <{profile_network_template_length}} \
{profile_storage_template: <{profile_storage_template_length}} \
@@ -1125,6 +1386,7 @@ Data: {profile_userdata: <{profile_userdata_length}} \
{profile_arguments}{end_bold}'.format(
        profile_name_length=profile_name_length,
        profile_id_length=profile_id_length,
+        profile_source_length=profile_source_length,
        profile_system_template_length=profile_system_template_length,
        profile_network_template_length=profile_network_template_length,
        profile_storage_template_length=profile_storage_template_length,
@@ -1134,6 +1396,7 @@ Data: {profile_userdata: <{profile_userdata_length}} \
        end_bold=ansiprint.end(),
        profile_name='Name',
        profile_id='ID',
+        profile_source='Source',
        profile_system_template='System',
        profile_network_template='Network',
        profile_storage_template='Storage',
@@ -1145,7 +1408,7 @@ Data: {profile_userdata: <{profile_userdata_length}} \
    # Format the string (elements)
    for profile in sorted(profile_data, key=lambda i: i.get('name', None)):
        profile_list_output.append(
-            '{bold}{profile_name: <{profile_name_length}} {profile_id: <{profile_id_length}} \
+            '{bold}{profile_name: <{profile_name_length}} {profile_id: <{profile_id_length}} {profile_source: <{profile_source_length}} \
{profile_system_template: <{profile_system_template_length}} \
{profile_network_template: <{profile_network_template_length}} \
{profile_storage_template: <{profile_storage_template_length}} \
@@ -1154,6 +1417,7 @@ Data: {profile_userdata: <{profile_userdata_length}} \
{profile_arguments}{end_bold}'.format(
                profile_name_length=profile_name_length,
                profile_id_length=profile_id_length,
+                profile_source_length=profile_source_length,
                profile_system_template_length=profile_system_template_length,
                profile_network_template_length=profile_network_template_length,
                profile_storage_template_length=profile_storage_template_length,
@@ -1163,6 +1427,7 @@ Data: {profile_userdata: <{profile_userdata_length}} \
                end_bold='',
                profile_name=profile['name'],
                profile_id=profile['id'],
+                profile_source=profile['source'],
                profile_system_template=profile['system_template'],
                profile_network_template=profile['network_template'],
                profile_storage_template=profile['storage_template'],

@@ -28,14 +28,14 @@ from collections import deque

import cli_lib.ansiprint as ansiprint
import cli_lib.ceph as ceph
-from cli_lib.common import call_api
+from cli_lib.common import call_api, format_bytes, format_metric

#
# Primary functions
#
def vm_info(config, vm):
    """
-    Get information about VM
+    Get information about (single) VM

    API endpoint: GET /api/v1/vm/{vm}
    API arguments:
@@ -44,9 +44,18 @@ def vm_info(config, vm):
    response = call_api(config, 'get', '/vm/{vm}'.format(vm=vm))

    if response.status_code == 200:
+        if isinstance(response.json(), list) and len(response.json()) > 1:
+            # No exact match; return not found
+            return False, "VM not found."
+        else:
+            if isinstance(response.json(), list):
+                response = response.json()[0]
+            else:
+                response = response.json()
+            return True, response
-        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')
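The switch from `response.json()['message']` to `response.json().get('message', '')`, repeated throughout these files, guards against error bodies that lack a `message` key; a two-line illustration:

    # Why .get() matters here: an error body without 'message' raises KeyError
    # with subscript access, but degrades to '' with .get().
    body = {}                       # e.g. a malformed error response
    # body['message']               # would raise KeyError
    print(body.get('message', ''))  # prints an empty string instead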

def vm_list(config, limit, target_node, target_state):
    """
@@ -69,7 +78,7 @@ def vm_list(config, limit, target_node, target_state):
    if response.status_code == 200:
        return True, response.json()
    else:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

def vm_define(config, xml, node, node_limit, node_selector, node_autostart):
    """
@@ -95,7 +104,7 @@ def vm_define(config, xml, node, node_limit, node_selector, node_autostart):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

def vm_modify(config, vm, xml, restart):
    """
@@ -118,7 +127,7 @@ def vm_modify(config, vm, xml, restart):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

def vm_metadata(config, vm, node_limit, node_selector, node_autostart, provisioner_profile):
    """
@@ -151,7 +160,7 @@ def vm_metadata(config, vm, node_limit, node_selector, node_autostart, provision
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

def vm_remove(config, vm, delete_disks=False):
    """
@@ -171,18 +180,19 @@ def vm_remove(config, vm, delete_disks=False):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

-def vm_state(config, vm, target_state):
+def vm_state(config, vm, target_state, wait=False):
    """
    Modify the current state of VM

    API endpoint: POST /vm/{vm}/state
-    API arguments: state={state}
+    API arguments: state={state}, wait={wait}
    API schema: {"message":"{data}"}
    """
    params={
        'state': target_state,
+        'wait': str(wait).lower()
    }
    response = call_api(config, 'post', '/vm/{vm}/state'.format(vm=vm), params=params)

@@ -191,20 +201,22 @@ def vm_state(config, vm, target_state):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

-def vm_node(config, vm, target_node, action, force=False):
+def vm_node(config, vm, target_node, action, force=False, wait=False, force_live=False):
    """
    Modify the current node of VM via {action}

    API endpoint: POST /vm/{vm}/node
-    API arguments: node={target_node}, action={action}, force={force}
+    API arguments: node={target_node}, action={action}, force={force}, wait={wait}, force_live={force_live}
    API schema: {"message":"{data}"}
    """
    params={
        'node': target_node,
        'action': action,
-        'force': force
+        'force': str(force).lower(),
+        'wait': str(wait).lower(),
+        'force_live': str(force_live).lower()
    }
    response = call_api(config, 'post', '/vm/{vm}/node'.format(vm=vm), params=params)

@@ -213,7 +225,7 @@ def vm_node(config, vm, target_node, action, force=False):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')
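The `str(force).lower()` conversions above matter because query parameters travel as strings and Python renders `True` as `'True'`; assuming the server compares against lowercase `'true'`/`'false'`, a helper like this sketch produces the wire format:

    # Booleans must be lowered to 'true'/'false' for the query string; the
    # lowercase server-side comparison is an assumption for illustration.
    def serialize_flags(**flags):
        return {key: str(value).lower() for key, value in flags.items()}

    print(serialize_flags(force=True, wait=False, force_live=True))
    # {'force': 'true', 'wait': 'false', 'force_live': 'true'}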

def vm_locks(config, vm):
    """
@@ -230,7 +242,7 @@ def vm_locks(config, vm):
    else:
        retstatus = False

-    return retstatus, response.json()['message']
+    return retstatus, response.json().get('message', '')

def view_console_log(config, vm, lines=100):
    """
@@ -246,7 +258,7 @@ def view_console_log(config, vm, lines=100):
    response = call_api(config, 'get', '/vm/{vm}/console'.format(vm=vm), params=params)

    if response.status_code != 200:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

    console_log = response.json()['data']

@@ -270,7 +282,7 @@ def follow_console_log(config, vm, lines=10):
    response = call_api(config, 'get', '/vm/{vm}/console'.format(vm=vm), params=params)

    if response.status_code != 200:
-        return False, response.json()['message']
+        return False, response.json().get('message', '')

    # Shrink the log buffer to length lines
    console_log = response.json()['data']
@@ -336,6 +348,24 @@ def format_info(config, domain_information, long_output):
    ainformation.append('{}Arch:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['arch']))
    ainformation.append('{}Machine:{} {}'.format(ansiprint.purple(), ansiprint.end(), domain_information['machine']))
    ainformation.append('{}Features:{} {}'.format(ansiprint.purple(), ansiprint.end(), ' '.join(domain_information['features'])))
+    ainformation.append('')
+    ainformation.append('{0}Memory stats:{1} {2}Swap In  Swap Out  Faults (maj/min)  Available  Usable  Unused  RSS{3}'.format(ansiprint.purple(), ansiprint.end(), ansiprint.bold(), ansiprint.end()))
+    ainformation.append('               {0: <7}  {1: <8}  {2: <16}  {3: <10} {4: <7} {5: <7} {6: <10}'.format(
+        format_metric(domain_information['memory_stats'].get('swap_in')),
+        format_metric(domain_information['memory_stats'].get('swap_out')),
+        '/'.join([format_metric(domain_information['memory_stats'].get('major_fault')), format_metric(domain_information['memory_stats'].get('minor_fault'))]),
+        format_bytes(domain_information['memory_stats'].get('available')*1024),
+        format_bytes(domain_information['memory_stats'].get('usable')*1024),
+        format_bytes(domain_information['memory_stats'].get('unused')*1024),
+        format_bytes(domain_information['memory_stats'].get('rss')*1024)
+    ))
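`format_bytes` and `format_metric` are imported from `cli_lib.common` and their bodies are not part of this diff; purely for illustration, a plausible stand-in for `format_bytes` (the real helper may round and choose suffixes differently):

    # Illustrative stand-in for cli_lib.common.format_bytes; not the real code.
    def format_bytes(size_bytes):
        for suffix in ['B', 'KB', 'MB', 'GB', 'TB']:
            if size_bytes < 1024 or suffix == 'TB':
                return '{:.1f}{}'.format(size_bytes, suffix)
            size_bytes /= 1024

    print(format_bytes(8 * 1024 * 1024))  # 8.0MB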
+    ainformation.append('')
+    ainformation.append('{0}vCPU stats:{1} {2}CPU time (ns)  User time (ns)  System time (ns){3}'.format(ansiprint.purple(), ansiprint.end(), ansiprint.bold(), ansiprint.end()))
+    ainformation.append('             {0: <16}  {1: <16}  {2: <15}'.format(
+        str(domain_information['vcpu_stats'].get('cpu_time')),
+        str(domain_information['vcpu_stats'].get('user_time')),
+        str(domain_information['vcpu_stats'].get('system_time'))
+    ))

    # PVC cluster information
    ainformation.append('')
@@ -391,7 +421,7 @@ def format_info(config, domain_information, long_output):
            net_vni = re.sub('br', '', net['source'])

            response = call_api(config, 'get', '/network/{net}'.format(net=net_vni))
-            if response.status_code != 200 and net_vni != 'cluster':
+            if response.status_code != 200 and net_vni not in ['cluster', 'storage', 'upstream']:
                net_list.append(ansiprint.red() + net_vni + ansiprint.end() + ' [invalid]')
            else:
                net_list.append(net_vni)
@@ -407,13 +437,31 @@ def format_info(config, domain_information, long_output):
        _name_length = len(disk['name']) + 1
        if _name_length > name_length:
            name_length = _name_length
-    ainformation.append('{0}Disks:{1} {2}ID Type {3: <{width}} Dev Bus{4}'.format(ansiprint.purple(), ansiprint.end(), ansiprint.bold(), 'Name', ansiprint.end(), width=name_length))
+    ainformation.append('{0}Disks:{1} {2}ID Type {3: <{width}} Dev Bus Requests (r/w) Data (r/w){4}'.format(ansiprint.purple(), ansiprint.end(), ansiprint.bold(), 'Name', ansiprint.end(), width=name_length))
    for disk in domain_information['disks']:
-        ainformation.append(' {0: <3} {1: <5} {2: <{width}} {3: <4} {4: <5}'.format(domain_information['disks'].index(disk), disk['type'], disk['name'], disk['dev'], disk['bus'], width=name_length))
+        ainformation.append(' {0: <3} {1: <5} {2: <{width}} {3: <4} {4: <5} {5: <15} {6}'.format(
+            domain_information['disks'].index(disk),
+            disk['type'],
+            disk['name'],
+            disk['dev'],
+            disk['bus'],
+            '/'.join([str(format_metric(disk['rd_req'])), str(format_metric(disk['wr_req']))]),
+            '/'.join([str(format_bytes(disk['rd_bytes'])), str(format_bytes(disk['wr_bytes']))]),
+            width=name_length
+        ))
    ainformation.append('')
-    ainformation.append('{}Interfaces:{} {}ID Type Source Model MAC{}'.format(ansiprint.purple(), ansiprint.end(), ansiprint.bold(), ansiprint.end()))
+    ainformation.append('{}Interfaces:{} {}ID Type Source Model MAC Data (r/w) Packets (r/w) Errors (r/w){}'.format(ansiprint.purple(), ansiprint.end(), ansiprint.bold(), ansiprint.end()))
    for net in domain_information['networks']:
-        ainformation.append(' {0: <3} {1: <8} {2: <10} {3: <8} {4}'.format(domain_information['networks'].index(net), net['type'], net['source'], net['model'], net['mac']))
+        ainformation.append(' {0: <3} {1: <7} {2: <10} {3: <8} {4: <18} {5: <12} {6: <15} {7: <12}'.format(
+            domain_information['networks'].index(net),
+            net['type'],
+            net['source'],
+            net['model'],
+            net['mac'],
+            '/'.join([str(format_bytes(net['rd_bytes'])), str(format_bytes(net['wr_bytes']))]),
+            '/'.join([str(format_metric(net['rd_packets'])), str(format_metric(net['wr_packets']))]),
+            '/'.join([str(format_metric(net['rd_errors'])), str(format_metric(net['wr_errors']))]),
+        ))
    # Controller list
    ainformation.append('')
    ainformation.append('{}Controllers:{} {}ID Type Model{}'.format(ansiprint.purple(), ansiprint.end(), ansiprint.bold(), ansiprint.end()))
@@ -540,7 +588,7 @@ def format_list(config, vm_list, raw):
        for net_vni in raw_net_list:
            if not net_vni in valid_net_list:
                response = call_api(config, 'get', '/network/{net}'.format(net=net_vni))
-                if response.status_code != 200 and net_vni != 'cluster':
+                if response.status_code != 200 and net_vni not in ['cluster', 'storage', 'upstream']:
                    vm_net_colour = ansiprint.red()
                else:
                    valid_net_list.append(net_vni)
@@ -23,7 +23,7 @@
import kazoo.client
import uuid

-import client_lib.ansiprint as ansiprint
+import daemon_lib.ansiprint as ansiprint

# Exists function
def exists(zk_conn, key):
(File diff suppressed because it is too large)

client-cli/scripts/README (new file, 32 lines)
@@ -0,0 +1,32 @@
# PVC helper scripts

These helper scripts are included with the PVC client to aid administrators with meta-functions such as moving VMs between clusters or platforms.

The following scripts are provided for use:

## `migrate_vm`

Migrates a VM, with downtime, from one PVC cluster to another.

`migrate_vm <vm> <source_cluster> <destination_cluster> <destination_pool>`

### Arguments

* `vm`: The virtual machine to migrate
* `source_cluster`: The source PVC cluster; must be a cluster known to the local PVC client
* `destination_cluster`: The destination PVC cluster; must be a cluster known to the local PVC client
* `destination_pool`: The storage pool on the destination cluster that will hold the VM volumes

## `import_vm`

Imports a VM from another platform into a PVC cluster.

## `export_vm`

Exports a (stopped) VM from a PVC cluster to another platform.

`export_vm <vm> <source_cluster>`

### Arguments

* `vm`: The virtual machine to export
* `source_cluster`: The source PVC cluster; must be a cluster known to the local PVC client

client-cli/scripts/export_vm (new executable file, 99 lines)
@@ -0,0 +1,99 @@
#!/usr/bin/env bash

# export_vm - Exports a VM from a PVC cluster to local files
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2020 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

set -o errexit
set -o pipefail

usage() {
    echo -e "Export a VM from a PVC cluster to local files."
    echo -e "Usage:"
    echo -e "  $0 <vm> <source_cluster> [<destination_directory>]"
    echo -e ""
    echo -e "Important information:"
    echo -e " * The local user must have valid SSH access to the primary coordinator in the source_cluster."
    echo -e " * The user on the cluster primary coordinator must have 'sudo' access."
    echo -e " * If the VM is not in 'stop' state, it will be shut down."
    echo -e " * Do not switch the cluster primary coordinator while the script is running."
    echo -e " * Ensure you have enough space in <destination_directory> to store all VM disk images."
}

fail() {
    echo -e "$@"
    exit 1
}

# Arguments
if [[ -z ${1} || -z ${2} ]]; then
    usage
    exit 1
fi
source_vm="${1}"
source_cluster="${2}"
if [[ -n "${3}" ]]; then
    destination_directory="${3}"
else
    destination_directory="."
fi

# Verify the cluster is reachable
pvc -c ${source_cluster} status &>/dev/null || fail "Specified source_cluster is not accessible"

# Determine the connection IP
cluster_address="$( pvc cluster list 2>/dev/null | grep -i "^${source_cluster}" | awk '{ print $2 }' )"

# Attempt to connect to the cluster address
ssh ${cluster_address} which pvc &>/dev/null || fail "Could not SSH to source_cluster primary coordinator host"

# Verify that the VM exists
pvc -c ${source_cluster} vm info ${source_vm} &>/dev/null || fail "Specified VM is not present on the cluster"

echo "Verification complete."

# Shut down the VM
echo -n "Shutting down VM..."
set +o errexit
pvc -c ${source_cluster} vm shutdown ${source_vm} &>/dev/null
shutdown_success=$?
while ! pvc -c ${source_cluster} vm info ${source_vm} 2>/dev/null | grep '^State' | grep -q -E 'stop|disable'; do
    sleep 1
    echo -n "."
done
set -o errexit
echo " done."

# Dump the XML file
echo -n "Exporting VM configuration file... "
pvc -c ${source_cluster} vm dump ${source_vm} 1> ${destination_directory}/${source_vm}.xml 2>/dev/null
echo "done."

# Determine the list of volumes in this VM
volume_list="$( pvc -c ${source_cluster} vm info --long ${source_vm} 2>/dev/null | grep -w 'rbd' | awk '{ print $3 }' )"
for volume in ${volume_list}; do
    volume_pool="$( awk -F '/' '{ print $1 }' <<<"${volume}" )"
    volume_name="$( awk -F '/' '{ print $2 }' <<<"${volume}" )"
    volume_size="$( pvc -c ${source_cluster} storage volume list -p ${volume_pool} ${volume_name} 2>/dev/null | grep "^${volume_name}" | awk '{ print $3 }' )"
    echo -n "Exporting disk ${volume_name} (${volume_size})... "
    ssh ${cluster_address} sudo rbd map ${volume_pool}/${volume_name} &>/dev/null || fail "Failed to map volume ${volume}"
    ssh ${cluster_address} sudo dd if="/dev/rbd/${volume_pool}/${volume_name}" bs=1M 2>/dev/null | dd bs=1M of="${destination_directory}/${volume_name}.img" 2>/dev/null
    ssh ${cluster_address} sudo rbd unmap ${volume_pool}/${volume_name} &>/dev/null || fail "Failed to unmap volume ${volume}"
    echo "done."
done
client-cli/scripts/force_single_node (new executable file, 119 lines)
@@ -0,0 +1,119 @@
#!/usr/bin/env bash

# force_single_node - Manually promote a single coordinator node from a degraded cluster
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2020 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

set -o errexit
set -o pipefail

usage() {
    echo -e "Manually promote a single coordinator node from a degraded cluster"
    echo -e ""
    echo -e "DANGER: This action will cause a permanent split-brain within the cluster"
    echo -e "        which will have to be corrected manually upon cluster restoration."
    echo -e ""
    echo -e "This script is primarily designed for small clusters in situations where 2"
    echo -e "of the 3 coordinators have become unreachable or shut down. It will promote"
    echo -e "the remaining lone_node to act as a standalone coordinator, allowing basic"
    echo -e "cluster functionality to continue in a heavily degraded state until the"
    echo -e "situation can be rectified. This should only be done in exceptional cases"
    echo -e "as a disaster recovery mechanism when the remaining nodes will remain down"
    echo -e "for a significant amount of time but some VMs are required to run. In general,"
    echo -e "use of this script is not advisable."
    echo -e ""
    echo -e "Usage:"
    echo -e "  $0 <target_cluster> <lone_node>"
    echo -e ""
    echo -e "Important information:"
    echo -e " * The lone_node must be a fully-qualified name that is directly reachable from"
    echo -e "   the local system via SSH."
    echo -e " * The local user must have valid SSH access to the lone_node in the cluster."
    echo -e " * The user on the cluster node must have 'sudo' access."
}

fail() {
    echo -e "$@"
    exit 1
}

# Arguments
if [[ -z ${1} || -z ${2} ]]; then
    usage
    exit 1
fi
target_cluster="${1}"
lone_node="${2}"
lone_node_shortname="${lone_node%%.*}"

# Attempt to connect to the node
ssh ${lone_node} which pvc &>/dev/null || fail "Could not SSH to the lone_node host"

echo "Verification complete."

echo -n "Allowing Ceph single-node operation... "
temp_monmap="$( ssh ${lone_node} mktemp )"
ssh ${lone_node} "sudo systemctl stop ceph-mon@${lone_node_shortname}" &>/dev/null
ssh ${lone_node} "sudo ceph-mon -i ${lone_node_shortname} --extract-monmap ${temp_monmap}" &>/dev/null
ssh ${lone_node} "sudo cp ${temp_monmap} /etc/ceph/monmap.orig" &>/dev/null
mon_list="$( ssh ${lone_node} strings ${temp_monmap} | sort | uniq )"
for mon in ${mon_list}; do
    if [[ ${mon} == ${lone_node_shortname} ]]; then
        continue
    fi
    ssh ${lone_node} "sudo monmaptool ${temp_monmap} --rm ${mon}" &>/dev/null
done
ssh ${lone_node} "sudo ceph-mon -i ${lone_node_shortname} --inject-monmap ${temp_monmap}" &>/dev/null
ssh ${lone_node} "sudo systemctl start ceph-mon@${lone_node_shortname}" &>/dev/null
sleep 5
ssh ${lone_node} "sudo ceph osd set noout" &>/dev/null
echo "done."
echo -e "Restoration steps:"
echo -e "  sudo systemctl stop ceph-mon@${lone_node_shortname}"
echo -e "  sudo ceph-mon -i ${lone_node_shortname} --inject-monmap /etc/ceph/monmap.orig"
echo -e "  sudo systemctl start ceph-mon@${lone_node_shortname}"
echo -e "  sudo ceph osd unset noout"

echo -n "Allowing Zookeeper single-node operation... "
temp_zoocfg="$( ssh ${lone_node} mktemp )"
ssh ${lone_node} "sudo systemctl stop zookeeper"
ssh ${lone_node} "sudo awk -F '=|:' -v lone_node=${lone_node_shortname} '{
    if ( \$1 ~ /^server/ ){
        if (\$2 == lone_node) {
            print \$0
        } else {
            print \"#\" \$0
        }
    } else {
        print \$0
    }
}' /etc/zookeeper/conf/zoo.cfg > ${temp_zoocfg}"
ssh ${lone_node} "sudo mv /etc/zookeeper/conf/zoo.cfg /etc/zookeeper/conf/zoo.cfg.orig"
ssh ${lone_node} "sudo mv ${temp_zoocfg} /etc/zookeeper/conf/zoo.cfg"
ssh ${lone_node} "sudo systemctl start zookeeper"
echo "done."
echo -e "Restoration steps:"
echo -e "  sudo systemctl stop zookeeper"
echo -e "  sudo mv /etc/zookeeper/conf/zoo.cfg.orig /etc/zookeeper/conf/zoo.cfg"
echo -e "  sudo systemctl start zookeeper"
ssh ${lone_node} "sudo systemctl stop ceph-mon@${lone_node_shortname}"

echo ""
ssh ${lone_node} "sudo pvc status 2>/dev/null"
client-cli/scripts/import_vm (new executable file, 81 lines)
@@ -0,0 +1,81 @@
#!/usr/bin/env bash

# import_vm - Imports a VM to a PVC cluster from local files
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2020 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

set -o errexit
set -o pipefail

usage() {
    echo -e "Import a VM to a PVC cluster from local files."
    echo -e "Usage:"
    echo -e "  $0 <destination_cluster> <destination_pool> <vm_configuration_file> <vm_disk_file_1> [<vm_disk_file_2>] [...]"
    echo -e ""
    echo -e "Important information:"
    echo -e " * At least one disk must be specified; all disks that are present in vm_configuration_file"
    echo -e "   should be specified, though this is not strictly required."
    echo -e " * Do not switch the cluster primary coordinator while the script is running."
    echo -e " * Ensure you have enough space on the destination cluster to store all VM disks."
}

fail() {
    echo -e "$@"
    exit 1
}

# Arguments
if [[ -z ${1} || -z ${2} || -z ${3} || -z ${4} ]]; then
    usage
    exit 1
fi
destination_cluster="${1}"; shift
destination_pool="${1}"; shift
vm_config_file="${1}"; shift
vm_disk_files=( ${@} )

# Verify the cluster is reachable
pvc -c ${destination_cluster} status &>/dev/null || fail "Specified destination_cluster is not accessible"

# Determine the connection IP
cluster_address="$( pvc cluster list 2>/dev/null | grep -i "^${destination_cluster}" | awk '{ print $2 }' )"

echo "Verification complete."

# Determine information about the VM from the config file
parse_xml_field() {
    field="${1}"
    line="$( grep -F "<${field}>" ${vm_config_file} )"
    awk -F '>|<' '{ print $3 }' <<<"${line}"
}
vm_name="$( parse_xml_field name )"
echo "Importing VM ${vm_name}..."
pvc -c ${destination_cluster} vm define ${vm_config_file} 2>/dev/null

# Create the disks on the cluster
for disk_file in ${vm_disk_files[@]}; do
    disk_file_basename="$( basename ${disk_file} )"
    disk_file_ext="${disk_file_basename##*.}"
    disk_file_name="$( basename ${disk_file_basename} .${disk_file_ext} )"
    disk_file_size="$( stat --format="%s" ${disk_file} )"

    echo "Importing disk ${disk_file_name}... "
    pvc -c ${destination_cluster} storage volume add ${destination_pool} ${disk_file_name} ${disk_file_size}B 2>/dev/null
    pvc -c ${destination_cluster} storage volume upload ${destination_pool} ${disk_file_name} ${disk_file} 2>/dev/null
done
client-cli/scripts/migrate_vm (new executable file, 116 lines)
@@ -0,0 +1,116 @@
#!/usr/bin/env bash

# migrate_vm - Exports a VM from a PVC cluster to another PVC cluster
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2020 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

set -o errexit
set -o pipefail

usage() {
    echo -e "Export a VM from a PVC cluster to another PVC cluster."
    echo -e "Usage:"
    echo -e "  $0 <vm> <source_cluster> <destination_cluster> <destination_pool>"
    echo -e ""
    echo -e "Important information:"
    echo -e " * The local user must have valid SSH access to the primary coordinator in the source_cluster."
    echo -e " * The user on the cluster primary coordinator must have 'sudo' access."
    echo -e " * If the VM is not in 'stop' state, it will be shut down."
    echo -e " * Do not switch the cluster primary coordinator on either cluster while the script is running."
    echo -e " * Ensure you have enough space on the target cluster to store all VM disks."
}

fail() {
    echo -e "$@"
    exit 1
}

# Arguments
if [[ -z ${1} || -z ${2} || -z ${3} || -z ${4} ]]; then
    usage
    exit 1
fi
source_vm="${1}"
source_cluster="${2}"
destination_cluster="${3}"
destination_pool="${4}"

# Verify each cluster is reachable
pvc -c ${source_cluster} status &>/dev/null || fail "Specified source_cluster is not accessible"
pvc -c ${destination_cluster} status &>/dev/null || fail "Specified destination_cluster is not accessible"

# Determine the connection IPs
source_cluster_address="$( pvc cluster list 2>/dev/null | grep -i "^${source_cluster}" | awk '{ print $2 }' )"
destination_cluster_address="$( pvc cluster list 2>/dev/null | grep -i "^${destination_cluster}" | awk '{ print $2 }' )"

# Attempt to connect to the cluster addresses
ssh ${source_cluster_address} which pvc &>/dev/null || fail "Could not SSH to source_cluster primary coordinator host"
ssh ${destination_cluster_address} which pvc &>/dev/null || fail "Could not SSH to destination_cluster primary coordinator host"

# Verify that the VM exists
pvc -c ${source_cluster} vm info ${source_vm} &>/dev/null || fail "Specified VM is not present on the source cluster"

echo "Verification complete."

# Shut down the VM
echo -n "Shutting down VM..."
set +o errexit
pvc -c ${source_cluster} vm shutdown ${source_vm} &>/dev/null
shutdown_success=$?
while ! pvc -c ${source_cluster} vm info ${source_vm} 2>/dev/null | grep '^State' | grep -q -E 'stop|disable'; do
    sleep 1
    echo -n "."
done
set -o errexit
echo " done."

tempfile="$( mktemp )"

# Dump the XML file
echo -n "Exporting VM configuration file from source cluster... "
pvc -c ${source_cluster} vm dump ${source_vm} 1> ${tempfile} 2>/dev/null
echo "done."

# Import the XML file
echo -n "Importing VM configuration file to destination cluster... "
pvc -c ${destination_cluster} vm define ${tempfile}
echo "done."

rm -f ${tempfile}

# Determine the list of volumes in this VM
volume_list="$( pvc -c ${source_cluster} vm info --long ${source_vm} 2>/dev/null | grep -w 'rbd' | awk '{ print $3 }' )"

# Parse and migrate each volume
for volume in ${volume_list}; do
    volume_pool="$( awk -F '/' '{ print $1 }' <<<"${volume}" )"
    volume_name="$( awk -F '/' '{ print $2 }' <<<"${volume}" )"
    volume_size="$( pvc -c ${source_cluster} storage volume list -p ${volume_pool} ${volume_name} 2>/dev/null | grep "^${volume_name}" | awk '{ print $3 }' )"
    echo "Transferring disk ${volume_name} (${volume_size})... "
    pvc -c ${destination_cluster} storage volume add ${destination_pool} ${volume_name} ${volume_size} 2>/dev/null
    ssh ${source_cluster_address} sudo rbd map ${volume_pool}/${volume_name} &>/dev/null || fail "Failed to map volume ${volume} on source cluster"
    ssh ${destination_cluster_address} sudo rbd map ${destination_pool}/${volume_name} &>/dev/null || fail "Failed to map volume ${volume} on destination cluster"
    ssh ${source_cluster_address} sudo dd if="/dev/rbd/${volume_pool}/${volume_name}" bs=1M 2>/dev/null | pv | ssh ${destination_cluster_address} sudo dd bs=1M of="/dev/rbd/${destination_pool}/${volume_name}" 2>/dev/null
    ssh ${source_cluster_address} sudo rbd unmap ${volume_pool}/${volume_name} &>/dev/null || fail "Failed to unmap volume ${volume} on source cluster"
    ssh ${destination_cluster_address} sudo rbd unmap ${destination_pool}/${volume_name} &>/dev/null || fail "Failed to unmap volume ${volume} on destination cluster"
done

if [[ ${shutdown_success} -eq 0 ]]; then
    pvc -c ${destination_cluster} vm start ${source_vm}
fi
@@ -20,15 +20,17 @@
#
###############################################################################

import os
import re
import click
import json
import time
import math

-import client_lib.ansiprint as ansiprint
-import client_lib.zkhandler as zkhandler
-import client_lib.common as common
+import daemon_lib.vm as vm
+import daemon_lib.ansiprint as ansiprint
+import daemon_lib.zkhandler as zkhandler
+import daemon_lib.common as common

#
# Supplemental functions
@@ -96,8 +98,11 @@ def format_bytes_tohuman(databytes):

def format_bytes_fromhuman(datahuman):
    # Trim off human-readable character
-    dataunit = datahuman[-1]
-    datasize = int(datahuman[:-1])
+    dataunit = str(datahuman)[-1]
+    datasize = int(str(datahuman)[:-1])
+    if not re.match('[A-Z]', dataunit):
+        dataunit = 'B'
+        datasize = int(datahuman)
    databytes = datasize * byte_unit_matrix[dataunit]
    return '{}B'.format(databytes)
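`byte_unit_matrix` is defined earlier in the file and is not shown in this hunk; assuming the usual powers-of-1024 mapping, the new `str()` casts let bare integers (no unit suffix) pass through, as this sketch shows:

    # Sketch of the fixed function with an assumed byte_unit_matrix.
    import re

    byte_unit_matrix = {'B': 1, 'K': 1024, 'M': 1024**2, 'G': 1024**3, 'T': 1024**4}

    def format_bytes_fromhuman(datahuman):
        dataunit = str(datahuman)[-1]
        datasize = int(str(datahuman)[:-1]) if re.match('[A-Z]', dataunit) else int(datahuman)
        if not re.match('[A-Z]', dataunit):
            dataunit = 'B'
        return '{}B'.format(datasize * byte_unit_matrix[dataunit])

    print(format_bytes_fromhuman('256M'))  # 268435456B
    print(format_bytes_fromhuman(4096))    # 4096B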
@@ -150,9 +155,9 @@ def get_status(zk_conn):
    }
    return True, status_data

-def get_radosdf(zk_conn):
+def get_util(zk_conn):
    primary_node = zkhandler.readdata(zk_conn, '/primary_node')
-    ceph_df = zkhandler.readdata(zk_conn, '/ceph/radosdf').rstrip()
+    ceph_df = zkhandler.readdata(zk_conn, '/ceph/util').rstrip()

    # Create a data structure for the information
    status_data = {
@@ -205,6 +210,8 @@ def getOutputColoursOSD(osd_information):

    return osd_up_flag, osd_up_colour, osd_in_flag, osd_in_colour

+# OSD addition and removal uses the /cmd/ceph pipe
+# These actions must occur on the specific node they reference
def add_osd(zk_conn, node, device, weight):
    # Verify the target node exists
    if not common.verifyNode(zk_conn, node):
@@ -279,118 +286,35 @@ def in_osd(zk_conn, osd_id):
    if not verifyOSD(zk_conn, osd_id):
        return False, 'ERROR: No OSD with ID "{}" is present in the cluster.'.format(osd_id)

-    # Tell the cluster to online an OSD
-    in_osd_string = 'osd_in {}'.format(osd_id)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': in_osd_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
-        try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-osd_in':
-                message = 'Set OSD {} online in the cluster.'.format(osd_id)
-                success = True
-            else:
-                message = 'ERROR: Failed to set OSD online; check node logs for details.'
-                success = False
-        except:
-            success = False
-            message = 'ERROR Command ignored by node.'
+    retcode, stdout, stderr = common.run_os_command('ceph osd in {}'.format(osd_id))
+    if retcode:
+        return False, 'ERROR: Failed to enable OSD {}: {}'.format(osd_id, stderr)

-    # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-    with lock:
-        time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
-
-    return success, message
+    return True, 'Set OSD {} online.'.format(osd_id)

def out_osd(zk_conn, osd_id):
    if not verifyOSD(zk_conn, osd_id):
        return False, 'ERROR: No OSD with ID "{}" is present in the cluster.'.format(osd_id)

-    # Tell the cluster to offline an OSD
-    out_osd_string = 'osd_out {}'.format(osd_id)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': out_osd_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
-        try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-osd_out':
-                message = 'Set OSD {} offline in the cluster.'.format(osd_id)
-                success = True
-            else:
-                message = 'ERROR: Failed to set OSD offline; check node logs for details.'
-                success = False
-        except:
-            success = False
-            message = 'ERROR Command ignored by node.'
+    retcode, stdout, stderr = common.run_os_command('ceph osd out {}'.format(osd_id))
+    if retcode:
+        return False, 'ERROR: Failed to disable OSD {}: {}'.format(osd_id, stderr)

-    # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-    with lock:
-        time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
-
-    return success, message
+    return True, 'Set OSD {} offline.'.format(osd_id)

def set_osd(zk_conn, option):
-    # Tell the cluster to set an OSD property
-    set_osd_string = 'osd_set {}'.format(option)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': set_osd_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
-        try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-osd_set':
-                message = 'Set OSD property {} on the cluster.'.format(option)
-                success = True
-            else:
-                message = 'ERROR: Failed to set OSD property; check node logs for details.'
-                success = False
-        except:
-            success = False
-            message = 'ERROR Command ignored by node.'
+    retcode, stdout, stderr = common.run_os_command('ceph osd set {}'.format(option))
+    if retcode:
+        return False, 'ERROR: Failed to set property "{}": {}'.format(option, stderr)

-    zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
-    return success, message
+    return True, 'Set OSD property "{}".'.format(option)

def unset_osd(zk_conn, option):
-    # Tell the cluster to unset an OSD property
-    unset_osd_string = 'osd_unset {}'.format(option)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': unset_osd_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
-        try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-osd_unset':
-                message = 'Unset OSD property {} on the cluster.'.format(option)
-                success = True
-            else:
-                message = 'ERROR: Failed to unset OSD property; check node logs for details.'
-                success = False
-        except:
-            success = False
-            message = 'ERROR Command ignored by node.'
+    retcode, stdout, stderr = common.run_os_command('ceph osd unset {}'.format(option))
+    if retcode:
+        return False, 'ERROR: Failed to unset property "{}": {}'.format(option, stderr)

-    # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-    with lock:
-        time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
-
-    return success, message
+    return True, 'Unset OSD property "{}".'.format(option)

def get_list_osd(zk_conn, limit, is_fuzzy=True):
    osd_list = []
@@ -413,7 +337,7 @@ def get_list_osd(zk_conn, limit, is_fuzzy=True):
    else:
        osd_list.append(getOSDInformation(zk_conn, osd))

-    return True, osd_list
+    return True, sorted(osd_list, key = lambda x: int(x['id']))

def format_list_osd(osd_list):
    osd_list_output = []
@@ -664,65 +588,66 @@ def getPoolInformation(zk_conn, pool):
    return pool_information

def add_pool(zk_conn, name, pgs, replcfg):
-    # Tell the cluster to create a new pool
-    add_pool_string = 'pool_add {},{},{}'.format(name, pgs, replcfg)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': add_pool_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
-        try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-pool_add':
-                message = 'Created new RBD pool "{}" with "{}" PGs and replication configuration {}.'.format(name, pgs, replcfg)
-                success = True
-            else:
-                message = 'ERROR: Failed to create new pool; check node logs for details.'
-                success = False
-        except:
-            message = 'ERROR: Command ignored by node.'
-            success = False
+    # Prepare the copies/mincopies variables
+    try:
+        copies, mincopies = replcfg.split(',')
+        copies = int(copies.replace('copies=', ''))
+        mincopies = int(mincopies.replace('mincopies=', ''))
+    except:
+        copies = None
+        mincopies = None
+    if not copies or not mincopies:
+        return False, 'ERROR: Replication configuration "{}" is not valid.'.format(replcfg)

-    # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-    with lock:
-        time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
+    # 1. Create the pool
+    retcode, stdout, stderr = common.run_os_command('ceph osd pool create {} {} replicated'.format(name, pgs))
+    if retcode:
+        return False, 'ERROR: Failed to create pool "{}" with {} PGs: {}'.format(name, pgs, stderr)

+    # 2. Set the size and minsize
+    retcode, stdout, stderr = common.run_os_command('ceph osd pool set {} size {}'.format(name, copies))
+    if retcode:
+        return False, 'ERROR: Failed to set pool "{}" size of {}: {}'.format(name, copies, stderr)

-    return success, message
+    retcode, stdout, stderr = common.run_os_command('ceph osd pool set {} min_size {}'.format(name, mincopies))
+    if retcode:
+        return False, 'ERROR: Failed to set pool "{}" minimum size of {}: {}'.format(name, mincopies, stderr)

+    # 3. Enable RBD application
+    retcode, stdout, stderr = common.run_os_command('ceph osd pool application enable {} rbd'.format(name))
+    if retcode:
+        return False, 'ERROR: Failed to enable RBD application on pool "{}" : {}'.format(name, stderr)

+    # 4. Add the new pool to Zookeeper
+    zkhandler.writedata(zk_conn, {
+        '/ceph/pools/{}'.format(name): '',
+        '/ceph/pools/{}/pgs'.format(name): pgs,
+        '/ceph/pools/{}/stats'.format(name): '{}',
+        '/ceph/volumes/{}'.format(name): '',
+        '/ceph/snapshots/{}'.format(name): '',
+    })

+    return True, 'Created RBD pool "{}" with {} PGs'.format(name, pgs)
||||
|
||||
def remove_pool(zk_conn, name):
|
||||
if not verifyPool(zk_conn, name):
|
||||
return False, 'ERROR: No pool with name "{}" is present in the cluster.'.format(name)
|
||||
|
||||
# Tell the cluster to create a new pool
|
||||
remove_pool_string = 'pool_remove {}'.format(name)
|
||||
zkhandler.writedata(zk_conn, {'/cmd/ceph': remove_pool_string})
|
||||
# Wait 1/2 second for the cluster to get the message and start working
|
||||
time.sleep(0.5)
|
||||
# Acquire a read lock, so we get the return exclusively
|
||||
lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
|
||||
with lock:
|
||||
try:
|
||||
result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
|
||||
if result == 'success-pool_remove':
|
||||
message = 'Removed RBD pool "{}" and all volumes.'.format(name)
|
||||
success = True
|
||||
else:
|
||||
message = 'ERROR: Failed to remove pool; check node logs for details.'
|
||||
success = False
|
||||
except Exception as e:
|
||||
message = 'ERROR: Command ignored by node: {}'.format(e)
|
||||
success = False
|
||||
# 1. Remove pool volumes
|
||||
for volume in zkhandler.listchildren(zk_conn, '/ceph/volumes/{}'.format(name)):
|
||||
remove_volume(zk_conn, logger, name, volume)
|
||||
|
||||
# Acquire a write lock to ensure things go smoothly
|
||||
lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
|
||||
with lock:
|
||||
time.sleep(0.5)
|
||||
zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
|
||||
# 2. Remove the pool
|
||||
retcode, stdout, stderr = common.run_os_command('ceph osd pool rm {pool} {pool} --yes-i-really-really-mean-it'.format(pool=name))
|
||||
if retcode:
|
||||
return False, 'ERROR: Failed to remove pool "{}": {}'.format(name, stderr)
|
||||
|
||||
return success, message
|
||||
# 3. Delete pool from Zookeeper
|
||||
zkhandler.deletekey(zk_conn, '/ceph/pools/{}'.format(name))
|
||||
zkhandler.deletekey(zk_conn, '/ceph/volumes/{}'.format(name))
|
||||
zkhandler.deletekey(zk_conn, '/ceph/snapshots/{}'.format(name))
|
||||
|
||||
return True, 'Removed RBD pool "{}" and all volumes.'.format(name)
|
||||
|
||||
def get_list_pool(zk_conn, limit, is_fuzzy=True):
|
||||
pool_list = []
|
||||
@ -742,7 +667,7 @@ def get_list_pool(zk_conn, limit, is_fuzzy=True):
|
||||
else:
|
||||
pool_list.append(getPoolInformation(zk_conn, pool))
|
||||
|
||||
return True, pool_list
|
||||
return True, sorted(pool_list, key = lambda x: int(x['stats']['id']))
|
||||
|
||||
def format_list_pool(pool_list):
|
||||
pool_list_output = []
|
||||
@ -967,154 +892,171 @@ def getVolumeInformation(zk_conn, pool, volume):
|
||||
return volume_information
|
||||
|
||||
def add_volume(zk_conn, pool, name, size):
|
||||
# Tell the cluster to create a new volume
|
||||
databytes = format_bytes_fromhuman(size)
|
||||
add_volume_string = 'volume_add {},{},{}'.format(pool, name, databytes)
|
||||
zkhandler.writedata(zk_conn, {'/cmd/ceph': add_volume_string})
|
||||
# Wait 1/2 second for the cluster to get the message and start working
|
||||
time.sleep(0.5)
|
||||
# Acquire a read lock, so we get the return exclusively
|
||||
lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
|
||||
with lock:
|
||||
try:
|
||||
result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
|
||||
if result == 'success-volume_add':
|
||||
message = 'Created new RBD volume "{}" of size "{}" on pool "{}".'.format(name, size, pool)
|
||||
success = True
|
||||
else:
|
||||
message = 'ERROR: Failed to create new volume; check node logs for details.'
|
||||
success = False
|
||||
except:
|
||||
message = 'ERROR: Command ignored by node.'
|
||||
success = False
|
||||
# 1. Create the volume
|
||||
retcode, stdout, stderr = common.run_os_command('rbd create --size {} --image-feature layering,exclusive-lock {}/{}'.format(size, pool, name))
|
||||
if retcode:
|
||||
return False, 'ERROR: Failed to create RBD volume "{}": {}'.format(name, stderr)
|
||||
|
||||
# Acquire a write lock to ensure things go smoothly
|
||||
lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
|
||||
with lock:
|
||||
time.sleep(0.5)
|
||||
zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
|
||||
# 2. Get volume stats
|
||||
retcode, stdout, stderr = common.run_os_command('rbd info --format json {}/{}'.format(pool, name))
|
||||
volstats = stdout
|
||||
|
||||
return success, message
|
||||
# 3. Add the new volume to Zookeeper
|
||||
zkhandler.writedata(zk_conn, {
|
||||
'/ceph/volumes/{}/{}'.format(pool, name): '',
|
||||
'/ceph/volumes/{}/{}/stats'.format(pool, name): volstats,
|
||||
'/ceph/snapshots/{}/{}'.format(pool, name): '',
|
||||
})
|
||||
|
||||
return True, 'Created RBD volume "{}/{}" ({}).'.format(pool, name, size)
|
||||
|
||||
+def clone_volume(zk_conn, pool, name_src, name_new):
+    if not verifyVolume(zk_conn, pool, name_src):
+        return False, 'ERROR: No volume with name "{}" is present in pool "{}".'.format(name_src, pool)
+
+    # 1. Clone the volume
+    retcode, stdout, stderr = common.run_os_command('rbd copy {}/{} {}/{}'.format(pool, name_src, pool, name_new))
+    if retcode:
+        return False, 'ERROR: Failed to clone RBD volume "{}" to "{}" in pool "{}": {}'.format(name_src, name_new, pool, stderr)
+
+    # 2. Get volume stats
+    retcode, stdout, stderr = common.run_os_command('rbd info --format json {}/{}'.format(pool, name_new))
+    volstats = stdout
+
+    # 3. Add the new volume to Zookeeper
+    zkhandler.writedata(zk_conn, {
+        '/ceph/volumes/{}/{}'.format(pool, name_new): '',
+        '/ceph/volumes/{}/{}/stats'.format(pool, name_new): volstats,
+        '/ceph/snapshots/{}/{}'.format(pool, name_new): '',
+    })
+
+    return True, 'Cloned RBD volume "{}" to "{}" in pool "{}"'.format(name_src, name_new, pool)

 def resize_volume(zk_conn, pool, name, size):
-    # Tell the cluster to resize the volume
-    databytes = format_bytes_fromhuman(size)
-    resize_volume_string = 'volume_resize {},{},{}'.format(pool, name, databytes)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': resize_volume_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
+    if not verifyVolume(zk_conn, pool, name):
+        return False, 'ERROR: No volume with name "{}" is present in pool "{}".'.format(name, pool)
+
+    # 1. Resize the volume
+    retcode, stdout, stderr = common.run_os_command('rbd resize --size {} {}/{}'.format(size, pool, name))
+    if retcode:
+        return False, 'ERROR: Failed to resize RBD volume "{}" to size "{}" in pool "{}": {}'.format(name, size, pool, stderr)
+
+    # 2a. Determine the node running this VM if applicable
+    active_node = None
+    volume_vm_name = name.split('_')[0]
+    retcode, vm_info = vm.get_info(zk_conn, volume_vm_name)
+    if retcode:
+        for disk in vm_info['disks']:
+            # This block device is present in this VM so we can continue
+            if disk['name'] == '{}/{}'.format(pool, name):
+                active_node = vm_info['node']
+                volume_id = disk['dev']
+    # 2b. Perform a live resize in libvirt if the VM is running
+    if active_node is not None and vm_info.get('state', '') == 'start':
+        import libvirt
+        # Run the libvirt command against the target host
         try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-volume_resize':
-                message = 'Resized RBD volume "{}" to size "{}" on pool "{}".'.format(name, size, pool)
-                success = True
-            else:
-                message = 'ERROR: Failed to resize volume; check node logs for details.'
-                success = False
+            dest_lv = 'qemu+tcp://{}/system'.format(active_node)
+            target_lv_conn = libvirt.open(dest_lv)
+            target_vm_conn = target_lv_conn.lookupByName(vm_info['name'])
+            if target_vm_conn:
+                target_vm_conn.blockResize(volume_id, int(format_bytes_fromhuman(size)[:-1]), libvirt.VIR_DOMAIN_BLOCK_RESIZE_BYTES)
+            target_lv_conn.close()
         except:
-            message = 'ERROR: Command ignored by node.'
-            success = False
+            pass

-    # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-    with lock:
-        time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
+    # 2. Get volume stats
+    retcode, stdout, stderr = common.run_os_command('rbd info --format json {}/{}'.format(pool, name))
+    volstats = stdout

-    return success, message
+    # 3. Add the new volume to Zookeeper
+    zkhandler.writedata(zk_conn, {
+        '/ceph/volumes/{}/{}'.format(pool, name): '',
+        '/ceph/volumes/{}/{}/stats'.format(pool, name): volstats,
+        '/ceph/snapshots/{}/{}'.format(pool, name): '',
+    })
+
+    return True, 'Resized RBD volume "{}" to size "{}" in pool "{}".'.format(name, size, pool)

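The interesting part of the new resize path is step 2b: if the volume backs a running VM, the block device is also resized live through libvirt so the guest sees the new size immediately. A condensed sketch of that step in isolation, assuming the libvirt-python bindings and illustrative node/VM/device names (the diff derives the byte count from format_bytes_fromhuman() before the call):

```python
import libvirt

def live_resize(node, vm_name, dev, size_bytes):
    # Connect to the hypervisor currently running the VM
    conn = libvirt.open('qemu+tcp://{}/system'.format(node))
    try:
        dom = conn.lookupByName(vm_name)
        # VIR_DOMAIN_BLOCK_RESIZE_BYTES makes libvirt interpret the size
        # argument as bytes rather than its default of KiB
        dom.blockResize(dev, size_bytes, libvirt.VIR_DOMAIN_BLOCK_RESIZE_BYTES)
    finally:
        conn.close()

# e.g. live_resize('hv1', 'testvm', 'sdb', 2 * 1024**3)
```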
 def rename_volume(zk_conn, pool, name, new_name):
-    # Tell the cluster to rename
-    rename_volume_string = 'volume_rename {},{},{}'.format(pool, name, new_name)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': rename_volume_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
-        try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-volume_rename':
-                message = 'Renamed RBD volume "{}" to "{}" on pool "{}".'.format(name, new_name, pool)
-                success = True
-            else:
-                message = 'ERROR: Failed to rename volume {} to {}; check node logs for details.'.format(name, new_name)
-                success = False
-        except:
-            message = 'ERROR: Command ignored by node.'
-            success = False
+    if not verifyVolume(zk_conn, pool, name):
+        return False, 'ERROR: No volume with name "{}" is present in pool "{}".'.format(name, pool)

-    # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-    with lock:
-        time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
+    # 1. Rename the volume
+    retcode, stdout, stderr = common.run_os_command('rbd rename {}/{} {}'.format(pool, name, new_name))
+    if retcode:
+        return False, 'ERROR: Failed to rename volume "{}" to "{}" in pool "{}": {}'.format(name, new_name, pool, stderr)

-    return success, message
+    # 2. Rename the volume in Zookeeper
+    zkhandler.renamekey(zk_conn, {
+        '/ceph/volumes/{}/{}'.format(pool, name): '/ceph/volumes/{}/{}'.format(pool, new_name),
+        '/ceph/snapshots/{}/{}'.format(pool, name): '/ceph/snapshots/{}/{}'.format(pool, new_name)
+    })

-def clone_volume(zk_conn, pool, name, new_name):
-    # Tell the cluster to clone
-    clone_volume_string = 'volume_clone {},{},{}'.format(pool, name, new_name)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': clone_volume_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
-        try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-volume_clone':
-                message = 'Cloned RBD volume "{}" to "{}" on pool "{}".'.format(name, new_name, pool)
-                success = True
-            else:
-                message = 'ERROR: Failed to clone volume {} to {}; check node logs for details.'.format(name, new_name)
-                success = False
-        except:
-            message = 'ERROR: Command ignored by node.'
-            success = False
+    # 3. Get volume stats
+    retcode, stdout, stderr = common.run_os_command('rbd info --format json {}/{}'.format(pool, new_name))
+    volstats = stdout

-    # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-    with lock:
-        time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
+    # 4. Update the volume stats in Zookeeper
+    zkhandler.writedata(zk_conn, {
+        '/ceph/volumes/{}/{}/stats'.format(pool, new_name): volstats,
+    })

-    return success, message
+    return True, 'Renamed RBD volume "{}" to "{}" in pool "{}".'.format(name, new_name, pool)

 def remove_volume(zk_conn, pool, name):
     if not verifyVolume(zk_conn, pool, name):
-        return False, 'ERROR: No volume with name "{}" is present in pool {}.'.format(name, pool)
+        return False, 'ERROR: No volume with name "{}" is present in pool "{}".'.format(name, pool)

-    # Tell the cluster to create a new volume
-    remove_volume_string = 'volume_remove {},{}'.format(pool, name)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': remove_volume_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
-        try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-volume_remove':
-                message = 'Removed RBD volume "{}" in pool "{}".'.format(name, pool)
-                success = True
-            else:
-                message = 'ERROR: Failed to remove volume; check node logs for details.'
-                success = False
-        except Exception as e:
-            message = 'ERROR: Command ignored by node: {}'.format(e)
-            success = False
+    # 1. Remove volume snapshots
+    for snapshot in zkhandler.listchildren(zk_conn, '/ceph/snapshots/{}/{}'.format(pool, name)):
+        remove_snapshot(zk_conn, pool, name, snapshot)

-    # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-    with lock:
-        time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
+    # 2. Remove the volume
+    retcode, stdout, stderr = common.run_os_command('rbd rm {}/{}'.format(pool, name))
+    if retcode:
+        return False, 'ERROR: Failed to remove RBD volume "{}" in pool "{}": {}'.format(name, pool, stderr)

-    return success, message
+    # 3. Delete volume from Zookeeper
+    zkhandler.deletekey(zk_conn, '/ceph/volumes/{}/{}'.format(pool, name))
+    zkhandler.deletekey(zk_conn, '/ceph/snapshots/{}/{}'.format(pool, name))
+
+    return True, 'Removed RBD volume "{}" in pool "{}".'.format(name, pool)

+def map_volume(zk_conn, pool, name):
+    if not verifyVolume(zk_conn, pool, name):
+        return False, 'ERROR: No volume with name "{}" is present in pool "{}".'.format(name, pool)
+
+    # 1. Map the volume onto the local system
+    retcode, stdout, stderr = common.run_os_command('rbd map {}/{}'.format(pool, name))
+    if retcode:
+        return False, 'ERROR: Failed to map RBD volume "{}" in pool "{}": {}'.format(name, pool, stderr)
+
+    # 2. Calculate the absolute path to the mapped volume
+    mapped_volume = '/dev/rbd/{}/{}'.format(pool, name)
+
+    # 3. Ensure the volume exists
+    if not os.path.exists(mapped_volume):
+        return False, 'ERROR: Mapped volume not found at expected location "{}".'.format(mapped_volume)
+
+    return True, mapped_volume
+
+def unmap_volume(zk_conn, pool, name):
+    if not verifyVolume(zk_conn, pool, name):
+        return False, 'ERROR: No volume with name "{}" is present in pool "{}".'.format(name, pool)
+
+    mapped_volume = '/dev/rbd/{}/{}'.format(pool, name)
+
+    # 1. Ensure the volume exists
+    if not os.path.exists(mapped_volume):
+        return False, 'ERROR: Mapped volume not found at expected location "{}".'.format(mapped_volume)
+
+    # 2. Unmap the volume
+    retcode, stdout, stderr = common.run_os_command('rbd unmap {}'.format(mapped_volume))
+    if retcode:
+        return False, 'ERROR: Failed to unmap RBD volume at "{}": {}'.format(mapped_volume, stderr)
+
+    return True, 'Unmapped RBD volume at "{}".'.format(mapped_volume)

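map_volume() returns the mapped device path on success, which makes a map/use/unmap round-trip straightforward. An illustrative usage sketch, assuming zk_conn is an established connection from this library's zkhandler:

```python
retcode, result = map_volume(zk_conn, 'vms', 'testvol')
if retcode:
    device = result  # e.g. /dev/rbd/vms/testvol
    # ... read from or write to the block device here ...
    retcode, message = unmap_volume(zk_conn, 'vms', 'testvol')
else:
    print(result)  # error message
```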
 def get_list_volume(zk_conn, pool, limit, is_fuzzy=True):
     volume_list = []
@@ -1144,7 +1086,7 @@ def get_list_volume(zk_conn, pool, limit, is_fuzzy=True):
         else:
             volume_list.append(getVolumeInformation(zk_conn, pool_name, volume_name))

-    return True, volume_list
+    return True, sorted(volume_list, key=lambda x: str(x['name']))

 def format_list_volume(volume_list):
     volume_list_output = []
@@ -1276,94 +1218,55 @@ def getCephSnapshots(zk_conn, pool, volume):
     return snapshot_list

 def add_snapshot(zk_conn, pool, volume, name):
-    # Tell the cluster to create a new snapshot
-    add_snapshot_string = 'snapshot_add {},{},{}'.format(pool, volume, name)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': add_snapshot_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
-        try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-snapshot_add':
-                message = 'Created new RBD snapshot "{}" of volume "{}" on pool "{}".'.format(name, volume, pool)
-                success = True
-            else:
-                message = 'ERROR: Failed to create new snapshot; check node logs for details.'
-                success = False
-        except:
-            message = 'ERROR: Command ignored by node.'
-            success = False
+    if not verifyVolume(zk_conn, pool, volume):
+        return False, 'ERROR: No volume with name "{}" is present in pool "{}".'.format(volume, pool)

-    # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-    with lock:
-        time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
+    # 1. Create the snapshot
+    retcode, stdout, stderr = common.run_os_command('rbd snap create {}/{}@{}'.format(pool, volume, name))
+    if retcode:
+        return False, 'ERROR: Failed to create RBD snapshot "{}" of volume "{}" in pool "{}": {}'.format(name, volume, pool, stderr)

-    return success, message
+    # 2. Add the snapshot to Zookeeper
+    zkhandler.writedata(zk_conn, {
+        '/ceph/snapshots/{}/{}/{}'.format(pool, volume, name): '',
+        '/ceph/snapshots/{}/{}/{}/stats'.format(pool, volume, name): '{}'
+    })
+
+    return True, 'Created RBD snapshot "{}" of volume "{}" in pool "{}".'.format(name, volume, pool)

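All of these functions maintain a parallel record of volumes and snapshots under /ceph in Zookeeper. A sketch of that key layout written against kazoo directly (the host and names are illustrative; the real code goes through zkhandler.writedata):

```python
from kazoo.client import KazooClient

zk = KazooClient(hosts='127.0.0.1:2181')
zk.start()
# Volume record plus its stats child
zk.create('/ceph/volumes/vms/testvol', b'', makepath=True)
zk.create('/ceph/volumes/vms/testvol/stats', b'{}', makepath=True)
# Snapshot records hang off a per-volume parent
zk.create('/ceph/snapshots/vms/testvol', b'', makepath=True)
zk.create('/ceph/snapshots/vms/testvol/snap1', b'', makepath=True)
zk.create('/ceph/snapshots/vms/testvol/snap1/stats', b'{}', makepath=True)
zk.stop()
```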
 def rename_snapshot(zk_conn, pool, volume, name, new_name):
-    # Tell the cluster to rename
-    rename_snapshot_string = 'snapshot_rename {},{},{}'.format(pool, name, new_name)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': rename_snapshot_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
-        try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-snapshot_rename':
-                message = 'Renamed RBD volume snapshot "{}" to "{}" for volume {} on pool "{}".'.format(name, new_name, volume, pool)
-                success = True
-            else:
-                message = 'ERROR: Failed to rename volume {} to {}; check node logs for details.'.format(name, new_name)
-                success = False
-        except:
-            message = 'ERROR: Command ignored by node.'
-            success = False
+    if not verifyVolume(zk_conn, pool, volume):
+        return False, 'ERROR: No volume with name "{}" is present in pool "{}".'.format(volume, pool)
+    if not verifySnapshot(zk_conn, pool, volume, name):
+        return False, 'ERROR: No snapshot with name "{}" is present for volume "{}" in pool "{}".'.format(name, volume, pool)

-    # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-    with lock:
-        time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
+    # 1. Rename the snapshot
+    retcode, stdout, stderr = common.run_os_command('rbd snap rename {}/{}@{} {}'.format(pool, volume, name, new_name))
+    if retcode:
+        return False, 'ERROR: Failed to rename RBD snapshot "{}" to "{}" for volume "{}" in pool "{}": {}'.format(name, new_name, volume, pool, stderr)

-    return success, message
+    # 2. Rename the snapshot in ZK
+    zkhandler.renamekey(zk_conn, {
+        '/ceph/snapshots/{}/{}/{}'.format(pool, volume, name): '/ceph/snapshots/{}/{}/{}'.format(pool, volume, new_name)
+    })
+
+    return True, 'Renamed RBD snapshot "{}" to "{}" for volume "{}" in pool "{}".'.format(name, new_name, volume, pool)

 def remove_snapshot(zk_conn, pool, volume, name):
     if not verifyVolume(zk_conn, pool, volume):
         return False, 'ERROR: No volume with name "{}" is present in pool "{}".'.format(volume, pool)
     if not verifySnapshot(zk_conn, pool, volume, name):
-        return False, 'ERROR: No snapshot with name "{}" is present of volume {} on pool {}.'.format(name, volume, pool)
+        return False, 'ERROR: No snapshot with name "{}" is present of volume {} in pool {}.'.format(name, volume, pool)

-    # Tell the cluster to create a new snapshot
-    remove_snapshot_string = 'snapshot_remove {},{},{}'.format(pool, volume, name)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': remove_snapshot_string})
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
-    with lock:
-        try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
-            if result == 'success-snapshot_remove':
-                message = 'Removed RBD snapshot "{}" of volume "{}" in pool "{}".'.format(name, volume, pool)
-                success = True
-            else:
-                message = 'ERROR: Failed to remove snapshot; check node logs for details.'
-                success = False
-        except Exception as e:
-            message = 'ERROR: Command ignored by node: {}'.format(e)
-            success = False
+    # 1. Remove the snapshot
+    retcode, stdout, stderr = common.run_os_command('rbd snap rm {}/{}@{}'.format(pool, volume, name))
+    if retcode:
+        return False, 'Failed to remove RBD snapshot "{}" of volume "{}" in pool "{}": {}'.format(name, volume, pool, stderr)

-    # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
-    with lock:
-        time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
+    # 2. Delete snapshot from Zookeeper
+    zkhandler.deletekey(zk_conn, '/ceph/snapshots/{}/{}/{}'.format(pool, volume, name))

-    return success, message
+    return True, 'Removed RBD snapshot "{}" of volume "{}" in pool "{}".'.format(name, volume, pool)

 def get_list_snapshot(zk_conn, pool, volume, limit, is_fuzzy=True):
     snapshot_list = []
@@ -1394,7 +1297,7 @@ def get_list_snapshot(zk_conn, pool, volume, limit, is_fuzzy=True):
         else:
             snapshot_list.append({'pool': pool_name, 'volume': volume_name, 'snapshot': snapshot_name})

-    return True, snapshot_list
+    return True, sorted(snapshot_list, key=lambda x: int(x['id']))

 def format_list_snapshot(snapshot_list):
     snapshot_list_output = []
@@ -21,16 +21,17 @@
 ###############################################################################

 import json
 import re

 from distutils.util import strtobool

-import client_lib.ansiprint as ansiprint
-import client_lib.zkhandler as zkhandler
-import client_lib.common as common
-import client_lib.vm as pvc_vm
-import client_lib.node as pvc_node
-import client_lib.network as pvc_network
-import client_lib.ceph as pvc_ceph
+import daemon_lib.ansiprint as ansiprint
+import daemon_lib.zkhandler as zkhandler
+import daemon_lib.common as common
+import daemon_lib.vm as pvc_vm
+import daemon_lib.node as pvc_node
+import daemon_lib.network as pvc_network
+import daemon_lib.ceph as pvc_ceph

 def set_maintenance(zk_conn, maint_state):
     try:
@@ -50,6 +51,10 @@ def getClusterInformation(zk_conn):
     except:
         maint_state = 'false'

+    # List of messages to display to the clients
+    cluster_health_msg = []
+    storage_health_msg = []
+
     # Get node information object list
     retcode, node_list = pvc_node.get_list(zk_conn, None)

@@ -74,6 +79,36 @@ def getClusterInformation(zk_conn):
     ceph_volume_count = len(ceph_volume_list)
     ceph_snapshot_count = len(ceph_snapshot_list)

+    # Determinations for general cluster health
+    cluster_healthy_status = True
+    # Check for (n-1) overprovisioning
+    # Assume X nodes. If the total VM memory allocation (counting only running VMs) is greater than
+    # the total memory of the (n-1) smallest nodes, trigger this warning.
+    n_minus_1_total = 0
+    alloc_total = 0
+
+    node_largest_index = None
+    node_largest_count = 0
+    for index, node in enumerate(node_list):
+        node_mem_total = node['memory']['total']
+        node_mem_alloc = node['memory']['allocated']
+        alloc_total += node_mem_alloc
+
+        # Determine if this node is the largest seen so far
+        if node_mem_total > node_largest_count:
+            node_largest_index = index
+            node_largest_count = node_mem_total
+    n_minus_1_node_list = list()
+    for index, node in enumerate(node_list):
+        if index == node_largest_index:
+            continue
+        n_minus_1_node_list.append(node)
+    for index, node in enumerate(n_minus_1_node_list):
+        n_minus_1_total += node['memory']['total']
+    if alloc_total > n_minus_1_total:
+        cluster_healthy_status = False
+        cluster_health_msg.append("Total VM memory ({}) is overprovisioned (max {}) for (n-1) failure scenarios".format(alloc_total, n_minus_1_total))
+
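A worked example of the (n-1) check with three hypothetical nodes holding 64, 96 and 128 GiB of RAM: the largest node is excluded, so surviving capacity is 64 + 96 = 160, and any running-VM allocation above that trips the warning.

```python
node_totals = [64, 96, 128]                       # per-node total memory
alloc_total = 180                                 # memory allocated to running VMs
n_minus_1_total = sum(sorted(node_totals)[:-1])   # 160, largest node excluded
overprovisioned = alloc_total > n_minus_1_total   # True -> cluster Degraded
```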
     # Determinations for node health
     node_healthy_status = list(range(0, node_count))
     node_report_status = list(range(0, node_count))
@@ -82,6 +117,7 @@ def getClusterInformation(zk_conn):
         domain_state = node['domain_state']
         if daemon_state != 'run' and domain_state != 'ready':
             node_healthy_status[index] = False
+            cluster_health_msg.append("Node '{}' in {},{} state".format(node['name'], daemon_state, domain_state))
         else:
             node_healthy_status[index] = True
         node_report_status[index] = daemon_state + ',' + domain_state
@@ -93,6 +129,7 @@ def getClusterInformation(zk_conn):
         vm_state = vm['state']
         if vm_state not in ['start', 'disable', 'migrate', 'unmigrate', 'provision']:
             vm_healthy_status[index] = False
+            cluster_health_msg.append("VM '{}' in {} state".format(vm['name'], vm_state))
         else:
             vm_healthy_status[index] = True
         vm_report_status[index] = vm_state
@@ -111,27 +148,51 @@ def getClusterInformation(zk_conn):
         except KeyError:
             ceph_osd_in = 0

-        if not ceph_osd_up or not ceph_osd_in:
-            ceph_osd_healthy_status[index] = False
-        else:
-            ceph_osd_healthy_status[index] = True
         up_texts = { 1: 'up', 0: 'down' }
         in_texts = { 1: 'in', 0: 'out' }

+        if not ceph_osd_up or not ceph_osd_in:
+            ceph_osd_healthy_status[index] = False
+            cluster_health_msg.append('OSD {} in {},{} state'.format(ceph_osd['id'], up_texts[ceph_osd_up], in_texts[ceph_osd_in]))
+        else:
+            ceph_osd_healthy_status[index] = True
         ceph_osd_report_status[index] = up_texts[ceph_osd_up] + ',' + in_texts[ceph_osd_in]

     # Find out the overall cluster health; if any element of a healthy_status is false, it's unhealthy
     if maint_state == 'true':
         cluster_health = 'Maintenance'
-    elif False in node_healthy_status or False in vm_healthy_status or False in ceph_osd_healthy_status:
+    elif cluster_healthy_status is False or False in node_healthy_status or False in vm_healthy_status or False in ceph_osd_healthy_status:
         cluster_health = 'Degraded'
     else:
         cluster_health = 'Optimal'

     # Find out our storage health from Ceph
     ceph_status = zkhandler.readdata(zk_conn, '/ceph').split('\n')
     ceph_health = ceph_status[2].split()[-1]

+    # Parse the status output to get the health indicators
+    line_record = False
+    for index, line in enumerate(ceph_status):
+        if re.search('services:', line):
+            line_record = False
+        if line_record and len(line.strip()) > 0:
+            storage_health_msg.append(line.strip())
+        if re.search('health:', line):
+            line_record = True
+
+    if maint_state == 'true':
+        storage_health = 'Maintenance'
+    elif ceph_health != 'HEALTH_OK':
+        storage_health = 'Degraded'
+    else:
+        storage_health = 'Optimal'

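The parser records every non-empty line between the 'health:' and 'services:' markers of the stored `ceph status` text. A sketch of the same loop run over sample output (the status text is illustrative):

```python
sample = """  cluster:
    id: 1234
    health: HEALTH_WARN
            1 osds down
            Degraded data redundancy
  services:
    mon: 3 daemons
"""
msgs = []
recording = False
for line in sample.split('\n'):
    if 'services:' in line:
        recording = False
    if recording and line.strip():
        msgs.append(line.strip())
    if 'health:' in line:
        recording = True
# msgs == ['1 osds down', 'Degraded data redundancy']
```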
     # State lists
     node_state_combinations = [
         'run,ready', 'run,flush', 'run,flushed', 'run,unflush',
         'init,ready', 'init,flush', 'init,flushed', 'init,unflush',
-        'stop,ready', 'stop,flush', 'stop,flushed', 'stop,unflush'
+        'stop,ready', 'stop,flush', 'stop,flushed', 'stop,unflush',
+        'dead,ready', 'dead,flush', 'dead,flushed', 'dead,unflush'
     ]
     vm_state_combinations = [
         'start', 'restart', 'shutdown', 'stop', 'disable', 'fail', 'migrate', 'unmigrate', 'provision'
@@ -173,6 +234,9 @@ def getClusterInformation(zk_conn):
     # Format the status data
     cluster_information = {
         'health': cluster_health,
+        'health_msg': cluster_health_msg,
+        'storage_health': storage_health,
+        'storage_health_msg': storage_health_msg,
         'primary_node': common.getPrimaryNode(zk_conn),
         'upstream_ip': zkhandler.readdata(zk_conn, '/upstream_ip'),
         'nodes': formatted_node_states,
@@ -23,16 +23,47 @@
 import uuid
 import lxml
 import math
+import shlex
+import subprocess
+import kazoo.client
+from json import loads

 from distutils.util import strtobool

-import client_lib.zkhandler as zkhandler
+import daemon_lib.zkhandler as zkhandler

 ###############################################################################
 # Supplemental functions
 ###############################################################################

+#
+# Run a local OS command via shell
+#
+def run_os_command(command_string, background=False, environment=None, timeout=None, shell=False):
+    command = shlex.split(command_string)
+    try:
+        command_output = subprocess.run(
+            command,
+            shell=shell,
+            env=environment,
+            timeout=timeout,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+        )
+        retcode = command_output.returncode
+    except subprocess.TimeoutExpired:
+        retcode = 128
+
+    try:
+        stdout = command_output.stdout.decode('ascii')
+    except:
+        stdout = ''
+    try:
+        stderr = command_output.stderr.decode('ascii')
+    except:
+        stderr = ''
+    return retcode, stdout, stderr
+
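An example invocation of the new helper (the command shown is illustrative); note that a timeout surfaces as retcode 128 with empty output rather than as an exception:

```python
retcode, stdout, stderr = run_os_command('ceph health', timeout=10)
if retcode:
    print('command failed: {}'.format(stderr))
else:
    print(stdout.strip())
```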
 #
 # Validate a UUID
 #
@@ -87,14 +118,14 @@ def getDomainMainDetails(parsed_xml):
         ddescription = "N/A"
     dname = str(parsed_xml.name)
     dmemory = str(parsed_xml.memory)
-    dmemory_unit = str(parsed_xml.memory.attrib['unit'])
+    dmemory_unit = str(parsed_xml.memory.attrib.get('unit'))
     if dmemory_unit == 'KiB':
         dmemory = int(int(dmemory) / 1024)
     elif dmemory_unit == 'GiB':
         dmemory = int(int(dmemory) * 1024)
     dvcpu = str(parsed_xml.vcpu)
     try:
-        dvcputopo = '{}/{}/{}'.format(parsed_xml.cpu.topology.attrib['sockets'], parsed_xml.cpu.topology.attrib['cores'], parsed_xml.cpu.topology.attrib['threads'])
+        dvcputopo = '{}/{}/{}'.format(parsed_xml.cpu.topology.attrib.get('sockets'), parsed_xml.cpu.topology.attrib.get('cores'), parsed_xml.cpu.topology.attrib.get('threads'))
     except:
         dvcputopo = 'N/A'

@@ -117,25 +148,52 @@ def getDomainExtraDetails(parsed_xml):
 #
 def getDomainCPUFeatures(parsed_xml):
     dfeatures = []
-    for feature in parsed_xml.features.getchildren():
-        dfeatures.append(feature.tag)
+    try:
+        for feature in parsed_xml.features.getchildren():
+            dfeatures.append(feature.tag)
+    except:
+        pass

     return dfeatures

 #
 # Get disk devices
 #
-def getDomainDisks(parsed_xml):
+def getDomainDisks(parsed_xml, stats_data):
     ddisks = []
     for device in parsed_xml.devices.getchildren():
         if device.tag == 'disk':
             disk_attrib = device.source.attrib
             disk_target = device.target.attrib
-            disk_type = device.attrib['type']
+            disk_type = device.attrib.get('type')
+            disk_stats_list = [x for x in stats_data.get('disk_stats', []) if x.get('name') == disk_attrib.get('name')]
+            try:
+                disk_stats = disk_stats_list[0]
+            except:
+                disk_stats = {}
+
             if disk_type == 'network':
-                disk_obj = { 'type': disk_attrib.get('protocol'), 'name': disk_attrib.get('name'), 'dev': disk_target.get('dev'), 'bus': disk_target.get('bus') }
+                disk_obj = {
+                    'type': disk_attrib.get('protocol'),
+                    'name': disk_attrib.get('name'),
+                    'dev': disk_target.get('dev'),
+                    'bus': disk_target.get('bus'),
+                    'rd_req': disk_stats.get('rd_req', 0),
+                    'rd_bytes': disk_stats.get('rd_bytes', 0),
+                    'wr_req': disk_stats.get('wr_req', 0),
+                    'wr_bytes': disk_stats.get('wr_bytes', 0)
+                }
             elif disk_type == 'file':
-                disk_obj = { 'type': 'file', 'name': disk_attrib.get('file'), 'dev': disk_target.get('dev'), 'bus': disk_target.get('bus') }
+                disk_obj = {
+                    'type': 'file',
+                    'name': disk_attrib.get('file'),
+                    'dev': disk_target.get('dev'),
+                    'bus': disk_target.get('bus'),
+                    'rd_req': disk_stats.get('rd_req', 0),
+                    'rd_bytes': disk_stats.get('rd_bytes', 0),
+                    'wr_req': disk_stats.get('wr_req', 0),
+                    'wr_bytes': disk_stats.get('wr_bytes', 0)
+                }
             else:
                 disk_obj = {}
             ddisks.append(disk_obj)
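A sketch of feeding getDomainDisks() a parsed libvirt XML snippet plus an empty stats dict; the domain fragment is a minimal illustrative example, not taken from the diff:

```python
import lxml.objectify

xml = """<domain>
  <devices>
    <disk type='network' device='disk'>
      <source protocol='rbd' name='vms/testvol'/>
      <target dev='sda' bus='virtio'/>
    </disk>
  </devices>
</domain>"""
parsed = lxml.objectify.fromstring(xml)
disks = getDomainDisks(parsed, {})
# [{'type': 'rbd', 'name': 'vms/testvol', 'dev': 'sda', 'bus': 'virtio',
#   'rd_req': 0, 'rd_bytes': 0, 'wr_req': 0, 'wr_bytes': 0}]
```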
@@ -194,13 +252,18 @@ def getInformationFromXML(zk_conn, uuid):

     parsed_xml = getDomainXML(zk_conn, uuid)

+    try:
+        stats_data = loads(zkhandler.readdata(zk_conn, '/domains/{}/stats'.format(uuid)))
+    except:
+        stats_data = {}
+
     domain_uuid, domain_name, domain_description, domain_memory, domain_vcpu, domain_vcputopo = getDomainMainDetails(parsed_xml)
-    domain_networks = getDomainNetworks(parsed_xml)
+    domain_networks = getDomainNetworks(parsed_xml, stats_data)

     domain_type, domain_arch, domain_machine, domain_console, domain_emulator = getDomainExtraDetails(parsed_xml)

     domain_features = getDomainCPUFeatures(parsed_xml)
-    domain_disks = getDomainDisks(parsed_xml)
+    domain_disks = getDomainDisks(parsed_xml, stats_data)
     domain_controllers = getDomainControllers(parsed_xml)

     if domain_lastnode:
@@ -222,8 +285,10 @@ def getInformationFromXML(zk_conn, uuid):
         'description': domain_description,
         'profile': domain_profile,
         'memory': int(domain_memory),
+        'memory_stats': stats_data.get('mem_stats', {}),
         'vcpu': int(domain_vcpu),
         'vcpu_topology': domain_vcputopo,
+        'vcpu_stats': stats_data.get('cpu_stats', {}),
         'networks': domain_networks,
         'type': domain_type,
         'arch': domain_arch,
@@ -241,15 +306,53 @@ def getInformationFromXML(zk_conn, uuid):
 #
 # Get network devices
 #
-def getDomainNetworks(parsed_xml):
+def getDomainNetworks(parsed_xml, stats_data):
     dnets = []
     for device in parsed_xml.devices.getchildren():
         if device.tag == 'interface':
-            net_type = device.attrib['type']
-            net_mac = device.mac.attrib['address']
-            net_bridge = device.source.attrib[net_type]
-            net_model = device.model.attrib['type']
-            net_obj = { 'type': net_type, 'mac': net_mac, 'source': net_bridge, 'model': net_model }
+            try:
+                net_type = device.attrib.get('type')
+            except:
+                net_type = None
+            try:
+                net_mac = device.mac.attrib.get('address')
+            except:
+                net_mac = None
+            try:
+                net_bridge = device.source.attrib.get(net_type)
+            except:
+                net_bridge = None
+            try:
+                net_model = device.model.attrib.get('type')
+            except:
+                net_model = None
+            try:
+                net_stats_list = [x for x in stats_data.get('net_stats', []) if x.get('bridge') == net_bridge]
+                net_stats = net_stats_list[0]
+            except:
+                net_stats = {}
+            net_rd_bytes = net_stats.get('rd_bytes', 0)
+            net_rd_packets = net_stats.get('rd_packets', 0)
+            net_rd_errors = net_stats.get('rd_errors', 0)
+            net_rd_drops = net_stats.get('rd_drops', 0)
+            net_wr_bytes = net_stats.get('wr_bytes', 0)
+            net_wr_packets = net_stats.get('wr_packets', 0)
+            net_wr_errors = net_stats.get('wr_errors', 0)
+            net_wr_drops = net_stats.get('wr_drops', 0)
+            net_obj = {
+                'type': net_type,
+                'mac': net_mac,
+                'source': net_bridge,
+                'model': net_model,
+                'rd_bytes': net_rd_bytes,
+                'rd_packets': net_rd_packets,
+                'rd_errors': net_rd_errors,
+                'rd_drops': net_rd_drops,
+                'wr_bytes': net_wr_bytes,
+                'wr_packets': net_wr_packets,
+                'wr_errors': net_wr_errors,
+                'wr_drops': net_wr_drops
+            }
             dnets.append(net_obj)

     return dnets
@@ -261,9 +364,9 @@ def getDomainControllers(parsed_xml):
     dcontrollers = []
     for device in parsed_xml.devices.getchildren():
         if device.tag == 'controller':
-            controller_type = device.attrib['type']
+            controller_type = device.attrib.get('type')
             try:
-                controller_model = device.attrib['model']
+                controller_model = device.attrib.get('model')
             except KeyError:
                 controller_model = 'none'
             controller_obj = { 'type': controller_type, 'model': controller_model }
@@ -363,19 +466,19 @@ def getNodes(zk_conn, node_limit, dom_uuid):

 # via free memory (relative to allocated memory)
 def findTargetNodeMem(zk_conn, node_limit, dom_uuid):
-    most_allocfree = 0
+    most_provfree = 0
     target_node = None

     node_list = getNodes(zk_conn, node_limit, dom_uuid)
     for node in node_list:
-        memalloc = int(zkhandler.readdata(zk_conn, '/nodes/{}/memalloc'.format(node)))
+        memprov = int(zkhandler.readdata(zk_conn, '/nodes/{}/memprov'.format(node)))
         memused = int(zkhandler.readdata(zk_conn, '/nodes/{}/memused'.format(node)))
         memfree = int(zkhandler.readdata(zk_conn, '/nodes/{}/memfree'.format(node)))
         memtotal = memused + memfree
-        allocfree = memtotal - memalloc
+        provfree = memtotal - memprov

-        if allocfree > most_allocfree:
-            most_allocfree = allocfree
+        if provfree > most_provfree:
+            most_provfree = provfree
             target_node = node

     return target_node
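The switch from allocfree to provfree changes which node wins when allocation and provisioning diverge. A worked example with two hypothetical nodes of equal 32000 MiB total:

```python
nodes = {
    'node1': {'memused': 4000, 'memfree': 28000, 'memalloc': 8000,  'memprov': 30000},
    'node2': {'memused': 6000, 'memfree': 26000, 'memalloc': 16000, 'memprov': 20000},
}

def provfree(name):
    info = nodes[name]
    return (info['memused'] + info['memfree']) - info['memprov']

target = max(nodes, key=provfree)
# target == 'node2' (provfree 12000 vs node1's 2000); the old allocfree
# metric (24000 vs 16000) would have picked node1 instead
```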
@@ -34,9 +34,9 @@ import lxml.objectify
 import configparser
 import kazoo.client

-import client_lib.ansiprint as ansiprint
-import client_lib.zkhandler as zkhandler
-import client_lib.common as common
+import daemon_lib.ansiprint as ansiprint
+import daemon_lib.zkhandler as zkhandler
+import daemon_lib.common as common

 #
 # Cluster search functions
@@ -168,11 +168,19 @@ def getNetworkInformation(zk_conn, vni):
     return network_information

 def getDHCPLeaseInformation(zk_conn, vni, mac_address):
-    hostname = zkhandler.readdata(zk_conn, '/networks/{}/dhcp4_leases/{}/hostname'.format(vni, mac_address))
-    ip4_address = zkhandler.readdata(zk_conn, '/networks/{}/dhcp4_leases/{}/ipaddr'.format(vni, mac_address))
+    # Check whether this is a dynamic or static lease
     try:
-        timestamp = zkhandler.readdata(zk_conn, '/networks/{}/dhcp4_leases/{}/expiry'.format(vni, mac_address))
-    except:
+        zkhandler.readdata(zk_conn, '/networks/{}/dhcp4_leases/{}'.format(vni, mac_address))
+        type_key = 'dhcp4_leases'
+    except kazoo.exceptions.NoNodeError:
+        zkhandler.readdata(zk_conn, '/networks/{}/dhcp4_reservations/{}'.format(vni, mac_address))
+        type_key = 'dhcp4_reservations'
+
+    hostname = zkhandler.readdata(zk_conn, '/networks/{}/{}/{}/hostname'.format(vni, type_key, mac_address))
+    ip4_address = zkhandler.readdata(zk_conn, '/networks/{}/{}/{}/ipaddr'.format(vni, type_key, mac_address))
+    if type_key == 'dhcp4_leases':
+        timestamp = zkhandler.readdata(zk_conn, '/networks/{}/{}/{}/expiry'.format(vni, type_key, mac_address))
+    else:
+        timestamp = 'static'

     # Construct a data structure to represent the data
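The lease/reservation probe relies on kazoo raising NoNodeError for a missing path. A condensed sketch of the same pattern against a raw KazooClient `zk` (paths illustrative):

```python
import kazoo.exceptions

def lease_type(zk, vni, mac):
    try:
        zk.get('/networks/{}/dhcp4_leases/{}'.format(vni, mac))
        return 'dhcp4_leases'
    except kazoo.exceptions.NoNodeError:
        # Raises again, intentionally, if neither node exists
        zk.get('/networks/{}/dhcp4_reservations/{}'.format(vni, mac))
        return 'dhcp4_reservations'
```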
@@ -549,7 +557,8 @@ def get_list_dhcp(zk_conn, network, limit, only_static=False, is_fuzzy=True):
         full_dhcp_list = getNetworkDHCPReservations(zk_conn, net_vni)
         reservations = True
     else:
-        full_dhcp_list = getNetworkDHCPLeases(zk_conn, net_vni)
+        full_dhcp_list = getNetworkDHCPReservations(zk_conn, net_vni)
+        full_dhcp_list += getNetworkDHCPLeases(zk_conn, net_vni)
         reservations = False

     if limit:
@@ -565,7 +574,6 @@ def get_list_dhcp(zk_conn, network, limit, only_static=False, is_fuzzy=True):
         except Exception as e:
             return False, 'Regex Error: {}'.format(e)

-
     for lease in full_dhcp_list:
         valid_lease = False
         if limit:
@@ -579,7 +587,6 @@ def get_list_dhcp(zk_conn, network, limit, only_static=False, is_fuzzy=True):
         if valid_lease:
             dhcp_list.append(getDHCPLeaseInformation(zk_conn, net_vni, lease))

-    #output_string = formatDHCPLeaseList(zk_conn, net_vni, dhcp_list, reservations=reservations)
     return True, dhcp_list

 def get_list_acl(zk_conn, network, limit, direction, is_fuzzy=True):
@@ -34,10 +34,10 @@ import lxml.objectify
 import configparser
 import kazoo.client

-import client_lib.ansiprint as ansiprint
-import client_lib.zkhandler as zkhandler
-import client_lib.common as common
-import client_lib.vm as pvc_vm
+import daemon_lib.ansiprint as ansiprint
+import daemon_lib.zkhandler as zkhandler
+import daemon_lib.common as common
+import daemon_lib.vm as pvc_vm

 def getNodeInformation(zk_conn, node_name):
     """
@@ -54,6 +54,7 @@ def getNodeInformation(zk_conn, node_name):
     node_vcpu_allocated = int(zkhandler.readdata(zk_conn, 'nodes/{}/vcpualloc'.format(node_name)))
     node_mem_total = int(zkhandler.readdata(zk_conn, '/nodes/{}/memtotal'.format(node_name)))
     node_mem_allocated = int(zkhandler.readdata(zk_conn, '/nodes/{}/memalloc'.format(node_name)))
+    node_mem_provisioned = int(zkhandler.readdata(zk_conn, '/nodes/{}/memprov'.format(node_name)))
     node_mem_used = int(zkhandler.readdata(zk_conn, '/nodes/{}/memused'.format(node_name)))
     node_mem_free = int(zkhandler.readdata(zk_conn, '/nodes/{}/memfree'.format(node_name)))
     node_load = float(zkhandler.readdata(zk_conn, '/nodes/{}/cpuload'.format(node_name)))
@@ -80,6 +81,7 @@ def getNodeInformation(zk_conn, node_name):
         'memory': {
             'total': node_mem_total,
             'allocated': node_mem_allocated,
+            'provisioned': node_mem_provisioned,
             'used': node_mem_used,
             'free': node_mem_free
         }
@@ -143,7 +145,7 @@ def primary_node(zk_conn, node):

     return True, retmsg

-def flush_node(zk_conn, node, wait):
+def flush_node(zk_conn, node, wait=False):
     # Verify node is valid
     if not common.verifyNode(zk_conn, node):
         return False, 'ERROR: No node named "{}" is present in the cluster.'.format(node)
@@ -155,7 +157,6 @@ def flush_node(zk_conn, node, wait):
         '/nodes/{}/domainstate'.format(node): 'flush'
     })

-    # Wait cannot be triggered from the API
     if wait:
         while zkhandler.readdata(zk_conn, '/nodes/{}/domainstate'.format(node)) == 'flush':
             time.sleep(1)
@@ -163,7 +164,7 @@ def flush_node(zk_conn, node, wait):

     return True, retmsg

-def ready_node(zk_conn, node, wait):
+def ready_node(zk_conn, node, wait=False):
     # Verify node is valid
     if not common.verifyNode(zk_conn, node):
         return False, 'ERROR: No node named "{}" is present in the cluster.'.format(node)
@@ -175,7 +176,6 @@ def ready_node(zk_conn, node, wait):
         '/nodes/{}/domainstate'.format(node): 'unflush'
     })

-    # Wait cannot be triggered from the API
     if wait:
         while zkhandler.readdata(zk_conn, '/nodes/{}/domainstate'.format(node)) == 'unflush':
             time.sleep(1)
@@ -195,7 +195,7 @@ def get_info(zk_conn, node):

     return True, node_information

-def get_list(zk_conn, limit, is_fuzzy=True):
+def get_list(zk_conn, limit, daemon_state=None, coordinator_state=None, domain_state=None, is_fuzzy=True):
     node_list = []
     full_node_list = zkhandler.listchildren(zk_conn, '/nodes')

@@ -212,6 +212,20 @@ def get_list(zk_conn, limit, is_fuzzy=True):
         else:
             node_list.append(getNodeInformation(zk_conn, node))

+    if daemon_state or coordinator_state or domain_state:
+        limited_node_list = []
+        for node in node_list:
+            add_node = False
+            if daemon_state and node['daemon_state'] == daemon_state:
+                add_node = True
+            if coordinator_state and node['coordinator_state'] == coordinator_state:
+                add_node = True
+            if domain_state and node['domain_state'] == domain_state:
+                add_node = True
+            if add_node:
+                limited_node_list.append(node)
+        node_list = limited_node_list
+
     return True, node_list

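The new keyword filters compose with OR semantics: a node matching any supplied state is kept. Illustrative calls (the state values shown are ones this codebase uses elsewhere):

```python
retcode, flushed_nodes = get_list(zk_conn, None, domain_state='flushed')
retcode, primaries = get_list(zk_conn, None, coordinator_state='primary')
```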
 #
@@ -251,24 +265,25 @@ def format_info(node_information, long_output):
     # Format a nice output; do this line-by-line then concat the elements at the end
     ainformation = []
     # Basic information
     ainformation.append('{}Name:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['name']))
     ainformation.append('{}Daemon State:{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), daemon_state_colour, node_information['daemon_state'], ansiprint.end()))
     ainformation.append('{}Coordinator State:{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), coordinator_state_colour, node_information['coordinator_state'], ansiprint.end()))
     ainformation.append('{}Domain State:{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), domain_state_colour, node_information['domain_state'], ansiprint.end()))
     ainformation.append('{}Active VM Count:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['domains_count']))
     if long_output:
         ainformation.append('')
         ainformation.append('{}Architecture:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['arch']))
         ainformation.append('{}Operating System:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['os']))
         ainformation.append('{}Kernel Version:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['kernel']))
     ainformation.append('')
     ainformation.append('{}Host CPUs:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['vcpu']['total']))
     ainformation.append('{}vCPUs:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['vcpu']['allocated']))
     ainformation.append('{}Load:{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['load']))
     ainformation.append('{}Total RAM (MiB):{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['total']))
     ainformation.append('{}Used RAM (MiB):{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['used']))
     ainformation.append('{}Free RAM (MiB):{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['free']))
     ainformation.append('{}Allocated RAM (MiB):{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['allocated']))
+    ainformation.append('{}Provisioned RAM (MiB):{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['provisioned']))

     # Join it all together
     information = '\n'.join(ainformation)
@@ -291,6 +306,7 @@ def format_list(node_list):
     mem_used_length = 5
     mem_free_length = 5
     mem_alloc_length = 4
+    mem_prov_length = 4
     for node_information in node_list:
         # node_name column
         _node_name_length = len(node_information['name']) + 1
@@ -336,13 +352,18 @@ def format_list(node_list):
         _mem_alloc_length = len(str(node_information['memory']['allocated'])) + 1
         if _mem_alloc_length > mem_alloc_length:
             mem_alloc_length = _mem_alloc_length
+        # mem_prov column
+        _mem_prov_length = len(str(node_information['memory']['provisioned'])) + 1
+        if _mem_prov_length > mem_prov_length:
+            mem_prov_length = _mem_prov_length

     # Format the string (header)
     node_list_output.append(
         '{bold}{node_name: <{node_name_length}} \
St: {daemon_state_colour}{node_daemon_state: <{daemon_state_length}}{end_colour} {coordinator_state_colour}{node_coordinator_state: <{coordinator_state_length}}{end_colour} {domain_state_colour}{node_domain_state: <{domain_state_length}}{end_colour} \
Res: {node_domains_count: <{domains_count_length}} {node_cpu_count: <{cpu_count_length}} {node_load: <{load_length}} \
-Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length}} {node_mem_free: <{mem_free_length}} {node_mem_allocated: <{mem_alloc_length}}{end_bold}'.format(
+Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length}} {node_mem_free: <{mem_free_length}} {node_mem_allocated: <{mem_alloc_length}} {node_mem_provisioned: <{mem_prov_length}}{end_bold}'.format(
             node_name_length=node_name_length,
             daemon_state_length=daemon_state_length,
             coordinator_state_length=coordinator_state_length,
@@ -354,6 +375,7 @@ Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length
             mem_used_length=mem_used_length,
             mem_free_length=mem_free_length,
             mem_alloc_length=mem_alloc_length,
+            mem_prov_length=mem_prov_length,
             bold=ansiprint.bold(),
             end_bold=ansiprint.end(),
             daemon_state_colour='',
@@ -370,7 +392,8 @@ Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length
             node_mem_total='Total',
             node_mem_used='Used',
             node_mem_free='Free',
-            node_mem_allocated='VMs'
+            node_mem_allocated='VMs Run',
+            node_mem_provisioned='VMs Total'
         )
     )

@@ -381,7 +404,7 @@ Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length
             '{bold}{node_name: <{node_name_length}} \
{daemon_state_colour}{node_daemon_state: <{daemon_state_length}}{end_colour} {coordinator_state_colour}{node_coordinator_state: <{coordinator_state_length}}{end_colour} {domain_state_colour}{node_domain_state: <{domain_state_length}}{end_colour} \
{node_domains_count: <{domains_count_length}} {node_cpu_count: <{cpu_count_length}} {node_load: <{load_length}} \
-{node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length}} {node_mem_free: <{mem_free_length}} {node_mem_allocated: <{mem_alloc_length}}{end_bold}'.format(
+{node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length}} {node_mem_free: <{mem_free_length}} {node_mem_allocated: <{mem_alloc_length}} {node_mem_provisioned: <{mem_prov_length}}{end_bold}'.format(
                 node_name_length=node_name_length,
                 daemon_state_length=daemon_state_length,
                 coordinator_state_length=coordinator_state_length,
@@ -393,6 +416,7 @@ Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length
                 mem_used_length=mem_used_length,
                 mem_free_length=mem_free_length,
                 mem_alloc_length=mem_alloc_length,
+                mem_prov_length=mem_prov_length,
                 bold='',
                 end_bold='',
                 daemon_state_colour=daemon_state_colour,
@@ -409,7 +433,8 @@ Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length
                 node_mem_total=node_information['memory']['total'],
                 node_mem_used=node_information['memory']['used'],
                 node_mem_free=node_information['memory']['free'],
-                node_mem_allocated=node_information['memory']['allocated']
+                node_mem_allocated=node_information['memory']['allocated'],
+                node_mem_provisioned=node_information['memory']['provisioned']
             )
         )

@@ -35,11 +35,11 @@ import kazoo.client

 from collections import deque

-import client_lib.ansiprint as ansiprint
-import client_lib.zkhandler as zkhandler
-import client_lib.common as common
+import daemon_lib.ansiprint as ansiprint
+import daemon_lib.zkhandler as zkhandler
+import daemon_lib.common as common

-import client_lib.ceph as ceph
+import daemon_lib.ceph as ceph

 #
 # Cluster search functions
@@ -179,7 +179,7 @@ def define_vm(zk_conn, config_data, target_node, node_limit, node_selector, node
         return False, 'ERROR: Specified node "{}" is invalid.'.format(target_node)

     # Obtain the RBD disk list using the common functions
-    ddisks = common.getDomainDisks(parsed_xml)
+    ddisks = common.getDomainDisks(parsed_xml, {})
     rbd_list = []
     for disk in ddisks:
         if disk['type'] == 'rbd':
@@ -248,15 +248,38 @@ def modify_vm(zk_conn, domain, restart, new_vm_config):
         return False, 'ERROR: Could not find VM "{}" in the cluster!'.format(domain)
     dom_name = getDomainName(zk_conn, domain)

+    # Parse and validate the XML
+    try:
+        parsed_xml = lxml.objectify.fromstring(new_vm_config)
+    except:
+        return False, 'ERROR: Failed to parse XML data.'
+
+    # Obtain the RBD disk list using the common functions
+    ddisks = common.getDomainDisks(parsed_xml, {})
+    rbd_list = []
+    for disk in ddisks:
+        if disk['type'] == 'rbd':
+            rbd_list.append(disk['name'])
+
+    # Join the RBD list
+    if isinstance(rbd_list, list) and rbd_list:
+        formatted_rbd_list = ','.join(rbd_list)
+    else:
+        formatted_rbd_list = ''
+
     # Add the modified config to Zookeeper
     zk_data = {
         '/domains/{}'.format(dom_uuid): dom_name,
+        '/domains/{}/rbdlist'.format(dom_uuid): formatted_rbd_list,
         '/domains/{}/xml'.format(dom_uuid): new_vm_config
     }
     zkhandler.writedata(zk_conn, zk_data)

     if restart:
-        zkhandler.writedata(zk_conn, {'/domains/{}/state'.format(dom_uuid): 'restart'})
+        lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid))
+        lock.acquire()
+        zkhandler.writedata(zk_conn, { '/domains/{}/state'.format(dom_uuid): 'restart' })
+        lock.release()

     return True, ''

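State writes are now serialized through an exclusive lock on the domain's state key. Assuming zkhandler.exclusivelock() wraps a kazoo Lock, the equivalent pattern against a raw KazooClient `zk` looks like this (path and payload illustrative):

```python
lock = zk.Lock('/domains/{}/state'.format(dom_uuid))
lock.acquire()
try:
    zk.set('/domains/{}/state'.format(dom_uuid), b'restart')
finally:
    lock.release()
```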
@@ -270,13 +293,7 @@ def dump_vm(zk_conn, domain):

     return True, vm_xml

-def purge_vm(zk_conn, domain, is_cli=False):
-    """
-    Helper function for both undefine and remove VM to perform the shutdown, termination,
-    and configuration deletion.
-    """
-
-def undefine_vm(zk_conn, domain, is_cli=False):
+def undefine_vm(zk_conn, domain):
     # Validate that VM exists in cluster
     dom_uuid = getDomainUUID(zk_conn, domain)
     if not dom_uuid:
@@ -285,30 +302,25 @@ def undefine_vm(zk_conn, domain):
     # Shut down the VM
     current_vm_state = zkhandler.readdata(zk_conn, '/domains/{}/state'.format(dom_uuid))
     if current_vm_state != 'stop':
-        if is_cli:
-            click.echo('Forcibly stopping VM "{}".'.format(domain))
         # Set the domain into stop mode
-        zkhandler.writedata(zk_conn, {'/domains/{}/state'.format(dom_uuid): 'stop'})
+        lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid))
+        lock.acquire()
+        zkhandler.writedata(zk_conn, { '/domains/{}/state'.format(dom_uuid): 'stop' })
+        lock.release()

-        # Wait for 1 second to allow state to flow to all nodes
-        if is_cli:
-            click.echo('Waiting for cluster to update.')
+        # Wait for 2 seconds to allow state to flow to all nodes
         time.sleep(2)

     # Gracefully terminate the class instances
-    if is_cli:
-        click.echo('Deleting VM "{}" from nodes.'.format(domain))
     zkhandler.writedata(zk_conn, {'/domains/{}/state'.format(dom_uuid): 'delete'})
     time.sleep(2)

     # Delete the configurations
-    if is_cli:
-        click.echo('Undefining VM "{}".'.format(domain))
     zkhandler.deletekey(zk_conn, '/domains/{}'.format(dom_uuid))

     return True, 'Undefined VM "{}" from the cluster.'.format(domain)

-def remove_vm(zk_conn, domain, is_cli=False):
+def remove_vm(zk_conn, domain):
     # Validate that VM exists in cluster
     dom_uuid = getDomainUUID(zk_conn, domain)
     if not dom_uuid:
@@ -319,25 +331,20 @@ def remove_vm(zk_conn, domain):

     # Shut down the VM
     current_vm_state = zkhandler.readdata(zk_conn, '/domains/{}/state'.format(dom_uuid))
     if current_vm_state != 'stop':
-        if is_cli:
-            click.echo('Forcibly stopping VM "{}".'.format(domain))
         # Set the domain into stop mode
-        zkhandler.writedata(zk_conn, {'/domains/{}/state'.format(dom_uuid): 'stop'})
+        lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid))
+        lock.acquire()
+        zkhandler.writedata(zk_conn, { '/domains/{}/state'.format(dom_uuid): 'stop' })
+        lock.release()

-        # Wait for 1 second to allow state to flow to all nodes
-        if is_cli:
-            click.echo('Waiting for cluster to update.')
+        # Wait for 2 seconds to allow state to flow to all nodes
         time.sleep(2)

     # Gracefully terminate the class instances
-    if is_cli:
-        click.echo('Deleting VM "{}" from nodes.'.format(domain))
     zkhandler.writedata(zk_conn, {'/domains/{}/state'.format(dom_uuid): 'delete'})
     time.sleep(2)

     # Delete the configurations
-    if is_cli:
-        click.echo('Undefining VM "{}".'.format(domain))
     zkhandler.deletekey(zk_conn, '/domains/{}'.format(dom_uuid))
     time.sleep(2)

@@ -347,8 +354,6 @@ def remove_vm(zk_conn, domain):
         try:
             disk_pool, disk_name = disk.split('/')
             retcode, message = ceph.remove_volume(zk_conn, disk_pool, disk_name)
-            if is_cli and message:
-                click.echo('{}'.format(message))
         except ValueError:
             continue

@@ -361,11 +366,14 @@ def start_vm(zk_conn, domain):
         return False, 'ERROR: Could not find VM "{}" in the cluster!'.format(domain)

     # Set the VM to start
-    zkhandler.writedata(zk_conn, {'/domains/{}/state'.format(dom_uuid): 'start'})
+    lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid))
+    lock.acquire()
+    zkhandler.writedata(zk_conn, { '/domains/{}/state'.format(dom_uuid): 'start' })
+    lock.release()

     return True, 'Starting VM "{}".'.format(domain)

-def restart_vm(zk_conn, domain):
+def restart_vm(zk_conn, domain, wait=False):
     # Validate that VM exists in cluster
     dom_uuid = getDomainUUID(zk_conn, domain)
     if not dom_uuid:
@@ -376,12 +384,22 @@ def restart_vm(zk_conn, domain):
     if current_state != 'start':
         return False, 'ERROR: VM "{}" is not in "start" state!'.format(domain)

-    # Set the VM to start
-    zkhandler.writedata(zk_conn, {'/domains/{}/state'.format(dom_uuid): 'restart'})
+    retmsg = 'Restarting VM "{}".'.format(domain)

-    return True, 'Restarting VM "{}".'.format(domain)
+    # Set the VM to restart
+    lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid))
+    lock.acquire()
+    zkhandler.writedata(zk_conn, { '/domains/{}/state'.format(dom_uuid): 'restart' })
+    lock.release()

-def shutdown_vm(zk_conn, domain):
+    if wait:
+        while zkhandler.readdata(zk_conn, '/domains/{}/state'.format(dom_uuid)) == 'restart':
+            time.sleep(1)
+        retmsg = 'Restarted VM "{}"'.format(domain)
+
+    return True, retmsg
+
+def shutdown_vm(zk_conn, domain, wait=False):
     # Validate that VM exists in cluster
     dom_uuid = getDomainUUID(zk_conn, domain)
     if not dom_uuid:
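A note on the new `wait` flag: when set, these functions block by polling the Zookeeper state key once per second until the transient state clears, then return a past-tense message. An illustrative call, assuming a connected `zk_conn` and a VM named `testvm`:

```python
# Block until the node daemon has actually completed the restart.
success, message = restart_vm(zk_conn, 'testvm', wait=True)
print(message)  # 'Restarted VM "testvm"' on success
```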
@@ -392,10 +410,20 @@ def shutdown_vm(zk_conn, domain):
     if current_state != 'start':
         return False, 'ERROR: VM "{}" is not in "start" state!'.format(domain)

+    retmsg = 'Shutting down VM "{}"'.format(domain)
+
     # Set the VM to shutdown
-    zkhandler.writedata(zk_conn, {'/domains/{}/state'.format(dom_uuid): 'shutdown'})
+    lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid))
+    lock.acquire()
+    zkhandler.writedata(zk_conn, { '/domains/{}/state'.format(dom_uuid): 'shutdown' })
+    lock.release()

-    return True, 'Shutting down VM "{}".'.format(domain)
+    if wait:
+        while zkhandler.readdata(zk_conn, '/domains/{}/state'.format(dom_uuid)) == 'shutdown':
+            time.sleep(1)
+        retmsg = 'Shut down VM "{}"'.format(domain)
+
+    return True, retmsg

 def stop_vm(zk_conn, domain):
     # Validate that VM exists in cluster
@@ -407,7 +435,10 @@ def stop_vm(zk_conn, domain):
     current_state = zkhandler.readdata(zk_conn, '/domains/{}/state'.format(dom_uuid))

     # Set the VM to start
-    zkhandler.writedata(zk_conn, {'/domains/{}/state'.format(dom_uuid): 'stop'})
+    lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid))
+    lock.acquire()
+    zkhandler.writedata(zk_conn, { '/domains/{}/state'.format(dom_uuid): 'stop' })
+    lock.release()

     return True, 'Forcibly stopping VM "{}".'.format(domain)
@@ -423,16 +454,30 @@ def disable_vm(zk_conn, domain):
         return False, 'ERROR: VM "{}" must be stopped before disabling!'.format(domain)

     # Set the VM to start
-    zkhandler.writedata(zk_conn, {'/domains/{}/state'.format(dom_uuid): 'disable'})
+    lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid))
+    lock.acquire()
+    zkhandler.writedata(zk_conn, { '/domains/{}/state'.format(dom_uuid): 'disable' })
+    lock.release()

     return True, 'Marked VM "{}" as disable.'.format(domain)

-def move_vm(zk_conn, domain, target_node):
+def move_vm(zk_conn, domain, target_node, wait=False, force_live=False):
     # Validate that VM exists in cluster
     dom_uuid = getDomainUUID(zk_conn, domain)
     if not dom_uuid:
         return False, 'ERROR: Could not find VM "{}" in the cluster!'.format(domain)

+    # Get state and verify we're OK to proceed
+    current_state = zkhandler.readdata(zk_conn, '/domains/{}/state'.format(dom_uuid))
+    if current_state != 'start':
+        # If the current state isn't start, preserve it; we're not doing live migration
+        target_state = current_state
+    else:
+        if force_live:
+            target_state = 'migrate-live'
+        else:
+            target_state = 'migrate'
+
     current_node = zkhandler.readdata(zk_conn, '/domains/{}/node'.format(dom_uuid))

     if not target_node:
@@ -450,27 +495,35 @@ def move_vm(zk_conn, domain, target_node):

     # Verify if node is current node
     if target_node == current_node:
         last_node = zkhandler.readdata(zk_conn, '/domains/{}/lastnode'.format(dom_uuid))
         if last_node:
             zkhandler.writedata(zk_conn, {'/domains/{}/lastnode'.format(dom_uuid): ''})
             return True, 'Making temporary migration permanent for VM "{}".'.format(domain)

         return False, 'ERROR: VM "{}" is already running on node "{}".'.format(domain, current_node)

     if not target_node:
         return False, 'ERROR: Could not find a valid migration target for VM "{}".'.format(domain)

-    current_vm_state = zkhandler.readdata(zk_conn, '/domains/{}/state'.format(dom_uuid))
-    if current_vm_state == 'start':
-        zkhandler.writedata(zk_conn, {
-            '/domains/{}/state'.format(dom_uuid): 'migrate',
-            '/domains/{}/node'.format(dom_uuid): target_node,
-            '/domains/{}/lastnode'.format(dom_uuid): ''
-        })
-    else:
-        zkhandler.writedata(zk_conn, {
+    retmsg = 'Permanently migrating VM "{}" to node "{}".'.format(domain, target_node)
+
+    lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid))
+    lock.acquire()
+    zkhandler.writedata(zk_conn, {
+        '/domains/{}/state'.format(dom_uuid): target_state,
         '/domains/{}/node'.format(dom_uuid): target_node,
         '/domains/{}/lastnode'.format(dom_uuid): ''
     })
+    lock.release()

-    return True, 'Permanently migrating VM "{}" to node "{}".'.format(domain, target_node)
+    if wait:
+        while zkhandler.readdata(zk_conn, '/domains/{}/state'.format(dom_uuid)) == target_state:
+            time.sleep(1)
+        retmsg = 'Permanently migrated VM "{}" to node "{}"'.format(domain, target_node)
+
+    return True, retmsg

-def migrate_vm(zk_conn, domain, target_node, force_migrate, is_cli=False):
+def migrate_vm(zk_conn, domain, target_node, force_migrate, wait=False, force_live=False):
     # Validate that VM exists in cluster
     dom_uuid = getDomainUUID(zk_conn, domain)
     if not dom_uuid:
@@ -479,22 +532,19 @@ def migrate_vm(zk_conn, domain, target_node, force_migrate, is_cli=False):
     # Get state and verify we're OK to proceed
     current_state = zkhandler.readdata(zk_conn, '/domains/{}/state'.format(dom_uuid))
     if current_state != 'start':
-        target_state = 'start'
+        # If the current state isn't start, preserve it; we're not doing live migration
+        target_state = current_state
     else:
-        target_state = 'migrate'
+        if force_live:
+            target_state = 'migrate-live'
+        else:
+            target_state = 'migrate'

     current_node = zkhandler.readdata(zk_conn, '/domains/{}/node'.format(dom_uuid))
     last_node = zkhandler.readdata(zk_conn, '/domains/{}/lastnode'.format(dom_uuid))

     if last_node and not force_migrate:
-        if is_cli:
-            click.echo('ERROR: VM "{}" has been previously migrated.'.format(domain))
-            click.echo('> Last node: {}'.format(last_node))
-            click.echo('> Current node: {}'.format(current_node))
-            click.echo('Run `vm unmigrate` to restore the VM to its previous node, or use `--force` to override this check.')
-            return False, ''
-        else:
-            return False, 'ERROR: VM "{}" has been previously migrated.'.format(domain)
+        return False, 'ERROR: VM "{}" has been previously migrated.'.format(domain)

     if not target_node:
         target_node = common.findTargetNode(zk_conn, dom_uuid)
@@ -520,15 +570,25 @@ def migrate_vm(zk_conn, domain, target_node, force_migrate, is_cli=False):
     if last_node and force_migrate:
         current_node = last_node

+    retmsg = 'Migrating VM "{}" to node "{}".'.format(domain, target_node)
+
+    lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid))
+    lock.acquire()
     zkhandler.writedata(zk_conn, {
-        '/domains/{}/state'.format(dom_uuid): 'migrate',
+        '/domains/{}/state'.format(dom_uuid): target_state,
         '/domains/{}/node'.format(dom_uuid): target_node,
         '/domains/{}/lastnode'.format(dom_uuid): current_node
     })
+    lock.release()

-    return True, 'Migrating VM "{}" to node "{}".'.format(domain, target_node)
+    if wait:
+        while zkhandler.readdata(zk_conn, '/domains/{}/state'.format(dom_uuid)) == target_state:
+            time.sleep(1)
+        retmsg = 'Migrated VM "{}" to node "{}"'.format(domain, target_node)
+
+    return True, retmsg

-def unmigrate_vm(zk_conn, domain):
+def unmigrate_vm(zk_conn, domain, wait=False, force_live=False):
     # Validate that VM exists in cluster
     dom_uuid = getDomainUUID(zk_conn, domain)
     if not dom_uuid:
@@ -540,20 +600,33 @@ def unmigrate_vm(zk_conn, domain):
         # If the current state isn't start, preserve it; we're not doing live migration
         target_state = current_state
     else:
-        target_state = 'migrate'
+        if force_live:
+            target_state = 'migrate-live'
+        else:
+            target_state = 'migrate'

     target_node = zkhandler.readdata(zk_conn, '/domains/{}/lastnode'.format(dom_uuid))

     if target_node == '':
         return False, 'ERROR: VM "{}" has not been previously migrated.'.format(domain)

+    retmsg = 'Unmigrating VM "{}" back to node "{}".'.format(domain, target_node)
+
+    lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid))
+    lock.acquire()
     zkhandler.writedata(zk_conn, {
         '/domains/{}/state'.format(dom_uuid): target_state,
         '/domains/{}/node'.format(dom_uuid): target_node,
         '/domains/{}/lastnode'.format(dom_uuid): ''
     })
+    lock.release()

-    return True, 'Unmigrating VM "{}" back to node "{}".'.format(domain, target_node)
+    if wait:
+        while zkhandler.readdata(zk_conn, '/domains/{}/state'.format(dom_uuid)) == target_state:
+            time.sleep(1)
+        retmsg = 'Unmigrated VM "{}" back to node "{}"'.format(domain, target_node)
+
+    return True, retmsg

 def get_console_log(zk_conn, domain, lines=1000):
     # Validate that VM exists in cluster
@@ -570,54 +643,6 @@ def get_console_log(zk_conn, domain, lines=1000):

     return True, loglines

-def follow_console_log(zk_conn, domain, lines=10):
-    # Validate that VM exists in cluster
-    dom_uuid = getDomainUUID(zk_conn, domain)
-    if not dom_uuid:
-        return False, 'ERROR: Could not find VM "{}" in the cluster!'.format(domain)
-
-    # Get the initial data from ZK
-    console_log = zkhandler.readdata(zk_conn, '/domains/{}/consolelog'.format(dom_uuid))
-
-    # Shrink the log buffer to length lines
-    shrunk_log = console_log.split('\n')[-lines:]
-    loglines = '\n'.join(shrunk_log)
-
-    # Print the initial data and begin following
-    print(loglines, end='')
-
-    try:
-        while True:
-            # Grab the next line set
-            new_console_log = zkhandler.readdata(zk_conn, '/domains/{}/consolelog'.format(dom_uuid))
-            # Split the new and old log strings into constituent lines
-            old_console_loglines = console_log.split('\n')
-            new_console_loglines = new_console_log.split('\n')
-            # Set the console log to the new log value for the next iteration
-            console_log = new_console_log
-            # Remove the lines from the old log until we hit the first line of the new log; this
-            # ensures that the old log is a string that we can remove from the new log entirely
-            for index, line in enumerate(old_console_loglines, start=0):
-                if line == new_console_loglines[0]:
-                    del old_console_loglines[0:index]
-                    break
-            # Rejoin the log lines into strings
-            old_console_log = '\n'.join(old_console_loglines)
-            new_console_log = '\n'.join(new_console_loglines)
-            # Remove the old lines from the new log
-            diff_console_log = new_console_log.replace(old_console_log, "")
-            # If there's a difference, print it out
-            if diff_console_log:
-                print(diff_console_log, end='')
-            # Wait a second
-            time.sleep(1)
-    except kazoo.exceptions.NoNodeError:
-        return False, 'ERROR: VM has gone away.'
-    except:
-        return False, 'ERROR: Lost connection to Zookeeper node.'
-
-    return True, ''
-
 def get_info(zk_conn, domain):
     # Validate that VM exists in cluster
     dom_uuid = getDomainUUID(zk_conn, domain)
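The removed `follow_console_log()` recomputed, once per second, the portion of the console buffer appended since the previous read. Its core alignment step can be isolated as a pure function, shown here for reference (a sketch reproducing the removed logic, not code present in this changeset):

```python
def console_log_diff(old_log, new_log):
    """Return only the portion of new_log appended since old_log.

    Trim old lines until the first line of the new buffer matches,
    then strip the overlapping text from the new buffer.
    """
    old_lines = old_log.split('\n')
    new_lines = new_log.split('\n')
    for index, line in enumerate(old_lines):
        if line == new_lines[0]:
            del old_lines[0:index]
            break
    return new_log.replace('\n'.join(old_lines), '')
```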
@@ -23,7 +23,7 @@
 import kazoo.client
 import uuid

-import client_lib.ansiprint as ansiprint
+import daemon_lib.ansiprint as ansiprint

 # Exists function
 def exists(zk_conn, key):
@@ -42,6 +42,45 @@ def listchildren(zk_conn, key):
 def deletekey(zk_conn, key, recursive=True):
     zk_conn.delete(key, recursive=recursive)

+# Rename key recursive function
+def rename_key_element(zk_conn, zk_transaction, source_key, destination_key):
+    data_raw = zk_conn.get(source_key)
+    data = data_raw[0]
+    zk_transaction.create(destination_key, data)
+
+    if zk_conn.get_children(source_key):
+        for child_key in zk_conn.get_children(source_key):
+            child_source_key = "{}/{}".format(source_key, child_key)
+            child_destination_key = "{}/{}".format(destination_key, child_key)
+            rename_key_element(zk_conn, zk_transaction, child_source_key, child_destination_key)
+
+    zk_transaction.delete(source_key)
+
+# Rename key function
+def renamekey(zk_conn, kv):
+    # Start up a transaction
+    zk_transaction = zk_conn.transaction()
+
+    # Proceed one KV pair at a time
+    for source_key in sorted(kv):
+        destination_key = kv[source_key]
+
+        # Check if the source key exists or fail out
+        if not zk_conn.exists(source_key):
+            raise
+        # Check if the destination key exists and fail out
+        if zk_conn.exists(destination_key):
+            raise
+
+        rename_key_element(zk_conn, zk_transaction, source_key, destination_key)
+
+    # Commit the transaction
+    try:
+        zk_transaction.commit()
+        return True
+    except Exception:
+        return False
+
 # Data read function
 def readdata(zk_conn, key):
     data_raw = zk_conn.get(key)
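The new `renamekey()` moves entire subtrees atomically: every create and delete is staged on a single kazoo transaction, so either the whole rename commits or none of it does. A hypothetical usage sketch, assuming a connected kazoo client (the UUIDs are purely illustrative):

```python
# Rename a domain's entire key tree in one atomic transaction.
result = renamekey(zk_conn, {
    '/domains/00000000-0000-0000-0000-000000000000':
    '/domains/11111111-1111-1111-1111-111111111111',
})
if not result:
    print('Rename transaction failed to commit')
```

One caveat worth noting: the bare `raise` statements in `renamekey()` execute outside any `except` block, so a missing source key or a pre-existing destination key surfaces as a `RuntimeError` ("No active exception to re-raise") rather than a descriptive error.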
@@ -89,12 +128,49 @@ def writedata(zk_conn, kv):

 # Write lock function
 def writelock(zk_conn, key):
-    lock_id = str(uuid.uuid1())
-    lock = zk_conn.WriteLock('{}'.format(key), lock_id)
+    while True:
+        try:
+            lock_id = str(uuid.uuid1())
+            lock = zk_conn.WriteLock('{}'.format(key), lock_id)
+            break
+        except Exception:
+            count += 1
+            if count > 5:
+                break
+            else:
+                time.sleep(0.5)
+                continue
     return lock

 # Read lock function
 def readlock(zk_conn, key):
-    lock_id = str(uuid.uuid1())
-    lock = zk_conn.ReadLock('{}'.format(key), lock_id)
+    while True:
+        try:
+            lock_id = str(uuid.uuid1())
+            lock = zk_conn.ReadLock('{}'.format(key), lock_id)
+            break
+        except Exception:
+            count += 1
+            if count > 5:
+                break
+            else:
+                time.sleep(0.5)
+                continue
     return lock

+# Exclusive lock function
+def exclusivelock(zk_conn, key):
+    count = 1
+    while True:
+        try:
+            lock_id = str(uuid.uuid1())
+            lock = zk_conn.Lock('{}'.format(key), lock_id)
+            break
+        except Exception:
+            count += 1
+            if count > 5:
+                break
+            else:
+                time.sleep(0.5)
+                continue
+    return lock
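Two review notes on these retry loops. First, `writelock()` and `readlock()` as shown increment `count` without ever initializing it (only `exclusivelock()` sets `count = 1`), so their exception paths would raise `UnboundLocalError` instead of retrying. Second, on persistent failure the loops `break` and then hit `return lock` with `lock` unbound. A corrected sketch of the same retry pattern, offered as a suggestion rather than what this changeset contains:

```python
import time
import uuid

def exclusivelock(zk_conn, key, retries=5, delay=0.5):
    # Retry lock-object creation a bounded number of times, then
    # fail loudly instead of returning an unbound name.
    for attempt in range(retries):
        try:
            lock_id = str(uuid.uuid1())
            return zk_conn.Lock('{}'.format(key), lock_id)
        except Exception:
            time.sleep(delay)
    raise RuntimeError('Could not create lock on key "{}"'.format(key))
```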
debian/changelog (vendored, 26 changes)
@@ -1,8 +1,32 @@
+pvc (0.9.0-0) unstable; urgency=high
+
+  * Numerous bugfixes and improvements
+
+ -- Joshua Boniface <joshua@boniface.me>  Sun, 18 Oct 2020 14:31:00 -0400
+
+pvc (0.8-1) unstable; urgency=high
+
+  * Fix bug with IPv6 being enabled on bridged interfaces
+
+ -- Joshua Boniface <joshua@boniface.me>  Thu, 15 Oct 2020 11:02:24 -0400
+
 pvc (0.8-0) unstable; urgency=medium

   * Numerous bugfixes and improvements

  -- Joshua Boniface <joshua@boniface.me>  Tue, 11 Aug 2020 12:12:07 -0400

 pvc (0.7-0) unstable; urgency=medium

   * Numerous bugfixes and improvements

  -- Joshua Boniface <joshua@boniface.me>  Sat, 15 Feb 2020 23:24:17 -0500

 pvc (0.6-0) unstable; urgency=medium

   * Numerous improvements, implementation of provisioner and API client

- -- Joshua Boniface <joshua@boniface.me>  Sat, 08 Feb 2019 18:26:58 -0500
+ -- Joshua Boniface <joshua@boniface.me>  Sat, 08 Feb 2020 18:26:58 -0500

 pvc (0.5-0) unstable; urgency=medium
debian/control (vendored, 30 changes)
@@ -6,34 +6,34 @@ Standards-Version: 3.9.8
 Homepage: https://www.boniface.me
 X-Python3-Version: >= 3.2

-Package: pvc-daemon
+Package: pvc-daemon-node
 Architecture: all
-Depends: systemd, pvc-client-common, python3-kazoo, python3-psutil, python3-apscheduler, python3-libvirt, python3-psycopg2, python3-dnspython, python3-yaml, python3-distutils, ipmitool, libvirt-daemon-system, arping, vlan, bridge-utils, dnsmasq, nftables, pdns-server, pdns-backend-pgsql
+Depends: systemd, pvc-daemon-common, python3-kazoo, python3-psutil, python3-apscheduler, python3-libvirt, python3-psycopg2, python3-dnspython, python3-yaml, python3-distutils, python3-rados, python3-gevent, ipmitool, libvirt-daemon-system, arping, vlan, bridge-utils, dnsmasq, nftables, pdns-server, pdns-backend-pgsql
 Suggests: pvc-client-api, pvc-client-cli
-Description: Parallel Virtual Cluster virtualization daemon (Python 3)
+Description: Parallel Virtual Cluster node daemon (Python 3)
  A KVM/Zookeeper/Ceph-based VM and private cloud manager
  .
  This package installs the PVC node daemon

-Package: pvc-client-common
+Package: pvc-daemon-api
 Architecture: all
+Depends: systemd, pvc-daemon-common, python3-yaml, python3-flask, python3-flask-restful, python3-celery, python-celery-common, python3-distutils, redis, python3-redis, python3-lxml, python3-flask-migrate, python3-flask-script, fio
+Description: Parallel Virtual Cluster API daemon (Python 3)
+ A KVM/Zookeeper/Ceph-based VM and private cloud manager
+ .
+ This package installs the PVC API daemon
+
+Package: pvc-daemon-common
+Architecture: all
 Depends: python3-kazoo, python3-psutil, python3-click, python3-lxml
-Description: Parallel Virtual Cluster common client libraries (Python 3)
+Description: Parallel Virtual Cluster common libraries (Python 3)
  A KVM/Zookeeper/Ceph-based VM and private cloud manager
  .
- This package installs the common client libraries
-
-Package: pvc-client-api
-Architecture: all
-Depends: systemd, pvc-client-common, python3-yaml, python3-flask, python3-flask-restful, python3-gevent, python3-celery, python-celery-common, python3-distutils, redis, python3-redis
-Description: Parallel Virtual Cluster API client (Python 3)
- A KVM/Zookeeper/Ceph-based VM and private cloud manager
- .
- This package installs the PVC API client daemon
+ This package installs the common libraries for the daemon and API

 Package: pvc-client-cli
 Architecture: all
-Depends: python3-requests, python3-yaml, python3-lxml
+Depends: python3-requests, python3-requests-toolbelt, python3-yaml, python3-lxml, python3-click
 Description: Parallel Virtual Cluster CLI client (Python 3)
  A KVM/Zookeeper/Ceph-based VM and private cloud manager
  .
debian/pvc-client-api.install (vendored, deleted, 6 changes)
@@ -1,6 +0,0 @@
-client-api/pvc-api.py usr/share/pvc
-client-api/pvc-api.sample.yaml etc/pvc
-client-api/api_lib usr/share/pvc
-client-api/pvc-api.service lib/systemd/system
-client-api/pvc-provisioner-worker.service lib/systemd/system
-client-api/provisioner usr/share/pvc

debian/pvc-client-api.postinst (vendored, deleted, 20 changes)
@@ -1,20 +0,0 @@
-#!/bin/sh
-
-# Install client binary to /usr/bin via symlink
-ln -s /usr/share/pvc/api.py /usr/bin/pvc-api
-
-# Reload systemd's view of the units
-systemctl daemon-reload
-
-# Restart the main daemon (or warn on first install)
-if systemctl is-active --quiet pvc-api.service; then
-    systemctl restart pvc-api.service
-else
-    echo "NOTE: The PVC client API daemon (pvc-api.service) has not been started; create a config file at /etc/pvc/pvc-api.yaml then start it."
-fi
-# Restart the worker daemon (or warn on first install)
-if systemctl is-active --quiet pvc-provisioner-worker.service; then
-    systemctl restart pvc-provisioner-worker.service
-else
-    echo "NOTE: The PVC provisioner worker daemon (pvc-provisioner-worker.service) has not been started; create a config file at /etc/pvc/pvc-api.yaml then start it."
-fi

debian/pvc-client-cli.install (vendored, 1 change)
@@ -1,2 +1,3 @@
 client-cli/pvc.py usr/share/pvc
 client-cli/cli_lib usr/share/pvc
+client-cli/scripts usr/share/pvc

debian/pvc-client-common.install (vendored, deleted, 1 change)
@@ -1 +0,0 @@
-client-common/* usr/share/pvc/client_lib

debian/pvc-daemon-api.install (vendored, new file, 9 changes)
@@ -0,0 +1,9 @@
+api-daemon/pvcapid.py usr/share/pvc
+api-daemon/pvcapid-manage.py usr/share/pvc
+api-daemon/pvc-api-db-upgrade usr/share/pvc
+api-daemon/pvcapid.sample.yaml etc/pvc
+api-daemon/pvcapid usr/share/pvc
+api-daemon/pvcapid.service lib/systemd/system
+api-daemon/pvcapid-worker.service lib/systemd/system
+api-daemon/provisioner usr/share/pvc
+api-daemon/migrations usr/share/pvc

debian/pvc-daemon-api.postinst (vendored, new file, 15 changes)
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+# Reload systemd's view of the units
+systemctl daemon-reload
+
+# Restart the main daemon and apply database migrations (or warn on first install)
+if systemctl is-active --quiet pvcapid.service; then
+    systemctl stop pvcapid-worker.service
+    systemctl stop pvcapid.service
+    /usr/share/pvc/pvc-api-db-upgrade
+    systemctl start pvcapid.service
+    systemctl start pvcapid-worker.service
+else
+    echo "NOTE: The PVC client API daemon (pvcapid.service) and the PVC provisioner worker daemon (pvcapid-worker.service) have not been started; create a config file at /etc/pvc/pvcapid.yaml, then run the database configuration (/usr/share/pvc/pvc-api-db-upgrade) and start them manually."
+fi

(prerm script; filename not preserved in this view)
@@ -1,4 +1,4 @@
 #!/bin/sh

 # Remove client binary symlink
-rm -f /usr/bin/pvc-api
+rm -f /usr/bin/pvcapid

debian/pvc-daemon-common.install (vendored, new file, 1 change)
@@ -0,0 +1 @@
+daemon-common/* usr/share/pvc/daemon_lib

debian/pvc-daemon-node.install (vendored, new file, 7 changes)
@@ -0,0 +1,7 @@
+node-daemon/pvcnoded.py usr/share/pvc
+node-daemon/pvcnoded.sample.yaml etc/pvc
+node-daemon/pvcnoded usr/share/pvc
+node-daemon/pvcnoded.service lib/systemd/system
+node-daemon/pvc.target lib/systemd/system
+node-daemon/pvc-flush.service lib/systemd/system
+node-daemon/monitoring usr/share/pvc

(node daemon postinst script; filename not preserved in this view)
@@ -4,8 +4,8 @@
 systemctl daemon-reload

 # Enable the service and target
-systemctl enable /lib/systemd/system/pvcd.service
-systemctl enable /lib/systemd/system/pvcd.target
+systemctl enable /lib/systemd/system/pvcnoded.service
+systemctl enable /lib/systemd/system/pvc.target

 # Inform administrator of the autoflush daemon if it is not enabled
 if ! systemctl is-active --quiet pvc-flush.service; then
@@ -13,8 +13,8 @@ if ! systemctl is-active --quiet pvc-flush.service; then
 fi

 # Inform administrator of the service restart/startup not occurring automatically
-if systemctl is-active --quiet pvcd.service; then
-    echo "NOTE: The PVC node daemon (pvcd.service) has not been restarted; this is up to the administrator."
+if systemctl is-active --quiet pvcnoded.service; then
+    echo "NOTE: The PVC node daemon (pvcnoded.service) has not been restarted; this is up to the administrator."
 else
-    echo "NOTE: The PVC node daemon (pvcd.service) has not been started; create a config file at /etc/pvc/pvcd.yaml then start it."
+    echo "NOTE: The PVC node daemon (pvcnoded.service) has not been started; create a config file at /etc/pvc/pvcnoded.yaml then start it."
 fi

debian/pvc-daemon-node.prerm (vendored, new file, 5 changes)
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+# Disable the services
+systemctl disable pvcnoded.service
+systemctl disable pvc.target

debian/pvc-daemon.install (vendored, deleted, 6 changes)
@@ -1,6 +0,0 @@
-node-daemon/pvcd.py usr/share/pvc
-node-daemon/pvcd.sample.yaml etc/pvc
-node-daemon/pvcd usr/share/pvc
-node-daemon/pvcd.target lib/systemd/system
-node-daemon/pvcd.service lib/systemd/system
-node-daemon/pvc-flush.service lib/systemd/system

debian/pvc-daemon.prerm (vendored, deleted, 5 changes)
@@ -1,5 +0,0 @@
-#!/bin/sh
-
-# Disable the services
-systemctl disable pvcd.service
-systemctl disable pvcd.target
(documentation page; filename not preserved in this view)
@@ -32,7 +32,7 @@ Within each node, the PVC daemon is a single Python 3 program which handles all

 The daemon uses an object-oriented approach, with most cluster objects being represented by class objects of a specific type. Each node has a full view of all cluster objects and can interact with them based on events from the cluster as needed.

-Further information about the node daemon architecture can be found at the [daemon architecture page](/architecture/daemon).
+Further information about the node daemon can be found at the [daemon manual page](/manuals/daemon).

 ## Client Architecture

@@ -50,7 +50,7 @@ The API client uses a dedicated, independent set of functions to perform the act

 ### CLI client

-The CLI client interface is a Click application, which provides a convenient CLI interface to the API client. It supports connecting to multiple clusters, over both HTTP and HTTPS and with authentication, including a special "local" cluster if the client determines that an `/etc/pvc/pvc-api.yaml` configuration exists on the host.
+The CLI client interface is a Click application, which provides a convenient CLI interface to the API client. It supports connecting to multiple clusters, over both HTTP and HTTPS and with authentication, including a special "local" cluster if the client determines that an `/etc/pvc/pvcapid.yaml` configuration exists on the host.

 The CLI client is self-documenting using the `-h`/`--help` arguments, though a short manual can be found at the [CLI manual page](/manuals/cli).

@@ -58,9 +58,7 @@ The CLI client is self-documenting using the `-h`/`--help` arguments, though a s

 The overall management, deployment, bootstrapping, and configuring of nodes is accomplished via a set of Ansible roles, found in the [`pvc-ansible` repository](https://github.com/parallelvirtualcluster/pvc-ansible), and nodes are installed via a custom installer ISO generated by the [`pvc-installer` repository](https://github.com/parallelvirtualcluster/pvc-installer). Once the cluster is set up, nodes can be added, replaced, or updated using this Ansible framework.

-Further information about the Ansible deployment architecture can be found at the [Ansible architecture page](/architecture/ansible).
-
-The Ansible configuration manual can be found at the [Ansible manual page](/manuals/ansible).
+The Ansible configuration and architecture manual can be found at the [Ansible manual page](/manuals/ansible).

 ## About the author
(deleted documentation page)
@@ -1,43 +0,0 @@
-# PVC Ansible architecture
-
-The PVC Ansible setup and management framework is written in Ansible. It consists of two roles: `base` and `pvc`.
-
-## Base role
-
-The Base role configures a node to a specific, standard base Debian system, with a number of PVC-specific tweaks. Some examples include:
-
-* Installing the custom PVC repository at Boniface Labs.
-
-* Removing several unnecessary packages and installing numerous additional packages.
-
-* Automatically configuring network interfaces based on the `group_vars` configuration.
-
-* Configuring several general `sysctl` settings for optimal performance.
-
-* Installing and configuring rsyslog, postfix, ntpd, ssh, and fail2ban.
-
-* Creating the users specified in the `group_vars` configuration.
-
-* Installing custom MOTDs, bashrc files, vimrc files, and other useful configurations for each user.
-
-The end result is a standardized "PVC node" system ready to have the daemons installed by the PVC role.
-
-## PVC role
-
-The PVC role configures all the dependencies of PVC, including storage, networking, and databases, then installs the PVC daemon itself. Specifically, it will, in order:
-
-* Install Ceph, configure and bootstrap a new cluster if `bootstrap=yes` is set, configure the monitor and manager daemons, and start up the cluster ready for the addition of OSDs via the client interface (coordinators only).
-
-* Install, configure, and if `bootstrap=yes` is set, bootstrap a Zookeeper cluster (coordinators only).
-
-* Install, configure, and if `bootstrap=yes` is set, bootstrap a Patroni PostgreSQL cluster for the PowerDNS aggregator (coordinators only).
-
-* Install and configure Libvirt.
-
-* Install and configure FRRouting.
-
-* Install and configure the main PVC daemon and API client, including initializing the PVC cluster (`pvc init`).
-
-## Completion
-
-Once the entire playbook has run for the first time against a given host, the host will be rebooted to apply all the configured services. On startup, the system should immediately launch the PVC daemon, check in to the Zookeeper cluster, and become ready. The node will be in `flushed` state on its first boot; the administrator will need to run `pvc node unflush <node>` to set the node into active state ready to handle virtual machines.

(deleted documentation page)
@@ -1,7 +0,0 @@
-# PVC API architecture
-
-The PVC API is a standalone client application for PVC. It interfaces directly with the Zookeeper database to manage state.
-
-The API is built using Flask and is packaged in the Debian package `pvc-client-api`. The API depends on the common client functions of the `pvc-client-common` package as does the CLI client.
-
-Details of the API interface can be found in [the manual](/manuals/api).

(deleted documentation page)
@@ -1,7 +0,0 @@
-# PVC CLI architecture
-
-The PVC CLI is a standalone client application for PVC. It interfaces with the PVC API, via a configurable list of clusters with customizable hosts, ports, addresses, and authentication.
-
-The CLI is build using Click and is packaged in the Debian package `pvc-client-cli`. The CLI does not depend on any other PVC components and can be used independently on arbitrary systems.
-
-The CLI is self-documenting, however [the manual](/manuals/cli) details the required configuration.
(modified documentation page: cluster architecture considerations)
@@ -1,56 +1,110 @@
 # PVC Cluster Architecture considerations

-This document contains considerations the administrator should make when preparing for and building a PVC cluster. It includes four main subsections: node specifications, storage specifications, network layout, and node layout, plus a fifth section featuring diagrams of 3 example topologies.
+- [PVC Cluster Architecture considerations](#pvc-cluster-architecture-considerations)
+  * [Node Specifications: Considering the size of nodes](#node-specifications--considering-the-size-of-nodes)
+  * [Storage Layout: Ceph and OSDs](#storage-layout--ceph-and-osds)
+  * [Physical network considerations](#physical-network-considerations)
+  * [Network Layout: Considering the required networks](#network-layout--considering-the-required-networks)
+    + [PVC system networks](#pvc-system-networks)
+      - [Upstream: Connecting the nodes to the wider world](#upstream--connecting-the-nodes-to-the-wider-world)
+      - [Cluster: Connecting the nodes with each other](#cluster--connecting-the-nodes-with-each-other)
+      - [Storage: Connecting Ceph OSD with each other](#storage--connecting-ceph-osd-with-each-other)
+    + [PVC client networks](#pvc-client-networks)
+      - [Bridged (unmanaged) Client Networks](#bridged--unmanaged--client-networks)
+      - [VXLAN (managed) Client Networks](#vxlan--managed--client-networks)
+      - [Other Client Networks](#other-client-networks)
+  * [Node Layout: Considering how nodes are laid out](#node-layout--considering-how-nodes-are-laid-out)
+    + [Node Functions: Coordinators versus Hypervisors](#node-functions--coordinators-versus-hypervisors)
+      - [Coordinators](#coordinators)
+        * [The Primary Coordinator](#the-primary-coordinator)
+      - [Hypervisors](#hypervisors)
+    + [Geographic redundancy](#geographic-redundancy)
+  * [Example Configurations](#example-configurations)
+    + [Basic 3-node cluster](#basic-3-node-cluster)
+    + [Mid-sized 8-node cluster with 3 coordinators](#mid-sized-8-node-cluster-with-3-coordinators)
+    + [Large 17-node cluster with 5 coordinators](#large-17-node-cluster-with-5-coordinators)
+
+This document contains considerations the administrator should make when preparing for and building a PVC cluster. It is important that prospective PVC administrators read this document *thoroughly* before deploying a cluster to ensure they understand the requirements, caveats, and important details about how PVC operates.

 ## Node Specifications: Considering the size of nodes

-Each node in the cluster must be sized based on the needs of the cluster and the load placed on it. In general, taller nodes are better for performance and allow for a more powerful cluster on less hardware, though the needs of each specific environment and workload my affect this differently.
+PVC nodes, especially coordinator nodes, run a significant number of software applications in addition to the virtual machines (VMs). It is therefore extremely important to size the systems correctly for the expected workload while planning both for redundancy and future capacity. In general, taller nodes are better for performance, providing a more powerful cluster on fewer physical machines, though each workload may be different in this regard.

-At a bare minimum, each node should have the following specifications:
+The following table provides bare-minimum, recommended, and optimal specifications for a cluster. The bare-minimum specification would be suitable for testing or a small lab, but not for production use. The recommended specification would be suitable for a small production cluster running lightweight VMs. The optimal cluster would be ideal for running a demanding, resource-intensive production cluster. Note that these are the minimum resources required; actual usage will likely require more resources than those presented here - this is mostly to show the minimums for each specified configuration (i.e. testing, light production, heavy production).

-* 12x 1.8GHz or better Intel/AMD cores from at least the Nehalem/Bulldozer eras (~2008 or newer)
-* 48GB of RAM
-* 2x 1Gbps Ethernet interfaces
-* 1x 10GB+ system disk (SSD/HDD/USB/SD/eMMC flash)
-* 1x 400GB+ OSD data disk (SSD)
+| Resource | Minimum | Recommended | Optimal |
+|----------|---------|-------------|---------|
+| CPU generation | Intel Nehalem (2008) / AMD Bulldozer (2011) | Intel Sandy Bridge (2011) / AMD Naples (2017) | Intel Haswell (2013) / AMD Rome (2019) |
+| CPU cores (per node) | 4x @ 1.8GHz | 8x @ 2.0GHz | 12x @ 2.2GHz |
+| RAM (per node) | 16GB | 48GB | 64GB |
+| System disk (SSD/HDD/USB/SD/eMMC) | 1x 10GB | 2x 10GB RAID-1 | 2x 32GB RAID-1 |
+| Data disk (SSD only) | 1x 200GB | 1x 400GB | 2x 400GB |
+| Network interfaces | 1x 1Gbps | 2x 1Gbps LAG | 2x 10Gbps LAG |
+| Total CPU cores (healthy) | 12x | 24x | 36x |
+| Total CPU cores (n-1) | 8x | 16x | 24x |
+| Total RAM (healthy) | 48GB | 144GB | 192GB |
+| Total RAM (n-1) | 32GB | 96GB | 128GB |
+| Total disk space | 200GB | 400GB | 800GB |
-For a cluster of 3 such nodes, this will provide a total of:
+Of these totals, some amount of CPU and RAM will be used by the storage subsystem and the PVC daemons themselves, meaning that the total available for virtual machines is slightly less. Generally, each OSD data disk will consume 1 vCPU at load and 1-2GB RAM, so nodes should be sized not only according to the VM workload, but the number of storage disks per node. Additionally, the coordinator databases will use additional RAM and CPU resources of up to 1-4GB per node, though there is generally little need to spec coordinators any larger than non-coordinator nodes, and the VM automatic node selection process will take used RAM into account by default.

-* 36 total CPU cores
-* 144GB RAM
-* 400GB usable Ceph storage space (`copies=3`)
+### System Disks

-Of this, some amount of CPU and RAM will be used by the storage subsystem and the PVC daemons themselves, meaning that the total available for virtual machines is slightly less. Generally, each OSD data disk will consume 1 vCPU at load and 1-2GB RAM, so nodes should be sized not only according to the VM workload, but the number of storage disks per node. Additionally the coordinator databases will use additional RAM and CPU resources of up to 1-4GB per node, though there is generally little need to spec coordinators any larger than non-coordinator nodes and the VM automatic node selection process will take used RAM into account by default.
+The system disk(s) chosen are important to consider, especially for coordinators. Ideally, an SSD, or two SSDs in RAID-1/mirroring, are recommended for system disks. This helps ensure optimal performance for the system (e.g. swap space) and PVC components such as databases, as well as the Ceph caches.
+
+It is possible to run PVC on slower disks, for instance HDDs, USB drives, SD cards, or eMMC flash. For hypervisor-only nodes this will be acceptable; however, for coordinators, be advised that the performance of some aspects of the system may suffer as a result, and the longevity of the storage media must be carefully considered. RAID-1/mirroring is strongly recommended for these storage media as well, especially on coordinator nodes.
+
+### n-1 Redundancy
+
+Care should be taken to examine the "healthy" versus "n-1" total resource availability. Under normal operation, PVC will use all available resources and distribute VMs across all cluster nodes. However, during single-node failure or maintenance conditions, all VMs will be required to run on the remaining hypervisors. Thus, care should be taken during planning to ensure there are sufficient resources for the expected workload of the cluster.
+
+The general rule for available resource capacity planning can be thought of as "1/3 of the total disk space, 2/3 of the total RAM, 2/3 of the total CPUs" for a 3-node cluster.
+
+For memory provisioning of VMs, PVC will warn the administrator, via a Degraded cluster state, if the "n-1" RAM quantity is exceeded by the total maximum allocation of all running VMs. This situation can be worked around with sufficient swap space on nodes to ensure there is overflow; however, the warning cannot be overridden. If nodes are of mismatched sizes, the "n-1" RAM quantity is calculated by removing (one of) the largest node in the cluster and adding the remaining nodes' RAM counts together.
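A quick worked example of the mismatched-size rule above, as an illustrative Python sketch (the node names and sizes are hypothetical):

```python
# "n-1" usable RAM: drop (one of) the largest node, sum the rest.
node_ram_gb = {'node1': 64, 'node2': 48, 'node3': 48}

n1_ram_gb = sum(node_ram_gb.values()) - max(node_ram_gb.values())
print(n1_ram_gb)  # 96: the capacity PVC checks VM allocations against
```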
+### Operating System and Architecture
+
+As an underlying OS, only Debian 10 "Buster" is supported by PVC. This is the operating system installed by the PVC [node installer](https://github.com/parallelvirtualcluster/pvc-installer) and expected by the PVC [Ansible configuration system](https://github.com/parallelvirtualcluster/pvc-ansible). Ubuntu or other Debian-derived distributions may work, but are not officially supported. PVC also makes use of a custom repository to provide the PVC software and an updated version of Ceph beyond what is available in the base operating system, and this is only compatible officially with Debian 10 "Buster".
+
+Currently, only the `amd64` (Intel 64 or AMD64) architecture is officially supported by PVC. Given the cross-platform nature of Python and the various software components in Debian, it may work on `armhf` or `arm64` systems as well; however, this has not been tested by the author.

 ## Storage Layout: Ceph and OSDs

-The Ceph subsystem of PVC, if enabled, creates a "hyperconverged" setup whereby storage and VM hypervisor functions are collocated onto the same physical servers. The performance of the storage must be taken into account when sizing the nodes as mentioned above.
+The Ceph subsystem of PVC, if enabled, creates a "hyperconverged" cluster whereby storage and VM hypervisor functions are collocated onto the same physical servers. The performance of the storage must be taken into account when sizing the nodes as mentioned above.

-The Ceph system is laid out similar to the other daemons. The Ceph Monitor and Manager functions are delegated to the Coordinators over the cluster network, with all nodes connecting to these hosts to obtain the CRUSH maps and select OSD disks. OSDs are then distributed on all hosts, including non-coordinator hypervisors, and communicate with clients over the cluster network and with each other (for replication, failover, etc.) over the storage network.
+The Ceph system is laid out similar to the other daemons. The Ceph Monitor and Manager functions are delegated to the Coordinators over the storage network, with all nodes connecting to these hosts to obtain the CRUSH maps and select OSD disks. OSDs are then distributed on all hosts, including non-coordinator hypervisors, and communicate with clients and each other over the storage network.

-PVC Ceph pools make use of the replication mechanism of Ceph to store multiple copies of each object, thus ensuring that data is always available even when a host is unavailable. Note that, mostly for performance reasons related to rewrites and random I/O, erasure coding is *not* supported in PVC.
+Disks must be balanced across all nodes. Therefore, adding 1 disk to 1 node is not sufficient; 1 disk must be added to all nodes at the same time for the available space to increase. Ideally, disk sizes should also be identical across all storage disks, though the weight of each disk can be configured when added to the cluster. Generally speaking, fewer larger disks are preferable to many smaller disks to minimize storage resource utilization; however, slightly more storage performance can be gained from using many small disks. The administrator should therefore always aim to choose the biggest disks they can and grow by adding more identical disks as space or performance needs grow.

-The default replication level for a new pool is `copies=3, mincopies=2`. This will store 3 copies of each object, with a host-level failure domain, and will allow I/O as long as 2 copies are available. Thus, in a cluster of any size, all data is fully available even if a single host becomes unavailable. It will however use 3x the space for each piece of data stored, which must be considered when sizing the disk space for the cluster: a pool in this configuration, running on 3 nodes each with a single 400GB disk, will effectively have 400GB of total space available for use. Additionally, new disks must be added in groups of 3 spread across the nodes in order to be able to take advantage of the additional space, since each write will require creating 3 copies across each of the 3 hosts.
+PVC Ceph pools make use of the replication mechanism of Ceph to store multiple copies of each object, thus ensuring that data is always available even when a host is unavailable. Only "replica"-based Ceph redundancy is supported by PVC; erasure coded pools are not supported due to major performance impacts related to rewrites and random I/O.
+
+The default replication level for a new pool is `copies=3, mincopies=2`. This will store 3 copies of each object, with a host-level failure domain, and will allow I/O as long as 2 copies are available. Thus, in a cluster of any size, all data is fully available even if a single host becomes unavailable. It will however use 3x the space for each piece of data stored, which must be considered when sizing the disk space for the cluster: a pool in this configuration, running on 3 nodes each with a single 400GB disk, will effectively have 400GB of total space available for use. As mentioned above, new disks must also be added in groups across nodes equal to the total number of `copies` to ensure new space is usable.

 Non-default values can also be set at pool creation time. For instance, one could create a `copies=3, mincopies=1` pool, which would allow I/O with two hosts down, but leaves the cluster susceptible to a write hole should a disk fail in this state. Alternatively, for more resilience, one could create a `copies=4, mincopies=2` pool, which will allow 2 hosts to fail without a write hole, but would consume 4x the space for each piece of data stored and require new disks to be added in groups of 4 instead. Practically any combination of values is possible; however, these 3 are the most relevant for most use-cases, and for most, especially small, clusters, the default is sufficient to provide solid redundancy and guard against host failures until the administrator can respond.

 Replication levels cannot be changed within PVC once a pool is created; however, they can be changed via manual Ceph commands on a coordinator should the administrator require this. In any case, the administrator should carefully consider sizing, failure domains, and performance when selecting storage devices to ensure the right level of resiliency versus data usage for their use-case and cluster size.
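To make the sizing arithmetic above concrete, a small illustrative Python sketch of usable capacity under replication (the numbers mirror the 3-node, 400GB-disk example in the text):

```python
def usable_space_gb(per_node_disk_gb, nodes, copies=3):
    # Each object is stored 'copies' times across hosts, so raw space
    # shrinks by that factor; mincopies affects availability, not capacity.
    raw_gb = per_node_disk_gb * nodes
    return raw_gb / copies

print(usable_space_gb(400, 3))            # 400.0 GB usable with copies=3 on 3 nodes
print(usable_space_gb(400, 4, copies=4))  # 400.0 GB usable with copies=4 on 4 nodes
```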
-## Network Layout: Considering the required networks
+## Physical network considerations

-A PVC cluster needs, at minimum, 3 networks in order to function properly. Each of the three networks and its function is detailed below. An additional two sections cover the two kinds of client networks and the considerations for them.
-
-### Physical network considerations
-
-At a minimum, a production PVC cluster should use at least two 1Gbps Ethernet interfaces, connected in an LACP or active-backup bond on one or more switches. On top of this bond, the various cluster networks should be configured as vLANs.
+At a minimum, a production PVC cluster should use at least two 1Gbps Ethernet interfaces, connected in an LACP or active-backup bond on one or more switches. On top of this bond, the various cluster networks are configured as 802.1q vLANs. PVC is able to support configurations without 802.1q vLAN support, using multiple physical interfaces and no bridged client networks, but this is strongly discouraged due to the added complexity it introduces; the switches chosen for the cluster should include these requirements as a minimum.

 More advanced physical network layouts are also possible. For instance, one could have two isolated networks. On the first network, each node has two 10Gbps Ethernet interfaces, which are combined in a bond across two redundant switch fabrics and that handle the upstream and cluster networks. On the second network, each node has an additional two 10Gbps, which are also combined in a bond across the redundant switch fabrics and handle the storage network. This configuration could support up to 10Gbps of aggregate client traffic while also supporting 10Gbps of aggregate storage traffic. Even more complex network configurations are possible if the cluster requires such performance. See the [Example Configurations](#example-configurations) section for some examples.

-### Upstream: Connecting the nodes to the wider world
+Only Ethernet networks are supported by PVC. More exotic interconnects such as Infiniband are not supported by default, and must be manually set up with Ethernet (e.g. EoIB) layers on top to be usable with PVC.

-The upstream network functions as the main upstream for the cluster nodes, providing Internet access and a way to route managed client network traffic out of the cluster. In most deployments, this should be an RFC1918 private subnet with an upstream router which can perform NAT translation and firewalling as required, both for the cluster nodes themselves, but also for the RFC1918 managed client networks.
+PVC manages the IP addressing of all nodes itself and creates the required addresses during node daemon startup; thus, the on-boot network configuration of each interface should be set to "manual" with no IP addresses configured.

-The floating IP address in the upstream network can be used as a single point of communication with the PVC cluster from other upstream sources, for instance to access the DNS aggregator instance or the API if configured. For this reason the network should generally be protected from unauthorized access via a firewall.
+## Network Layout: Considering the required networks

+A PVC cluster needs several different networks to operate properly; they are described in detail below, and the administrator should ensure they account for all the required networks when planning the cluster.
+
+### PVC system networks
+
+#### Upstream: Connecting the nodes to the wider world
+
+The upstream network functions as the main upstream for the cluster nodes, providing Internet access and a way to route managed client network traffic out of the cluster. In most deployments, this should be an RFC1918 private subnet with an upstream router which can perform NAT translation and firewalling as required, both for the cluster nodes themselves, and also for any RFC1918 managed client networks.
+
+The floating IP address in the cluster network can be used as a single point of communication with the active primary node, for instance to access the DNS aggregator instance or the management API. PVC provides only limited access control mechanisms to the API interface, so the upstream network should always be protected by a firewall; running PVC directly accessible on the Internet is strongly discouraged and may pose a serious security risk, and all access should be restricted to the smallest possible set of remote systems.

 Nodes in this network are generally assigned static IP addresses which are configured at node install time and in the [Ansible deployment configuration](/manuals/ansible).
@ -80,52 +134,62 @@ For example, for a 3+ node cluster, up to about 90 nodes, the following configur
|
||||
|
||||
For even larger clusters, a `/23` or even larger network may be used.
|
||||
|
||||
### Cluster: Connecting the nodes with each other
|
||||
#### Cluster: Connecting the nodes with each other
|
||||
|
||||
The cluster network is an unrouted private network used by the PVC nodes to communicate with each other for database access, Libvirt migration, and storage client traffic. It is also used as the underlying interface for the BGP EVPN VXLAN interfaces used by managed client networks.
|
||||
The cluster network is an unrouted private network used by the PVC nodes to communicate with each other for database access and Libvirt migrations. It is also used as the underlying interface for the BGP EVPN VXLAN interfaces used by managed client networks.
|
||||
|
||||
The floating IP address in the cluster network can be used as a single point of communication with the primary node.
|
||||
The floating IP address in the cluster network can be used as a single point of communication with the active primary node.
|
||||
|
||||
Nodes in this network are generally assigned IPs automatically based on their node number (e.g. node1 at `.1`, node2 at `.2`, etc.). The network should be large enough to include all nodes sequentially.
|
||||
|
||||
Generally the cluster network should be completely separate from the upstream network, either a separate physical interface (or set of bonded interfaces) or a dedicated vLAN on an underlying physical device.
|
||||
Generally the cluster network should be completely separate from the upstream network, either a separate physical interface (or set of bonded interfaces) or a dedicated vLAN on an underlying physical device, but they can be collocated if required.
|
||||
|
||||
### Storage: Connecting Ceph OSD with each other
|
||||
#### Storage: Connecting Ceph daemons with each other and with OSDs
|
||||
|
||||
The storage network is an unrouted private network used by the PVC node storage OSDs to communicated with each other, without using the main cluster network and introducing potentially large amounts of traffic there.
|
||||
The storage network is an unrouted private network used by the PVC node storage OSDs to communicated with each other, for Ceph management functionality, and for QEMU-to-Ceph disk access, without using the main cluster network and introducing potentially large amounts of traffic there.
|
||||
|
||||
Nodes in this network are generally assigned IPs automatically based on their node number. The network should be large enough to include all nodes sequentially.
|
||||
The floating IP address in the storage network can be used as a single point of communication with the active primary node, though this will generally be of little use.
|
||||
|
||||
The administrator may choose to collocate the storage network on the same physical interface as the cluster network, or on a separate physical interface. This should be decided based on the size of the cluster and the perceived ratios of client network versus storage traffic. In large (>3 node) or storage-intensive clusters, this network should generally be a separate set of fast physical interfaces, separate from both the upstream and cluster networks, in order to maximize and isolate the storage bandwidth.
|
||||
Nodes in this network are generally assigned IPs automatically based on their node number (e.g. node1 at `.1`, node2 at `.2`, etc.). The network should be large enough to include all nodes sequentially.
|
||||
|
||||
### Bridged (unmanaged) Client Networks
|
||||
The administrator may choose to collocate the storage network on the same physical interface as the cluster network, or on a separate physical interface. This should be decided based on the size of the cluster and the perceived ratios of client network versus storage traffic. In large (>3 node) or storage-intensive clusters, this network should generally be a separate set of fast physical interfaces, separate from both the upstream and cluster networks, in order to maximize and isolate the storage bandwidth. If the administrator does choose to collocate these networks, they may also share the same IP address, thus eliminating any distinction between the Cluster and Storage networks. The PVC software handles this natively when the Cluster and Storage IPs of a node are identical.
|
||||
|
||||
The first type of client network is the unmanaged bridged network. These networks have a separate vLAN on the device underlying the cluster network, which is created when the network is configured. VMs are then bridged into this vLAN.
|
||||
### PVC client networks
|
||||
|
||||
With this client network type, PVC does no management of the network. This is left entirely to the administrator. It requires switch support and the configuration of the vLANs on the switchports of each node's cluster network before enabling the network.
|
||||
#### Bridged (unmanaged) Client Networks
|
||||
|
||||
### VXLAN (managed) Client Networks
|
||||
The first type of client network is the unmanaged bridged network. These networks have a separate vLAN on the device underlying the other networks, which is created when the network is configured. VMs are then bridged into this vLAN.
|
||||
|
||||
The second type of client network is the managed VXLAN network. These networks make use of BGP EVPN, managed by route reflection on the coordinators, to create virtual layer 2 Ethernet tunnels between all nodes in the cluster. VXLANs are then run on top of these virtual layer 2 tunnels, with the primary PVC node providing routing, DHCP, and DNS functionality to the network via a single IP address.
|
||||
With this client network type, PVC does no management of the network. This is left entirely to the administrator. It requires switch support and the configuration of the vLANs on the switchports of each node's physical interfaces before enabling the network.
|
||||
|
||||
With this client network type, PVC is in full control of the network. No vLAN configuration is required on the switchports of each node's cluster network as the virtual layer 2 tunnel travels over the cluster layer 3 network. All client network traffic destined for outside the network will exit via the upstream network of the primary coordinator node; note that this may introduce a bottleneck and tromboning if there is a large amount of external and/or inter-network traffic on the cluster. The administrator should consider this carefully when sizing the cluster network.
|
||||
Generally, the same physical network interface will underlie both the cluster networks and bridged client networks. PVC does however support specifying a separate physical device for bridged client networks, for instance to separate these networks onto a different physical interface from the main cluster networks.
|
||||
|
||||
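As a sketch of how such a network might be defined, the command below creates a hypothetical bridged network on vLAN/VNI `200`; the option names shown are illustrative and should be confirmed with `pvc network add -h`:

```
$ pvc network add 200 --type bridged --description customer-vlan-200
```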
#### VXLAN (managed) Client Networks
|
||||
|
||||
The second type of client network is the managed VXLAN network. These networks make use of BGP EVPN, managed by route reflection on the coordinators, to create virtual layer 2 Ethernet tunnels between all nodes in the cluster. VXLANs are then run on top of these virtual layer 2 tunnels, with the active primary PVC node providing routing, DHCP, and DNS functionality to the network via a single IP address.
|
||||
|
||||
With this client network type, PVC is in full control of the network. No vLAN configuration is required on the switchports of each node's physical interfaces, as the virtual layer 2 tunnel travels over the cluster layer 3 network. All client network traffic destined for outside the network will exit via the upstream network interface of the active primary coordinator node.
|
||||
|
||||
NOTE: These networks may introduce a bottleneck and tromboning if there is a large amount of external and/or inter-network traffic on the cluster. The administrator should consider this carefully when deciding whether to use managed or bridged networks and properly evaluate the inter-network traffic requirements.
|
||||
|
||||
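As an illustrative sketch (all values are hypothetical; confirm the option names with `pvc network add -h`), a managed network with DHCP enabled might be created as:

```
$ pvc network add 100 --type managed --description web-vms \
    --domain web.example.com --ipnet 10.100.0.0/24 --gateway 10.100.0.1 \
    --dhcp --dhcp-start 10.100.0.100 --dhcp-end 10.100.0.199
```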
#### Other Client Networks
|
||||
|
||||
Future PVC versions may support other client network types, such as direct-routing between VMs.
|
||||
|
||||
## Node Layout: Considering how nodes are laid out
|
||||
|
||||
A production-grade PVC cluster requires 3 nodes running the PVC Daemon software. 1-node clusters are supported for very small clusters, homelabs, and testing, but provide no redundancy; they should not be used in production situations.
|
||||
A production-grade PVC cluster requires at least 3 nodes running the PVC Daemon software. 1-node clusters are supported for very small clusters, home labs, and testing, but provide no redundancy; they should not be used in production situations.
|
||||
|
||||
### Node Functions: Coordinators versus Hypervisors
|
||||
|
||||
Within PVC, a given node can have one of two main functions: it can be a "Coordinator" or a "Hypervisor".
|
||||
Within PVC, a given node can have one of two main functions: "Coordinator" or "Hypervisor".
|
||||
|
||||
#### Coordinators
|
||||
|
||||
Coordinators are a special set of 3 or 5 nodes with additional functionality. The coordinator nodes run, in addition to the PVC software itself, a number of databases and additional functions which are required by the whole cluster. An odd number of coordinators is *always* required to maintain quorum, though there are diminishing returns when creating more than 3. These additional functions are:
|
||||
|
||||
0. The Zookeeper database containing the cluster state and configuration
|
||||
0. The DNS aggregation Patroni PostgreSQL database containing DNS records for all client networks
|
||||
0. The Zookeeper database cluster containing the cluster state and configuration
|
||||
0. The Patroni PostgreSQL database cluster containing DNS records for managed networks and provisioning configurations
|
||||
0. The FRR EBGP route reflectors and upstream BGP peers
|
||||
|
||||
In addition to these functions, coordinators can usually also run all other PVC node functions.
|
||||
@ -134,20 +198,38 @@ The set of coordinator nodes is generally configured at cluster bootstrap, initi
|
||||
|
||||
##### The Primary Coordinator
|
||||
|
||||
Within the set of coordinators, a single primary coordinator is elected and shuffles around the cluster as nodes start and stop. Which coordinator is primary can be selected by the administrator manually, or via a simple election process within the cluster. Once a node becomes primary, it will remain so until told not to be. This coordinator is responsible for some additional functionality in addition to the other coordinators. These additional functions are:
|
||||
Within the set of coordinators, a single primary coordinator is elected at cluster startup and as nodes start and stop, or in response to administrative commands. Once a node becomes primary, it will remain so until it stops or is told not to be. This coordinator is responsible for some additional functionality in addition to the other coordinators. These additional functions are:
|
||||
|
||||
0. The floating IPs in the main networks
|
||||
0. The default gateway IP for each managed client network
|
||||
0. The DNSMasq instance handling DHCP and DNS for each managed client network
|
||||
0. The API and provisioner clients and workers
|
||||
|
||||
PVC gracefully handles transitioning primary coordinator state, to minimize downtime. Workers will continue to operate on the old coordinator, if available, after a switchover; the administrator should be aware of any active tasks before switching the active primary coordinator.
|
||||
|
||||
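The active primary can be inspected and moved by the administrator; a minimal sketch, assuming hypothetical nodes named `pvchv1` through `pvchv3`:

```
$ pvc node list              # the current primary is shown in the coordinator state column
$ pvc node primary pvchv2    # hand the primary role to pvchv2
```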
#### Hypervisors
|
||||
|
||||
Hypervisors consist of all other PVC nodes in the cluster. For small clusters (3 nodes), there will generally not be any non-coordinator nodes, though adding a 4th would require it to be a hypervisor to preserve quorum between the coordinators. Larger clusters should generally add new nodes as hypervisors rather than coordinators, to preserve the small set of coordinator nodes previously mentioned.
|
||||
|
||||
### Geographic redundancy
|
||||
|
||||
PVC supports geographic redundancy of nodes in order to facilitate disaster recovery scenarios when uptime is critical. Functionally, PVC behaves the same regardless of whether the 3 or more coordinators are in the same physical location or in remote physical locations.
|
||||
|
||||
When using geographic redundancy, there are several caveats to keep in mind:
|
||||
|
||||
* The Ceph storage subsystem is latency-sensitive. With the default replication configuration, at least 2 writes must succeed for the write to return a success, so the effective write latency on any node will be equal to the maximum latency between any two nodes. It is recommended to keep all PVC nodes as "close" as possible latency-wise or storage performance may suffer.
|
||||
|
||||
* The inter-node PVC networks must be layer-2 networks (broadcast domains). These networks must be spanned to all nodes in all locations.
|
||||
|
||||
* The number of sites and positioning of coordinators at those sites is important. A majority (at least 2 in a 3-coordinator cluster, or 3 in a 5-coordinator cluster) of coordinators must be able to reach each other in a failure scenario for the cluster as a whole to remain functional. Thus, configurations such as 2 + 1 or 3 + 2 splits across 2 sites do *not* provide full redundancy, and the whole cluster will be down if the majority site is down. It is thus recommended to always have an odd number of sites to match the odd number of coordinators, for instance a 1 + 1 + 1 or 2 + 2 + 1 configuration. Also note that all hypervisors must be able to reach the majority coordinator group or their storage will be impacted as well.
|
||||
|
||||
* Even if the PVC software itself is in an unmanageable state, VMs will continue to run if at all possible. However, since the storage subsystem makes use of the same quorum, losing more than half of the nodes will very likely result in storage interruption as well, which will affect running VMs.
|
||||
|
||||
If these requirements cannot be fulfilled, it may be best to have separate PVC clusters at each site and handle service redundancy at a higher layer to avoid a major disruption.
|
||||
|
||||
## Example Configurations
|
||||
|
||||
This section provides diagrams of 3 possible node configurations, providing an idea of the sort of cluster topologies supported by PVC.
|
||||
This section provides diagrams of 3 possible node configurations. These diagrams can be extrapolated out to almost any possible configuration and number of nodes.
|
||||
|
||||
#### Basic 3-node cluster
|
||||
|
||||
|
@ -1,53 +0,0 @@
|
||||
# PVC Node Daemon architecture
|
||||
|
||||
The PVC Node Daemon is the heart of the PVC system and runs on each node to manage the state of the node and its configured resources. The daemon connects directly to the Zookeeper cluster for coordination and state.
|
||||
|
||||
The node daemon is built using Python 3.X and is packaged in the Debian package `pvc-daemon`.
|
||||
|
||||
Configuration of the daemon is documented in [the manual](/manuals/daemon); however, it is recommended to use the [Ansible configuration interface](/manuals/ansible) to configure the PVC system for you from scratch.
|
||||
|
||||
## Overall architecture
|
||||
|
||||
The PVC daemon is object-oriented - each cluster resource is represented by an Object, which is then present on each node in the cluster. This allows state changes to be reflected across the entire cluster whenever an object's data changes.
|
||||
|
||||
During startup, the system scans the Zookeeper database and sets up the required objects. The database is then watched in real-time for additional changes to the database information.
|
||||
|
||||
## Startup sequence
|
||||
|
||||
The daemon startup sequence is documented below. The main daemon entry-point is `Daemon.py` inside the `pvcd` folder, which is called from the `pvcd.py` stub file.
|
||||
|
||||
0. The configuration is read from `/etc/pvc/pvcd.yaml` and the configuration object set up.
|
||||
|
||||
0. Any required filesystem directories, mostly dynamic directories, are created.
|
||||
|
||||
0. The logger is set up. If file logging is enabled, this is the state when the first log messages are written.
|
||||
|
||||
0. Host networking is configured based on the `pvcd.yaml` configuration file. In a normal cluster, this is the point where the node will become reachable on the network as all networking is handled by the PVC node daemon.
|
||||
|
||||
0. Sysctl tweaks are applied to the host system, to enable routing/forwarding between nodes via the host.
|
||||
|
||||
0. The node determines its coordinator state and starts the required daemons if applicable. In a normal cluster, this is the point where the dependent services such as Zookeeper, FRR, and Ceph become available. After this step, the daemon waits 5 seconds before proceeding to give these daemons a chance to start up.
|
||||
|
||||
0. The daemon connects to the Zookeeper cluster and starts its listener. If the Zookeeper cluster is unavailable, it will wait some time before abandoning the attempt and starting again from step 1.
|
||||
|
||||
0. Termination handling/cleanup is configured.
|
||||
|
||||
0. The node checks if it is already present in the Zookeeper cluster; if not, it will add itself to the database. Initial static options are also updated in the database here. The daemon state transitions from `stop` to `init`.
|
||||
|
||||
0. The node checks if Libvirt is accessible.
|
||||
|
||||
0. The node starts up the NFT firewall if applicable and configures the base rule-set.
|
||||
|
||||
0. The node ensures that `dnsmasq` is stopped (legacy check, might be safe to remove eventually).
|
||||
|
||||
0. The node begins setting up the object representations of resources, in order:
|
||||
|
||||
a. Node entries
|
||||
|
||||
b. Network entries, creating client networks and starting them as required.
|
||||
|
||||
c. Domain (VM) entries, starting up the VMs as required.
|
||||
|
||||
d. Ceph storage entries (OSDs, Pools, Volumes, Snapshots).
|
||||
|
||||
0. The node activates its keepalived timer and begins sending keepalive updates to the cluster. The daemon state transitions from `init` to `run` and the system has started fully.
|
49
docs/faq.md
Normal file
@ -0,0 +1,49 @@
|
||||
# Frequently Asked Questions about Parallel Virtual Cluster
|
||||
|
||||
## General Questions
|
||||
|
||||
### What is it?
|
||||
|
||||
PVC is a virtual machine management suite designed around high availability. It can be considered an alternative to ProxMox, VMWare, Nutanix, and other similar solutions that manage not just the VMs, but the surrounding infrastructure as well.
|
||||
|
||||
### Why would you make this?
|
||||
|
||||
The full story can be found in the [about page](https://parallelvirtualcluster.readthedocs.io/en/latest/about), but after becoming frustrated by numerous other management tools, I discovered that what I wanted didn't exist as FLOSS software, so I built it myself.
|
||||
|
||||
### Is PVC right for me?
|
||||
|
||||
PVC might be right for you if:
|
||||
|
||||
1. You need KVM-based VMs.
|
||||
2. You want management of storage and networking (a.k.a. "batteries-included") in the same tool.
|
||||
3. You want hypervisor-level redundancy, able to tolerate hypervisor downtime seamlessly, for all elements of the stack.
|
||||
|
||||
I built PVC for my homelab first, found a perfect use case with my employer, and think it might be useful to you too.
|
||||
|
||||
### Is 3 hypervisors really the minimum?
|
||||
|
||||
For a redundant cluster, yes. PVC requires a majority quorum for several subsystems, and the smallest possible majority quorum is 2/3. That said, you can run PVC on a single node for testing/lab purposes without host-level redundancy, should you wish to do so.
|
||||
|
||||
## Feature Questions
|
||||
|
||||
### Does PVC support Docker/Kubernetes/LXC/etc.?
|
||||
|
||||
No. PVC supports only KVM VMs. To run Docker containers, etc., you would need to run a VM which then runs your containers.
|
||||
|
||||
### Does PVC have a WebUI?
|
||||
|
||||
Not yet. Right now, PVC management is done almost exclusively with an API and the included CLI interface to that API. A WebUI could and likely will be built in the future, but I'm not a frontend developer.
|
||||
|
||||
## Storage Questions
|
||||
|
||||
### Can I use RAID-5 with PVC?
|
||||
|
||||
The short answer is no. The long answer is: Ceph, the storage backend used by PVC, does support "erasure coded" pools which implement a RAID-5-like functionality. PVC does not support this for several reasons. If you use PVC, you must accept at the very least a 2x storage penalty, and for true safety and resiliency a 3x storage penalty, for VM storage. This is a trade-off of the architecture.
|
||||
|
||||
### Can I use spinning HDDs with PVC?
|
||||
|
||||
You can, but you won't like the results. SSDs are effectively required to obtain any sort of reasonable performance when running multiple VMs. Ideally, datacentre-grade SSDs as well, due to their significantly increased write endurance.
|
||||
|
||||
### What Ceph version does PVC use?
|
||||
|
||||
PVC requires Ceph 14.x (Nautilus). The official PVC repository includes Ceph 14.2.8. Debian Buster by default includes only 12.x (Luminous).
|
@ -1,4 +1,4 @@
|
||||
# PVC - The Parallel Virtual Cluster suite
|
||||
# PVC - The Parallel Virtual Cluster system
|
||||
|
||||
<p align="center">
|
||||
<img alt="Logo banner" src="https://git.bonifacelabs.ca/uploads/-/system/project/avatar/135/pvc_logo.png"/>
|
||||
@ -9,21 +9,30 @@
|
||||
<a href="https://parallelvirtualcluster.readthedocs.io/en/latest/?badge=latest"><img alt="Documentation Status" src="https://readthedocs.org/projects/parallelvirtualcluster/badge/?version=latest"/></a>
|
||||
</p>
|
||||
|
||||
PVC is a suite of Python 3 tools to manage virtualized clusters. It provides a fully-functional private cloud based on four key principles:
|
||||
PVC is a KVM+Ceph+Zookeeper-based, Free Software, scalable, redundant, self-healing, and self-managing private cloud solution designed with administrator simplicity in mind. It is built from the ground up to be redundant at the host layer, allowing the cluster to gracefully handle the loss of nodes or their components, whether due to hardware failure or maintenance. It is able to scale from a minimum of 3 nodes up to 12 or more nodes, while retaining performance and flexibility, allowing the administrator to build a small cluster today and grow it as needed.
|
||||
|
||||
1. Be Free Software Forever (or Bust)
|
||||
2. Be Opinionated and Efficient and Pick The Best Software
|
||||
3. Be Scalable and Redundant but Not Hyperscale
|
||||
4. Be Simple To Use, Configure, and Maintain
|
||||
The major goal of PVC is to be administrator friendly, providing the power of Enterprise-grade private clouds like OpenStack, Nutanix, and VMWare to homelabbers, SMBs, and small ISPs, without the cost or complexity. It believes in picking the best tool for a job and abstracting it behind the cluster as a whole, freeing the administrator from the boring and time-consuming task of selecting the best component, and letting them get on with the things that really matter. Administration can be done from a simple CLI or via a RESTful API capable of building full-featured web frontends or additional applications, taking a self-documenting approach to keep the administrator learning curve as low as possible. Setup is easy and straightforward with an [ISO-based node installer](https://github.com/parallelvirtualcluster/pvc-installer) and [Ansible role framework](https://github.com/parallelvirtualcluster/pvc-ansible) designed to get a cluster up and running as quickly as possible. Build your cloud in an hour, grow it as you need, and never worry about it: just add physical servers.
|
||||
|
||||
It is designed to be an administrator-friendly but extremely powerful and rich modern private cloud system, but without the feature bloat and complexity of tools like OpenStack. With PVC, an administrator can provision, manage, and update a cluster of dozens or more hypervisors running thousands of VMs using a simple CLI tool, HTTP API, or web interface. PVC is based entirely on Debian GNU/Linux and Free-and-Open-Source tools, providing the glue to bootstrap, provision and manage the cluster, then getting out of the administrators' way.
|
||||
## Getting Started
|
||||
|
||||
Your cloud, the best way; just add physical servers.
|
||||
|
||||
To get started with PVC, read the [Cluster Architecture document](/architecture/cluster), then see [Installing](/installing) for details on setting up a set of PVC nodes, using [`pvc-ansible`](/manuals/ansible) to configure and bootstrap a cluster, and managing it with the [`pvc` cli](/manuals/cli) or [HTTP API](/manuals/api). For details on the project, its motivation, and architectural details, see [the About page](/about).
|
||||
To get started with PVC, read the [Cluster Architecture document](https://parallelvirtualcluster.readthedocs.io/en/latest/architecture/cluster/) and [Frequently Asked Questions](https://parallelvirtualcluster.readthedocs.io/en/latest/faq/), then see [Installing](https://parallelvirtualcluster.readthedocs.io/en/latest/installing) for details on setting up a set of PVC nodes, using the [PVC Ansible](https://parallelvirtualcluster.readthedocs.io/en/latest/manuals/ansible) framework to configure and bootstrap a cluster, and managing it with the [`pvc` CLI tool](https://parallelvirtualcluster.readthedocs.io/en/latest/manuals/cli) or [RESTful HTTP API](https://parallelvirtualcluster.readthedocs.io/en/latest/manuals/api). For details on the project, its motivation, and architectural details, see [the About page](https://parallelvirtualcluster.readthedocs.io/en/latest/about).
|
||||
|
||||
## Changelog
|
||||
|
||||
#### v0.9.0
|
||||
|
||||
Numerous small improvements and bugfixes. This release is suitable for general use and is pre-release-quality software.
|
||||
|
||||
This release introduces an updated version scheme; all future stable releases until 1.0.0 is ready will be made under this 0.9.z naming. This does not represent semantic versioning and all changes (feature, improvement, or bugfix) will be considered for inclusion in this release train.
|
||||
|
||||
#### v0.8
|
||||
|
||||
Numerous improvements and bugfixes. This release is suitable for general use and is pre-release-quality software.
|
||||
|
||||
#### v0.7
|
||||
|
||||
Numerous improvements and bugfixes, revamped documentation. This release is suitable for general use and is beta-quality software.
|
||||
|
||||
#### v0.6
|
||||
|
||||
Numerous improvements and bugfixes, full implementation of the provisioner, full implementation of the API CLI client (versus direct CLI client). This release is suitable for general use and is beta-quality software.
|
||||
|
@ -6,6 +6,8 @@ This guide will walk you through setting up a simple 3-node PVC cluster from scr
|
||||
|
||||
### Part One - Preparing for bootstrap
|
||||
|
||||
0. Read through the [Cluster Architecture documentation](/architecture/cluster). This documentation details the requirements and conventions of a PVC cluster, and is important to understand before proceeding.
|
||||
|
||||
0. Download the latest copy of the [`pvc-installer`](https://github.com/parallelvirtualcluster/pvc-installer) and [`pvc-ansible`](https://github.com/parallelvirtualcluster/pvc-ansible) repositories to your local machine.
|
||||
|
||||
0. In `pvc-ansible`, create an initial `hosts` inventory, using `hosts.default` as a template. You can manage multiple PVC clusters ("sites") from the Ansible repository easily, however for simplicity you can use the simple name `cluster` for your initial site. Define the 3 hostnames you will use under the site group; usually the provided names of `pvchv1`, `pvchv2`, and `pvchv3` are sufficient, though you may use any hostname pattern you wish. It is *very important* that the names all contain a sequential number, however, as this is used by various components.
|
||||
@ -124,122 +126,11 @@ All steps in this and following sections can be performed using either the CLI c
|
||||
|
||||
0. Verify the client networks are reachable by pinging the managed gateway from outside the cluster.
|
||||
|
||||
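For example, assuming a managed network was created with a hypothetical gateway of `10.100.0.1`, verification from a workstation outside the cluster might look like:

```
$ ping -c 3 10.100.0.1
```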
### Part Six - Setting nodes ready and deploying a VM
|
||||
|
||||
This section walks through deploying a simple Debian VM to the cluster with Debootstrap. Note that as of PVC version `0.5`, this is still a manual process, though automated deployment of VMs based on configuration templates and image snapshots is planned for version `0.6`. This section can be used as a basis for a scripted installer, or a manual process as the administrator sees fit.
|
||||
|
||||
0. Set all 3 nodes to `ready` state, allowing them to run virtual machines. The general command is:
|
||||
`$ pvc node ready <node>`
|
||||
|
||||
0. Create an RBD image for the VM. The general command is:
|
||||
`$ pvc storage volume add <pool> <name> <size>`
|
||||
### You're Done!
|
||||
|
||||
For example, to create a 20GB disk for a VM called `test1` in the previously-configured pool `vms`, run the command as follows:
|
||||
`$ pvc storage volume add vms test1_disk0 20G`
|
||||
Congratulations, you now have a basic PVC storage cluster, ready to run your VMs.
|
||||
|
||||
0. Verify the RBD image was created:
|
||||
`$ pvc storage volume list`
|
||||
|
||||
0. On one of the PVC nodes, for example `pvchv1`, map the RBD volume to the local system:
|
||||
`$ rbd map vms/test1_disk0`
|
||||
|
||||
The resulting disk device will be available at `/dev/rbd/vms/test1_disk0` or `/dev/rbd0`.
|
||||
|
||||
0. Create a filesystem on the block device, for example `ext4`:
|
||||
`$ mkfs -t ext4 /dev/rbd/vms/test1_disk0`
|
||||
|
||||
0. Create a temporary directory and mount the block device to it, using `mount` to find the directory:
|
||||
`$ mount /dev/rbd/vms/test1_disk0 $( mktemp -d )`
|
||||
`$ mount | grep rbd`
|
||||
|
||||
0. Run a `debootstrap` installation to the volume:
|
||||
`$ debootstrap buster <temporary_mountpoint> http://ftp.mirror.debian.org/debian`
|
||||
|
||||
0. Bind mount the various required directories to the new system:
|
||||
`$ mount --bind /dev <temporary_mountpoint>/dev`
|
||||
`$ mount --bind /dev/pts <temporary_mountpoint>/dev/pts`
|
||||
`$ mount --bind /proc <temporary_mountpoint>/proc`
|
||||
`$ mount --bind /sys <temporary_mountpoint>/sys`
|
||||
`$ mount --bind /run <temporary_mountpoint>/run`
|
||||
|
||||
0. Using `chroot`, configure the VM system as required, for instance installing packages or adding users:
|
||||
`$ chroot <temporary_mountpoint>`
|
||||
`[chroot]$ ...`
|
||||
|
||||
0. Install the GRUB bootloader in the VM system, and install GRUB to the RBD device:
|
||||
`[chroot]$ apt install grub-pc`
|
||||
`[chroot]$ grub-install /dev/rbd/vms/test1_disk0`
|
||||
|
||||
0. Exit the `chroot` environment, unmount the temporary mountpoint, and unmap the RBD device:
|
||||
`[chroot]$ exit`
|
||||
`$ umount <temporary_mountpoint>`
|
||||
`$ rbd unmap /dev/rbd0`
|
||||
|
||||
0. Prepare a Libvirt XML configuration, obtaining the required Ceph storage secret and a new random VM UUID first. This example provides a very simple VM with 1 vCPU, 1GB RAM, the previously-configured network `100`, and the previously-configured disk `vms/test1_disk0`:
|
||||
`$ virsh secret-list`
|
||||
`$ uuidgen`
|
||||
`$ $EDITOR /tmp/test1.xml`
|
||||
|
||||
```
|
||||
<domain type='kvm'>
|
||||
<name>test1</name>
|
||||
<uuid>[INSERT GENERATED UUID]</uuid>
|
||||
<description>Testing VM</description>
|
||||
<memory unit='MiB'>1024</memory>
|
||||
<vcpu>1</vcpu>
|
||||
<os>
|
||||
<type arch='x86_64' machine='pc-i440fx-2.7'>hvm</type>
|
||||
<boot dev='hd'/>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
<apic/>
|
||||
<pae/>
|
||||
</features>
|
||||
<clock offset='utc'/>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>restart</on_crash>
|
||||
<devices>
|
||||
<emulator>/usr/bin/kvm</emulator>
|
||||
<controller type='usb' index='0'/>
|
||||
<controller type='pci' index='0' model='pci-root'/>
|
||||
<serial type='pty'/>
|
||||
<console type='pty'/>
|
||||
<disk type='network' device='disk'>
|
||||
<driver name='qemu' discard='unmap'/>
|
||||
<auth username='libvirt'>
|
||||
<secret type='ceph' uuid='[INSERT CEPH STORAGE SECRET]'/>
|
||||
</auth>
|
||||
<source protocol='rbd' name='vms/test1_disk0'>
|
||||
<host name='[INSERT FIRST COORDINATOR CLUSTER NETWORK FQDN]' port='6789'/>
|
||||
<host name='[INSERT SECOND COORDINATOR CLUSTER NETWORK FQDN]' port='6789'/>
|
||||
<host name='[INSERT THIRD COORDINATOR CLUSTER NETWORK FQDN]' port='6789'/>
|
||||
</source>
|
||||
<target dev='sda' bus='scsi'/>
|
||||
</disk>
|
||||
<interface type='bridge'>
|
||||
<mac address='52:54:00:12:34:56'/>
|
||||
<source bridge='vmbr100'/>
|
||||
<model type='virtio'/>
|
||||
</interface>
|
||||
<controller type='scsi' index='0' model='virtio-scsi'/>
|
||||
</devices>
|
||||
</domain>
|
||||
```
|
||||
|
||||
**NOTE:** This Libvirt XML is only a sample; it should be modified to fit the specifics of the VM. Alternatively to manual configuration, one can use a tool like `virt-manager` to generate valid Libvirt XML configurations for PVC to use.
|
||||
|
||||
0. Define the VM in the PVC cluster:
|
||||
`$ pvc vm define /tmp/test1.xml`
|
||||
|
||||
0. Verify the VM is present in the cluster:
|
||||
`$ pvc vm info test1`
|
||||
|
||||
0. Start the VM and watch the console log:
|
||||
`$ pvc vm start test1`
|
||||
`$ pvc vm log -f test1`
|
||||
|
||||
If all has gone well until this point, you should now be able to watch your new VM boot on the cluster, grab DHCP from the managed network, and run away doing its thing. You could now, for instance, move it permanently to another node with the `pvc vm move -t <node> test1` command, or temporarily with the `pvc vm migrate -t <node> test1` command and back again with the `pvc vm unmigrate test1` command.
|
||||
|
||||
For more details on what to do next, see the [CLI manual](/manuals/cli) for a full list of management functions, SSH into your new VM, and start provisioning more. Your new private cloud is now here!
|
||||
For next steps, see the [Provisioner manual](/manuals/provisioner) for details on how to use the PVC provisioner to create new Virtual Machines, as well as the [CLI manual](/manuals/cli) and [API manual](/manuals/api) for details on day-to-day usage of PVC.
|
||||
|
@ -1,3 +1,47 @@
|
||||
# PVC Ansible architecture
|
||||
|
||||
The PVC Ansible setup and management framework is written in Ansible. It consists of two roles: `base` and `pvc`.
|
||||
|
||||
## Base role
|
||||
|
||||
The Base role configures a node to a specific, standard base Debian system, with a number of PVC-specific tweaks. Some examples include:
|
||||
|
||||
* Installing the custom PVC repository at Boniface Labs.
|
||||
|
||||
* Removing several unnecessary packages and installing numerous additional packages.
|
||||
|
||||
* Automatically configuring network interfaces based on the `group_vars` configuration.
|
||||
|
||||
* Configuring several general `sysctl` settings for optimal performance.
|
||||
|
||||
* Installing and configuring rsyslog, postfix, ntpd, ssh, and fail2ban.
|
||||
|
||||
* Creating the users specified in the `group_vars` configuration.
|
||||
|
||||
* Installing custom MOTDs, bashrc files, vimrc files, and other useful configurations for each user.
|
||||
|
||||
The end result is a standardized "PVC node" system ready to have the daemons installed by the PVC role.
|
||||
|
||||
## PVC role
|
||||
|
||||
The PVC role configures all the dependencies of PVC, including storage, networking, and databases, then installs the PVC daemon itself. Specifically, it will, in order:
|
||||
|
||||
* Install Ceph, configure and bootstrap a new cluster if `bootstrap=yes` is set, configure the monitor and manager daemons, and start up the cluster ready for the addition of OSDs via the client interface (coordinators only).
|
||||
|
||||
* Install, configure, and if `bootstrap=yes` is set, bootstrap a Zookeeper cluster (coordinators only).
|
||||
|
||||
* Install, configure, and if `bootstrap=yes` is set, bootstrap a Patroni PostgreSQL cluster for the PowerDNS aggregator (coordinators only).
|
||||
|
||||
* Install and configure Libvirt.
|
||||
|
||||
* Install and configure FRRouting.
|
||||
|
||||
* Install and configure the main PVC daemon and API client, including initializing the PVC cluster (`pvc init`).
|
||||
|
||||
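As a sketch of a first run against a new cluster (the inventory and playbook names follow the `pvc-ansible` repository conventions, but are assumptions here):

```
$ ansible-playbook -v -i hosts pvc.yml -l cluster -e bootstrap=yes
```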
## Completion
|
||||
|
||||
Once the entire playbook has run for the first time against a given host, the host will be rebooted to apply all the configured services. On startup, the system should immediately launch the PVC daemon, check in to the Zookeeper cluster, and become ready. The node will be in `flushed` state on its first boot; the administrator will need to run `pvc node unflush <node>` to set the node into active state ready to handle virtual machines.
|
||||
|
||||
# PVC Ansible configuration manual
|
||||
|
||||
This manual documents the various `group_vars` configuration options for the `pvc-ansible` framework. We assume that the administrator is generally familiar with Ansible and its operation.
|
||||
|
@ -1,3 +1,11 @@
|
||||
# PVC API architecture
|
||||
|
||||
The PVC API is a standalone client application for PVC. It interfaces directly with the Zookeeper database to manage state.
|
||||
|
||||
The API is built using Flask and is packaged in the Debian package `pvc-client-api`. The API depends on the common client functions of the `pvc-client-common` package, as does the CLI client.
|
||||
|
||||
Details of the API interface can be found in [the manual](/manuals/api).
|
||||
|
||||
# PVC HTTP API manual
|
||||
|
||||
The PVC HTTP API client is built with Flask, a Python framework for creating API interfaces, and run directly with the PyWSGI framework. It interfaces directly with the Zookeeper cluster to send and receive information about the cluster. It supports authentication configured statically via tokens in the configuration file as well as SSL. It also includes the provisioner client, an optional section that can be used to create VMs automatically using a set of templates and standardized scripts.
|
||||
@ -8,7 +16,7 @@ The [`pvc-ansible`](https://github.com/parallelvirtualcluster/pvc-ansible) frame
|
||||
|
||||
### SSL
|
||||
|
||||
The API accepts SSL certificate and key files via the `pvc-api.yaml` configuration to enable SSL support for the API, which protects the data and query values from snooping or tampering. SSL is strongly recommended if using the API outside of a trusted local area network.
|
||||
The API accepts SSL certificate and key files via the `pvcapid.yaml` configuration to enable SSL support for the API, which protects the data and query values from snooping or tampering. SSL is strongly recommended if using the API outside of a trusted local area network.
|
||||
|
||||
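A minimal sketch of querying the API over SSL, assuming token authentication is enabled; the `X-Api-Key` header, the `/api/v1/status` endpoint, and the hostname are assumptions for illustration, and the port matches the API default shown elsewhere in this manual:

```
$ curl --cacert /path/to/ca.pem -H "X-Api-Key: <token>" \
    https://pvc.example.com:7370/api/v1/status
```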
### API authentication
|
||||
|
||||
@ -148,7 +156,7 @@ curl -X GET http://localhost:7370/api/v1/provisioner/status/<task-id>
|
||||
|
||||
## API Daemon Configuration
|
||||
|
||||
The API is configured using a YAML configuration file which is passed in to the API process by the environment variable `PVC_CONFIG_FILE`. When running with the default package and SystemD unit, this file is located at `/etc/pvc/pvc-api.yaml`.
|
||||
The API is configured using a YAML configuration file which is passed in to the API process by the environment variable `PVC_CONFIG_FILE`. When running with the default package and SystemD unit, this file is located at `/etc/pvc/pvcapid.yaml`.
|
||||
|
||||
### Conventions
|
||||
|
||||
@ -156,7 +164,7 @@ The API is configured using a YAML configuration file which is passed in to the
|
||||
|
||||
* Settings may `depends` on other settings. This indicates that, if one setting is enabled, the other setting is very likely `required` by that setting.
|
||||
|
||||
### `pvc-api.yaml`
|
||||
### `pvcapid.yaml`
|
||||
|
||||
Example configuration:
|
||||
|
||||
@ -185,9 +193,9 @@ pvc:
|
||||
database:
|
||||
host: 10.100.0.252
|
||||
port: 5432
|
||||
name: pvcprov
|
||||
user: pvcprov
|
||||
pass: pvcprov
|
||||
name: pvcapi
|
||||
user: pvcapi
|
||||
pass: pvcapi
|
||||
queue:
|
||||
host: localhost
|
||||
port: 6379
|
||||
@ -286,7 +294,7 @@ The port of the PostgreSQL instance for the Provisioner database. Should always
|
||||
|
||||
* *required*
|
||||
|
||||
The database name for the Provisioner database. Should always be `pvcprov`.
|
||||
The database name for the Provisioner database. Should always be `pvcapi`.
|
||||
|
||||
##### `provisioner` → `database` → `user`
|
||||
|
||||
|
@ -1,10 +1,18 @@
|
||||
# PVC CLI architecture
|
||||
|
||||
The PVC CLI is a standalone client application for PVC. It interfaces with the PVC API, via a configurable list of clusters with customizable hosts, ports, addresses, and authentication.
|
||||
|
||||
The CLI is built using Click and is packaged in the Debian package `pvc-client-cli`. The CLI does not depend on any other PVC components and can be used independently on arbitrary systems.
|
||||
|
||||
The CLI is self-documenting; however, [the manual](/manuals/cli) details the required configuration.
|
||||
|
||||
# PVC CLI client manual
|
||||
|
||||
The PVC CLI client is built with Click, a Python framework for creating self-documenting CLI applications. It interfaces with the PVC API.
|
||||
|
||||
Use the `-h` option at any level of the `pvc` CLI command to receive help about the available commands and options.
|
||||
|
||||
Before using the CLI on a non-PVC node system, at least one cluster must be added using the `pvc cluster` subcommands. Running the CLI on hosts which also run the PVC API (via its configuration at `/etc/pvc/pvc-api.yaml`) uses the special `local` cluster, reading information from the API configuration, by default.
|
||||
Before using the CLI on a non-PVC node system, at least one cluster must be added using the `pvc cluster` subcommands. Running the CLI on hosts which also run the PVC API (via its configuration at `/etc/pvc/pvcapid.yaml`) uses the special `local` cluster, reading information from the API configuration, by default.
|
||||
|
||||
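A minimal sketch of adding and then using a remote cluster (the hostname and key are placeholders, and the option names should be confirmed with `pvc cluster add -h`):

```
$ pvc cluster add -a pvc.example.com -p 7370 -k <api-key> mycluster
$ pvc -c mycluster node list
```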
## Configuration
|
||||
|
||||
|
@ -1,10 +1,64 @@
|
||||
# PVC Node Daemon architecture
|
||||
|
||||
The PVC Node Daemon is the heart of the PVC system and runs on each node to manage the state of the node and its configured resources. The daemon connects directly to the Zookeeper cluster for coordination and state.
|
||||
|
||||
The node daemon is built using Python 3.X and is packaged in the Debian package `pvc-daemon`.
|
||||
|
||||
Configuration of the daemon is documented in [the manual](/manuals/daemon); however, it is recommended to use the [Ansible configuration interface](/manuals/ansible) to configure the PVC system for you from scratch.
|
||||
|
||||
## Overall architecture
|
||||
|
||||
The PVC daemon is object-oriented - each cluster resource is represented by an Object, which is then present on each node in the cluster. This allows state changes to be reflected across the entire cluster whenever an object's data changes.
|
||||
|
||||
During startup, the system scans the Zookeeper database and sets up the required objects. The database is then watched in real-time for additional changes to the database information.
|
||||
|
||||
## Startup sequence
|
||||
|
||||
The daemon startup sequence is documented below. The main daemon entry-point is `Daemon.py` inside the `pvcnoded` folder, which is called from the `pvcnoded.py` stub file.
|
||||
|
||||
0. The configuration is read from `/etc/pvc/pvcnoded.yaml` and the configuration object set up.
|
||||
|
||||
0. Any required filesystem directories, mostly dynamic directories, are created.
|
||||
|
||||
0. The logger is set up. If file logging is enabled, this is the state when the first log messages are written.
|
||||
|
||||
0. Host networking is configured based on the `pvcnoded.yaml` configuration file. In a normal cluster, this is the point where the node will become reachable on the network as all networking is handled by the PVC node daemon.
|
||||
|
||||
0. Sysctl tweaks are applied to the host system, to enable routing/forwarding between nodes via the host.
|
||||
|
||||
0. The node determines its coordinator state and starts the required daemons if applicable. In a normal cluster, this is the point where the dependent services such as Zookeeper, FRR, and Ceph become available. After this step, the daemon waits 5 seconds before proceeding to give these daemons a chance to start up.
|
||||
|
||||
0. The daemon connects to the Zookeeper cluster and starts its listener. If the Zookeeper cluster is unavailable, it will wait some time before abandoning the attempt and starting again from step 1.
|
||||
|
||||
0. Termination handling/cleanup is configured.
|
||||
|
||||
0. The node checks if it is already present in the Zookeeper cluster; if not, it will add itself to the database. Initial static options are also updated in the database here. The daemon state transitions from `stop` to `init`.
|
||||
|
||||
0. The node checks if Libvirt is accessible.
|
||||
|
||||
0. The node starts up the NFT firewall if applicable and configures the base rule-set.
|
||||
|
||||
0. The node ensures that `dnsmasq` is stopped (legacy check, might be safe to remove eventually).
|
||||
|
||||
0. The node begins setting up the object representations of resources, in order:
|
||||
|
||||
a. Node entries
|
||||
|
||||
b. Network entries, creating client networks and starting them as required.
|
||||
|
||||
c. Domain (VM) entries, starting up the VMs as required.
|
||||
|
||||
d. Ceph storage entries (OSDs, Pools, Volumes, Snapshots).
|
||||
|
||||
0. The node activates its keepalived timer and begins sending keepalive updates to the cluster. The daemon state transitions from `init` to `run` and the system has started fully.
|
||||
|
||||
# PVC Node Daemon manual
|
||||
|
||||
The PVC node daemon is built with Python 3 and is run directly on nodes. For details of the startup sequence and general layout, see the [architecture document](/architecture/daemon).
|
||||
|
||||
## Configuration
|
||||
|
||||
The Daemon is configured using a YAML configuration file which is passed to the daemon process by the environment variable `PVCD_CONFIG_FILE`. When running with the default package and SystemD unit, this file is located at `/etc/pvc/pvcd.yaml`.
|
||||
The Daemon is configured using a YAML configuration file which is passed to the daemon process by the environment variable `PVCD_CONFIG_FILE`. When running with the default package and SystemD unit, this file is located at `/etc/pvc/pvcnoded.yaml`.
|
||||
|
||||
For most deployments, the management of the configuration file is handled entirely by the [PVC Ansible framework](/manuals/ansible) and should not be modified directly. Many options from the Ansible framework map directly into the configuration options in this file.
|
||||
|
||||
@ -14,7 +68,7 @@ For most deployments, the management of the configuration file is handled entire
|
||||
|
||||
* Settings may `depends` on other settings. This indicates that, if one setting is enabled, the other setting is very likely `required` by that setting.
|
||||
|
||||
### `pvcd.yaml`
|
||||
### `pvcnoded.yaml`
|
||||
|
||||
Example configuration:
|
||||
|
||||
@ -58,9 +112,9 @@ pvc:
|
||||
database:
|
||||
host: localhost
|
||||
port: 5432
|
||||
name: pvcprov
|
||||
user: pvcprov
|
||||
pass: pvcprovPassw0rd
|
||||
name: pvcapi
|
||||
user: pvcapi
|
||||
pass: pvcapiPassw0rd
|
||||
system:
|
||||
fencing:
|
||||
intervals:
|
||||
@ -225,7 +279,7 @@ The port of the PostgreSQL instance for the Provisioner database. Should always
|
||||
|
||||
* *required*
|
||||
|
||||
The database name for the Provisioner database. Should always be `pvcprov`.
|
||||
The database name for the Provisioner database. Should always be `pvcapi`.
|
||||
|
||||
##### `metadata` → `database` → `user`
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
# PVC Provisioner API architecture
|
||||
# PVC Provisioner manual
|
||||
|
||||
The PVC provisioner is a subsection of the main PVC API. It interfaces directly with the Zookeeper database using the common client functions, and with the Patroni PostgreSQL database to store details. The provisioner also interfaces directly with the Ceph storage cluster, for mapping volumes, creating filesystems, and installing guests.
|
||||
|
||||
@ -10,10 +10,18 @@ The purpose of the Provisioner API is to provide a convenient way for administra
|
||||
|
||||
The Provisioner allows the administrator to construct descriptions of VMs, called profiles, which include system resource specifications, network interfaces, disks, cloud-init userdata, and installation scripts. These profiles are highly modular, allowing the administrator to specify arbitrary combinations of the mentioned VM features with which to build new VMs.
|
||||
|
||||
Currently, the provisioner supports creating VMs based off of installation scripts, or by cloning existing volumes. Future versions of PVC will allow the uploading of arbitrary images (either disk or ISO images) to cluster volumes, permitting even more flexibility in the installation of VMs.
|
||||
The provisioner supports creating VMs from installation scripts, by cloning existing volumes, or by uploading OVA image templates to the cluster.
|
||||
|
||||
Examples in the following sections use the CLI exclusively for demonstration purposes. For details of the underlying API calls, please see the [API interface reference](/manuals/api-reference.html).
|
||||
|
||||
# Deploying VMs from OVA images
|
||||
|
||||
PVC supports deploying virtual machines from industry-standard OVA images. OVA images can be uploaded to the cluster with the `pvc provisioner ova` commands, and deployed via the created profile(s) using the `pvc provisioner create` command. Additionally, the profile(s) can be modified to suit your specific needs via the provisioner template system detailed below.
|
||||
|
||||
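A minimal sketch of the workflow (the OVA name, file path, pool, and VM name are placeholders; confirm the exact arguments with `pvc provisioner ova upload -h`):

```
$ pvc provisioner ova upload debian10 /tmp/debian10.ova --pool vms
$ pvc provisioner create test1 debian10
```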
# Deploying VMs from provisioner scripts
|
||||
|
||||
PVC supports deploying virtual machines using administrator-provided scripts, using templates, profiles, and Cloud-init userdata to control the deployment process as desired. This deployment method permits the administrator to deploy POSIX-like systems such as Linux or BSD directly from a companion tool such as `debootstrap` on-demand and with maximum flexibility.
|
||||
|
||||
## Templates
|
||||
|
||||
The PVC Provisioner features three categories of templates to specify the resources allocated to the virtual machine. They are: System Templates, Network Templates, and Disk Templates.
|
@ -62,6 +62,11 @@
|
||||
"description": "The total number of snapshots in the storage cluster",
|
||||
"type": "integer"
|
||||
},
|
||||
"storage_health": {
|
||||
"description": "The overall storage cluster health",
|
||||
"example": "Optimal",
|
||||
"type": "string"
|
||||
},
|
||||
"upstream_ip": {
|
||||
"description": "The cluster upstream IP address in CIDR format",
|
||||
"example": "10.0.0.254/24",
|
||||
@ -422,13 +427,17 @@
|
||||
"memory": {
|
||||
"properties": {
|
||||
"allocated": {
|
||||
"description": "The total amount of RAM allocated to domains in MB",
|
||||
"description": "The total amount of RAM allocated to running domains in MB",
|
||||
"type": "integer"
|
||||
},
|
||||
"free": {
|
||||
"description": "The total free RAM on the node in MB",
|
||||
"type": "integer"
|
||||
},
|
||||
"provisioned": {
|
||||
"description": "The total amount of RAM provisioned to all domains (regardless of state) on this node in MB",
|
||||
"type": "integer"
|
||||
},
|
||||
"total": {
|
||||
"description": "The total amount of node RAM in MB",
|
||||
"type": "integer"
|
||||
@ -554,6 +563,48 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"ova": {
|
||||
"properties": {
|
||||
"id": {
|
||||
"description": "Internal provisioner OVA ID",
|
||||
"type": "integer"
|
||||
},
|
||||
"name": {
|
||||
"description": "OVA name",
|
||||
"type": "string"
|
||||
},
|
||||
"volumes": {
|
||||
"items": {
|
||||
"id": "ova_volume",
|
||||
"properties": {
|
||||
"disk_id": {
|
||||
"description": "Disk identifier",
|
||||
"type": "string"
|
||||
},
|
||||
"disk_size_gb": {
|
||||
"description": "Disk size in GB",
|
||||
"type": "string"
|
||||
},
|
||||
"pool": {
|
||||
"description": "Pool containing the OVA volume",
|
||||
"type": "string"
|
||||
},
|
||||
"volume_format": {
|
||||
"description": "OVA image format",
|
||||
"type": "string"
|
||||
},
|
||||
"volume_name": {
|
||||
"description": "Storage volume containing the OVA image",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"type": "list"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"pool": {
|
||||
"properties": {
|
||||
"name": {
|
||||
@ -757,6 +808,146 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"storagebenchmark": {
|
||||
"properties": {
|
||||
"benchmark_result": {
|
||||
"properties": {
|
||||
"test_name": {
|
||||
"properties": {
|
||||
"bandwidth": {
|
||||
"properties": {
|
||||
"max": {
|
||||
"description": "The maximum bandwidth (KiB/s) measurement",
|
||||
"type": "string (integer)"
|
||||
},
|
||||
"mean": {
|
||||
"description": "The mean bandwidth (KiB/s) measurement",
|
||||
"type": "string (float)"
|
||||
},
|
||||
"min": {
|
||||
"description": "The minimum bandwidth (KiB/s) measurement",
|
||||
"type": "string (integer)"
|
||||
},
|
||||
"numsamples": {
|
||||
"description": "The number of samples taken during the test",
|
||||
"type": "string (integer)"
|
||||
},
|
||||
"stdev": {
|
||||
"description": "The standard deviation of bandwidth",
|
||||
"type": "string (float)"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"cpu": {
|
||||
"properties": {
|
||||
"ctxsw": {
|
||||
"description": "The number of context switches during the test",
|
||||
"type": "string (integer)"
|
||||
},
|
||||
"majfault": {
|
||||
"description": "The number of major page faults during the test",
|
||||
"type": "string (integer)"
|
||||
},
|
||||
"minfault": {
|
||||
"description": "The number of minor page faults during the test",
|
||||
"type": "string (integer)"
|
||||
},
|
||||
"system": {
|
||||
"description": "The percentage of test time spent in system (kernel) space",
|
||||
"type": "string (float percentage)"
|
||||
},
|
||||
"user": {
|
||||
"description": "The percentage of test time spent in user space",
|
||||
"type": "string (float percentage)"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"iops": {
|
||||
"properties": {
|
||||
"max": {
|
||||
"description": "The maximum IOPS measurement",
|
||||
"type": "string (integer)"
|
||||
},
|
||||
"mean": {
|
||||
"description": "The mean IOPS measurement",
|
||||
"type": "string (float)"
|
||||
},
|
||||
"min": {
|
||||
"description": "The minimum IOPS measurement",
|
||||
"type": "string (integer)"
|
||||
},
|
||||
"numsamples": {
|
||||
"description": "The number of samples taken during the test",
|
||||
"type": "string (integer)"
|
||||
},
|
||||
"stdev": {
|
||||
"description": "The standard deviation of IOPS",
|
||||
"type": "string (float)"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"latency": {
|
||||
"properties": {
|
||||
"max": {
|
||||
"description": "The maximum latency measurement",
|
||||
"type": "string (integer)"
|
||||
},
|
||||
"mean": {
|
||||
"description": "The mean latency measurement",
|
||||
"type": "string (float)"
|
||||
},
|
||||
"min": {
|
||||
"description": "The minimum latency measurement",
|
||||
"type": "string (integer)"
|
||||
},
|
||||
"stdev": {
|
||||
"description": "The standard deviation of latency",
|
||||
"type": "string (float)"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"overall": {
|
||||
"properties": {
|
||||
"bandwidth": {
|
||||
"description": "The average bandwidth (KiB/s)",
|
||||
"type": "string (integer)"
|
||||
},
|
||||
"iops": {
|
||||
"description": "The average IOPS",
|
||||
"type": "string (integer)"
|
||||
},
|
||||
"iosize": {
|
||||
"description": "The total size of the benchmark data",
|
||||
"type": "string (integer)"
|
||||
},
|
||||
"runtime": {
|
||||
"description": "The total test time in milliseconds",
|
||||
"type": "string (integer)"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"id": {
|
||||
"description": "The database ID of the test result",
|
||||
"type": "string (containing integer)"
|
||||
},
|
||||
"job": {
|
||||
"description": "The job name (an ISO date) of the test result",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"system-template": {
|
||||
"properties": {
|
||||
"id": {
|
||||
@ -866,9 +1057,25 @@
|
||||
"description": "The full name of the volume in \"pool/volume\" format",
|
||||
"type": "string"
|
||||
},
|
||||
"rd_bytes": {
|
||||
"description": "The number of read bytes from the volume",
|
||||
"type": "integer"
|
||||
},
|
||||
"rd_req": {
|
||||
"description": "The number of read requests from the volume",
|
||||
"type": "integer"
|
||||
},
|
||||
"type": {
|
||||
"description": "The type of volume",
|
||||
"type": "string"
|
||||
},
|
||||
"wr_bytes": {
|
||||
"description": "The number of write bytes to the volume",
|
||||
"type": "integer"
|
||||
},
|
||||
"wr_req": {
|
||||
"description": "The number of write requests to the volume",
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@ -902,6 +1109,51 @@
|
||||
"description": "The assigned RAM of the VM in MB",
|
||||
"type": "integer"
|
||||
},
|
||||
"memory_stats": {
|
||||
"properties": {
|
||||
"actual": {
|
||||
"description": "The total active memory of the VM in kB",
|
||||
"type": "integer"
|
||||
},
|
||||
"available": {
|
||||
"description": "The total amount of usable memory as seen by the domain in kB",
|
||||
"type": "integer"
|
||||
},
|
||||
"last_update": {
|
||||
"description": "Timestamp of the last update of statistics, in seconds",
|
||||
"type": "integer"
|
||||
},
|
||||
"major_fault": {
|
||||
"description": "The number of major page faults",
|
||||
"type": "integer"
|
||||
},
|
||||
"minor_fault": {
|
||||
"description": "The number of minor page faults",
|
||||
"type": "integer"
|
||||
},
|
||||
"rss": {
|
||||
"description": "The Resident Set Size of the process running the domain in kB",
|
||||
"type": "integer"
|
||||
},
|
||||
"swap_in": {
|
||||
"description": "The amount of swapped in data in kB",
|
||||
"type": "integer"
|
||||
},
|
||||
"swap_out": {
|
||||
"description": "The amount of swapped out data in kB",
|
||||
"type": "integer"
|
||||
},
|
||||
"unused": {
|
||||
"description": "The amount of memory left completely unused by the system in kB",
|
||||
"type": "integer"
|
||||
},
|
||||
"usable": {
|
||||
"description": "How much the balloon can be inflated without pushing the guest system to swap in kB",
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"migrated": {
|
||||
"description": "Whether the VM has been migrated, either \"no\" or \"from <last_node>\"",
|
||||
"type": "string"
|
||||
@@ -922,6 +1174,22 @@
"description": "The virtual network device model",
"type": "string"
},
"rd_bytes": {
"description": "The number of read bytes on the interface",
"type": "integer"
},
"rd_drops": {
"description": "The number of read drops on the interface",
"type": "integer"
},
"rd_errors": {
"description": "The number of read errors on the interface",
"type": "integer"
},
"rd_packets": {
"description": "The number of read packets on the interface",
"type": "integer"
},
"source": {
"description": "The parent network bridge on the node",
"type": "string"
@@ -929,6 +1197,22 @@
"type": {
"description": "The PVC network type",
"type": "string"
},
"wr_bytes": {
"description": "The number of write bytes on the interface",
"type": "integer"
},
"wr_drops": {
"description": "The number of write drops on the interface",
"type": "integer"
},
"wr_errors": {
"description": "The number of write errors on the interface",
"type": "integer"
},
"wr_packets": {
"description": "The number of write packets on the interface",
"type": "integer"
}
},
"type": "object"
@@ -974,6 +1258,23 @@
"description": "The assigned vCPUs of the VM",
"type": "integer"
},
"vcpu_stats": {
"properties": {
"cpu_time": {
"description": "The active CPU time for all vCPUs",
"type": "integer"
},
"system_time": {
"description": "vCPU system time",
"type": "integer"
},
"user_time": {
"description": "vCPU user time",
"type": "integer"
}
},
"type": "object"
},
"vcpu_topology": {
"description": "The topology of the assigned vCPUs in Sockets/Cores/Threads format",
"type": "string"
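Together, memory_stats and vcpu_stats let a client gauge guest resource pressure from a single VM information call. A minimal sketch, assuming a local API at pvc.local:7370 and this response shape (both assumptions):

```
# Extract memory and vCPU statistics for one VM.
# Host, port, and response shape are assumptions; field names are from the schema above.
curl -s "http://pvc.local:7370/api/v1/vm/testvm" \
    | jq '{rss_kb: .memory_stats.rss, usable_kb: .memory_stats.usable, cpu_time: .vcpu_stats.cpu_time}'
```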
@@ -1946,6 +2247,27 @@
"name": "limit",
"required": false,
"type": "string"
},
{
"description": "Limit results to nodes in the specified daemon state",
"in": "query",
"name": "daemon_state",
"required": false,
"type": "string"
},
{
"description": "Limit results to nodes in the specified coordinator state",
"in": "query",
"name": "coordinator_state",
"required": false,
"type": "string"
},
{
"description": "Limit results to nodes in the specified domain state",
"in": "query",
"name": "domain_state",
"required": false,
"type": "string"
}
],
"responses": {
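The three new node-list filters can be combined in a single query. For example (host, port, and the particular state values are illustrative assumptions):

```
# List nodes by daemon, coordinator, and domain state in one request.
# The state strings shown here are examples, not an exhaustive list.
curl -s "http://pvc.local:7370/api/v1/node?daemon_state=run&coordinator_state=primary&domain_state=flushed"
```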
@@ -2162,6 +2484,12 @@
"name": "start_vm",
"required": false,
"type": "boolean"
},
{
"description": "Script install() function keyword argument in \"arg=data\" format; may be specified multiple times to add multiple arguments",
"in": "query",
"name": "arg",
"type": "string"
}
],
"responses": {
@@ -2190,6 +2518,160 @@
]
}
},
"/api/v1/provisioner/ova": {
"get": {
"description": "",
"parameters": [
{
"description": "An OVA name search limit; fuzzy by default, use ^/$ to force exact matches",
"in": "query",
"name": "limit",
"required": false,
"type": "string"
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"items": {
"$ref": "#/definitions/ova"
},
"type": "list"
}
}
},
"summary": "Return a list of OVA sources",
"tags": [
"provisioner"
]
},
"post": {
"description": "<br/>The API client is responsible for determining and setting the ova_size value, as this value cannot be determined dynamically before the upload proceeds.",
"parameters": [
{
"description": "Storage pool name",
"in": "query",
"name": "pool",
"required": true,
"type": "string"
},
{
"description": "OVA name on the cluster (usually identical to the OVA file name)",
"in": "query",
"name": "name",
"required": true,
"type": "string"
},
{
"description": "Size of the OVA file in bytes",
"in": "query",
"name": "ova_size",
"required": true,
"type": "string"
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/Message"
}
},
"400": {
"description": "Bad request",
"schema": {
"$ref": "#/definitions/Message"
}
}
},
"summary": "Upload an OVA image to the cluster",
"tags": [
"provisioner"
]
}
},
"/api/v1/provisioner/ova/{ova}": {
"delete": {
"description": "",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/Message"
}
},
"404": {
"description": "Not found",
"schema": {
"$ref": "#/definitions/Message"
}
}
},
"summary": "Remove OVA image {ova}",
"tags": [
"provisioner"
]
},
"get": {
"description": "",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/ova"
}
},
"404": {
"description": "Not found",
"schema": {
"$ref": "#/definitions/Message"
}
}
},
"summary": "Return information about OVA image {ova}",
"tags": [
"provisioner"
]
},
"post": {
"description": "<br/>The API client is responsible for determining and setting the ova_size value, as this value cannot be determined dynamically before the upload proceeds.",
"parameters": [
{
"description": "Storage pool name",
"in": "query",
"name": "pool",
"required": true,
"type": "string"
},
{
"description": "Size of the OVA file in bytes",
"in": "query",
"name": "ova_size",
"required": true,
"type": "string"
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/Message"
}
},
"400": {
"description": "Bad request",
"schema": {
"$ref": "#/definitions/Message"
}
}
},
"summary": "Upload an OVA image to the cluster",
"tags": [
"provisioner"
]
}
},
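Since the API client must supply ova_size itself, an upload typically stats the local file first and then sends it as a form-body file alongside the query parameters. A minimal sketch; host, port, and the form field name are assumptions:

```
# Compute ova_size client-side, then POST the file as a form body.
# The field name "file" and the cluster address are assumptions.
OVA_FILE="debian-10.ova"
OVA_SIZE="$( stat --format='%s' "${OVA_FILE}" )"
curl -X POST -F "file=@${OVA_FILE}" \
    "http://pvc.local:7370/api/v1/provisioner/ova?pool=vms&name=debian-10&ova_size=${OVA_SIZE}"
```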
"/api/v1/provisioner/profile": {
|
||||
"get": {
|
||||
"description": "",
|
||||
@@ -2228,39 +2710,57 @@
 "required": true,
 "type": "string"
 },
+{
+"description": "Profile type",
+"enum": [
+"provisioner",
+"ova"
+],
+"in": "query",
+"name": "profile_type",
+"required": true,
+"type": "string"
+},
 {
 "description": "Script name",
 "in": "query",
 "name": "script",
-"required": true,
+"required": false,
 "type": "string"
 },
 {
 "description": "System template name",
 "in": "query",
 "name": "system_template",
-"required": true,
+"required": false,
 "type": "string"
 },
 {
 "description": "Network template name",
 "in": "query",
 "name": "network_template",
-"required": true,
+"required": false,
 "type": "string"
 },
 {
 "description": "Storage template name",
 "in": "query",
 "name": "storage_template",
-"required": true,
+"required": false,
 "type": "string"
 },
 {
 "description": "Userdata template name",
 "in": "query",
 "name": "userdata",
-"required": true,
+"required": false,
 "type": "string"
 },
+{
+"description": "OVA image source",
+"in": "query",
+"name": "ova",
+"required": false,
+"type": "string"
+},
 {
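With the new profile_type parameter, an "ova"-type profile can omit the individual template names, which are now optional. An illustrative creation request; the host and the way the profile name is passed are assumptions:

```
# Create an OVA-type profile referencing an uploaded OVA source.
# Host, port, and the name parameter are assumptions.
curl -X POST \
    "http://pvc.local:7370/api/v1/provisioner/profile?name=debian10&profile_type=ova&ova=debian-10"
```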
@@ -2336,6 +2836,17 @@
"post": {
"description": "",
"parameters": [
{
"description": "Profile type",
"enum": [
"provisioner",
"ova"
],
"in": "query",
"name": "profile_type",
"required": true,
"type": "string"
},
{
"description": "Script name",
"in": "query",
@@ -2371,6 +2882,13 @@
"required": true,
"type": "string"
},
{
"description": "OVA image source",
"in": "query",
"name": "ova",
"required": false,
"type": "string"
},
{
"description": "Script install() function keyword argument in \"arg=data\" format; may be specified multiple times to add multiple arguments",
"in": "query",
@@ -3558,6 +4076,77 @@
"tags": [
"provisioner / template"
]
},
"put": {
"description": "",
"parameters": [
{
"description": "vCPU count for VM",
"in": "query",
"name": "vcpus",
"type": "integer"
},
{
"description": "vRAM size in MB for VM",
"in": "query",
"name": "vram",
"type": "integer"
},
{
"description": "Whether to enable serial console for VM",
"in": "query",
"name": "serial",
"type": "boolean"
},
{
"description": "Whether to enable VNC console for VM",
"in": "query",
"name": "vnc",
"type": "boolean"
},
{
"description": "VNC bind address when VNC console is enabled",
"in": "query",
"name": "vnc_bind",
"type": "string"
},
{
"description": "CSV list of node(s) to limit VM assignment to",
"in": "query",
"name": "node_limit",
"type": "string"
},
{
"description": "Selector to use for VM node assignment on migration/move",
"in": "query",
"name": "node_selector",
"type": "string"
},
{
"description": "Whether to start VM with node ready state (one-time)",
"in": "query",
"name": "node_autostart",
"type": "boolean"
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/Message"
}
},
"400": {
"description": "Bad request",
"schema": {
"$ref": "#/definitions/Message"
}
}
},
"summary": "Modify an existing system template {template}",
"tags": [
"provisioner / template"
]
}
},
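The new PUT method modifies an existing system template in place, changing only the fields passed as query parameters. A hedged sketch; the template path segment and the host are assumptions based on the summary above:

```
# Bump vCPUs and vRAM on an existing system template and enable serial console.
# The /template/system/{template} path and cluster address are assumptions.
curl -X PUT \
    "http://pvc.local:7370/api/v1/provisioner/template/system/small?vcpus=2&vram=2048&serial=true"
```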
"/api/v1/provisioner/userdata": {
|
||||
@@ -3780,6 +4369,57 @@
]
}
},
"/api/v1/storage/ceph/benchmark": {
"get": {
"description": "",
"parameters": [
{
"description": "A single job name to limit results to",
"in": "query",
"name": "job",
"required": false,
"type": "string"
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/storagebenchmark"
}
}
},
"summary": "List results from benchmark jobs",
"tags": [
"storage / ceph"
]
},
"post": {
"description": "",
"parameters": [
{
"description": "The PVC storage pool to benchmark",
"in": "query",
"name": "pool",
"required": true,
"type": "string"
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"description": "The Celery job ID of the benchmark (unused elsewhere)",
"type": "string"
}
}
},
"summary": "Execute a storage benchmark against a storage pool",
"tags": [
"storage / ceph"
]
}
},
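A typical flow pairs the two methods: POST starts the benchmark and returns the Celery job ID, then GET retrieves results, optionally limited to one job name (an ISO date, per the schema earlier in this file). Host, port, and pool name are assumptions:

```
# Start a benchmark against the "vms" pool; the response body is the job ID.
curl -X POST "http://pvc.local:7370/api/v1/storage/ceph/benchmark?pool=vms"
# Later, list results for a single job; job names are ISO dates.
curl -s "http://pvc.local:7370/api/v1/storage/ceph/benchmark?job=2021-01-01T00:00:00.000000" | jq .
```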
"/api/v1/storage/ceph/option": {
|
||||
"post": {
|
||||
"description": "",
|
||||
@@ -4691,6 +5331,52 @@
]
}
},
"/api/v1/storage/ceph/volume/{pool}/{volume}/upload": {
"post": {
"description": "<br/>The body must be a form body containing a file that is the binary contents of the image.",
"parameters": [
{
"description": "The type of source image file",
"enum": [
"raw",
"vmdk",
"qcow2",
"qed",
"vdi",
"vpc"
],
"in": "query",
"name": "image_format",
"required": true,
"type": "string"
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/Message"
}
},
"400": {
"description": "Bad request",
"schema": {
"$ref": "#/definitions/Message"
}
},
"404": {
"description": "Not found",
"schema": {
"$ref": "#/definitions/Message"
}
}
},
"summary": "Upload a disk image to Ceph volume {volume} in pool {pool}",
"tags": [
"storage / ceph"
]
}
},
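As with the OVA upload, the request body is a form body containing the image file, and image_format must be one of the enum values above. A sketch; host, port, and the form field name are assumptions:

```
# Upload a qcow2 image into an existing volume; the volume name and
# cluster address are assumptions, the image_format value is from the enum above.
curl -X POST -F "file=@debian10_root.qcow2" \
    "http://pvc.local:7370/api/v1/storage/ceph/volume/vms/testvm_disk0/upload?image_format=qcow2"
```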
"/api/v1/vm": {
|
||||
"get": {
|
||||
"description": "",
|
||||
@@ -5142,6 +5828,18 @@
"in": "query",
"name": "force",
"type": "boolean"
},
{
"description": "Whether to block waiting for the migration to complete",
"in": "query",
"name": "wait",
"type": "boolean"
},
{
"description": "Whether to enforce live migration and disable shutdown-based fallback migration",
"in": "query",
"name": "force_live",
"type": "boolean"
}
],
"responses": {
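The wait flag blocks the request until the migration completes, while force_live makes a live migration mandatory instead of permitting the shutdown-based fallback. An illustrative call; only force, wait, and force_live come from the hunk above, and the rest of the URL is an assumption:

```
# Migrate a VM to a target node, blocking until done, live-only.
# Endpoint path, action/node parameters, and host are assumptions.
curl -X POST \
    "http://pvc.local:7370/api/v1/vm/testvm/node?action=migrate&node=hv2&wait=true&force_live=true"
```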
@@ -5202,6 +5900,12 @@
"name": "state",
"required": true,
"type": "string"
},
{
"description": "Whether to block waiting for the state change to complete",
"in": "query",
"name": "wait",
"type": "boolean"
}
],
"responses": {
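The state-change endpoint gains the same wait semantics: with wait=true the call returns only once the VM has reached the requested state. Illustrative only; host, VM name, path, and the state value are assumptions:

```
# Stop a VM and block until the state change completes.
curl -X POST "http://pvc.local:7370/api/v1/vm/testvm/state?state=stop&wait=true"
```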
@@ -8,14 +8,13 @@ import os
 import sys
 import json

-os.environ['PVC_CONFIG_FILE'] = "./client-api/pvc-api.sample.yaml"
+os.environ['PVC_CONFIG_FILE'] = "./api-daemon/pvcapid.sample.yaml"

-sys.path.append('client-api')
+sys.path.append('api-daemon')

-pvc_api = __import__('pvc-api')
+import pvcapid.flaskapi as pvc_api

 swagger_file = "docs/manuals/swagger.json"

 swagger_data = swagger(pvc_api.app)
 swagger_data['info']['version'] = "1.0"
 swagger_data['info']['title'] = "PVC Client and Provisioner API"
gen-api-migrations (new executable file, 11 lines)
@@ -0,0 +1,11 @@
#!/bin/bash

# Generate the database migration files

VERSION="$( head -1 debian/changelog | awk -F'[()-]' '{ print $2 }' )"

pushd api-daemon
export PVC_CONFIG_FILE="./pvcapid.sample.yaml"
./pvcapid-manage.py db migrate -m "PVC version ${VERSION}"
./pvcapid-manage.py db upgrade
popd
node-daemon/monitoring/README.md (new file, 24 lines)
@@ -0,0 +1,24 @@
# PVC Node Monitoring Resources

This directory contains several monitoring resources that can be used with various monitoring systems to track and alert on a PVC cluster system.

### Munin

The included Munin plugin can be activated by linking to it from `/etc/munin/plugins/pvc`. By default, this plugin triggers a CRITICAL state when either the PVC or Storage cluster becomes Degraded, and is otherwise OK. The overall health is graphed numerically (Optimal is 0, Maintenance is 1, Degraded is 2) so that the cluster health can be tracked over time.

When using this plugin, it might be useful to adjust the thresholds with a plugin configuration. For instance, one could adjust the Degraded value from CRITICAL to WARNING by raising the critical threshold to a value higher than 1.99 (e.g. 3, 10, etc.) so that only the WARNING threshold will be hit. Alternatively, one could make Maintenance mode trigger a WARNING by lowering the warning threshold to 0.99.

Example plugin configuration:

```
[pvc]
# Make cluster warn on maintenance
env.pvc_cluster_warning 0.99
# Disable critical threshold (>2)
env.pvc_cluster_critical 3
# Make storage warn on maintenance, crit on degraded (latter is default)
env.pvc_storage_warning 0.99
env.pvc_storage_critical 1.99
```

### Check_MK
node-daemon/monitoring/munin/pvc (new executable file, 176 lines)
@@ -0,0 +1,176 @@
#!/bin/bash
# -*- sh -*-

: << =cut

=head1 NAME

pvc - Plugin to monitor a PVC cluster.

=head1 CONFIGURATION

Note that due to how Munin thresholds work, these values must always be slightly less than 1 or 2 respectively,
or the alerts will never be triggered.

Defaults (no config required):

[pvc]
env.warning 1.99
env.critical 1.99

Make degraded cluster WARN only (max value is 2, so 3 effectively disables):

[pvc]
env.pvc_cluster_warning 1.99
env.pvc_cluster_critical 3

=head1 AUTHOR

Joshua Boniface <joshua@boniface.me>

=head1 LICENSE

GPLv3

=head1 BUGS

=back

=head1 MAGIC MARKERS

#%# family=auto
#%# capabilities=autoconf

=cut

. "$MUNIN_LIBDIR/plugins/plugin.sh"

warning=1.99
critical=1.99

export PVC_CLIENT_DIR="/run/shm/munin-pvc"
PVC_CMD="/usr/bin/pvc --quiet --cluster local status --format json-pretty"
JQ_CMD="/usr/bin/jq"

output_usage() {
    echo "This plugin outputs numerical values based on the health of the PVC cluster."
    echo
    echo "There are separate outputs for both the PVC cluster itself as well as the Ceph storage cluster."
    echo "In normal operation, i.e. when both clusters are in 'Optimal' state, the plugin returns 0 for"
    echo "each cluster. When the cluster is placed into 'Maintenance' mode, the plugin returns 1 for each"
    echo "cluster, and goes into WARN state (limit 0.99); this can be adjusted by overriding the WARNING"
    echo "threshold of the plugin to something other than 0.99 - note that due to Munin's alerting design,"
    echo "the warning value must always be very slightly below the whole number. When either cluster"
    echo "element becomes 'Degraded', the plugin returns 2 for the relevant cluster, which is treated as a"
    echo "critical. Like the WARNING threshold, this can be overridden, and with the same caveat about limit."
    exit 0
}

output_autoconf() {
    $PVC_CMD &>/dev/null
    pvc_ret=$?
    $JQ_CMD --version &>/dev/null
    jq_ret=$?

    if [[ ${pvc_ret} -eq 0 && ${jq_ret} -eq 0 ]]; then
        echo "yes"
    elif [[ ${pvc_ret} -ne 0 ]]; then
        echo "no (no 'pvc' command found or local cluster not usable)"
    elif [[ ${jq_ret} -ne 0 ]]; then
        echo "no (no 'jq' command found)"
    else
        echo "no (generic failure)"
    fi
}

output_config() {
    echo 'graph_title PVC Clusters'
    echo 'graph_args --base 1000'
    echo 'graph_vlabel Count'
    echo 'graph_category pvc'
    echo 'graph_period second'
    echo 'graph_info This graph shows the nodes in the PVC cluster.'

    echo 'pvc_cluster.label Cluster Degradation'
    echo 'pvc_cluster.type GAUGE'
    echo 'pvc_cluster.max 2'
    echo 'pvc_cluster.info Whether the PVC cluster is in a degraded state.'
    print_warning pvc_cluster
    print_critical pvc_cluster

    echo 'pvc_storage.label Storage Degradation'
    echo 'pvc_storage.type GAUGE'
    echo 'pvc_storage.max 2'
    echo 'pvc_storage.info Whether the storage cluster is in a degraded state.'
    print_warning pvc_storage
    print_critical pvc_storage

    exit 0
}

output_values() {
    PVC_OUTPUT="$( $PVC_CMD )"

    cluster_health="$( $JQ_CMD '.health' <<<"${PVC_OUTPUT}" | tr -d '"' )"
    cluster_failed_reason="$( $JQ_CMD -r '.health_msg | @csv' <<<"${PVC_OUTPUT}" | tr -d '"' | sed 's/,/, /g' )"
    case $cluster_health in
        "Optimal")
            cluster_value="0"
            ;;
        "Maintenance")
            cluster_value="1"
            ;;
        "Degraded")
            cluster_value="2"
    esac

    storage_health="$( $JQ_CMD '.storage_health' <<<"${PVC_OUTPUT}" | tr -d '"' )"
    storage_failed_reason="$( $JQ_CMD -r '.storage_health_msg | @csv' <<<"${PVC_OUTPUT}" | tr -d '"' | sed 's/,/, /g' )"
    case $storage_health in
        "Optimal")
            storage_value="0"
            ;;
        "Maintenance")
            storage_value="1"
            ;;
        "Degraded")
            storage_value="2"
    esac

    echo "pvc_cluster.value $cluster_value"
    if [[ $cluster_value -eq 1 ]]; then
        echo "pvc_cluster.extinfo Cluster in maintenance mode"
    elif [[ $cluster_value -eq 2 ]]; then
        echo "pvc_cluster.extinfo ${cluster_failed_reason}"
    fi
    echo "pvc_storage.value $storage_value"
    if [[ $storage_value -eq 1 ]]; then
        echo "pvc_storage.extinfo Cluster in maintenance mode"
    elif [[ $storage_value -eq 2 ]]; then
        echo "pvc_storage.extinfo ${storage_failed_reason}"
    fi
}

case $# in
    0)
        output_values
        ;;
    1)
        case $1 in
            autoconf)
                output_autoconf
                ;;
            config)
                output_config
                ;;
            *)
                output_usage
                exit 1
                ;;
        esac
        ;;
    *)
        output_usage
        exit 1
esac
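To activate the plugin per the README above, a symlink and a munin-node restart are typically sufficient; a hedged sketch assuming the plugin is installed under /usr/share/munin/plugins:

```
# Assumed install path for the plugin; adjust to wherever it is shipped.
sudo ln -s /usr/share/munin/plugins/pvc /etc/munin/plugins/pvc
sudo systemctl restart munin-node
```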
@@ -2,17 +2,19 @@

 [Unit]
 Description = Parallel Virtual Cluster autoflush daemon
-After = pvcd.service
-PartOf = pvcd.target
+After = pvcnoded.service pvcapid.service zookeeper.service libvirtd.service ssh.service ceph.target
+Wants = pvcnoded.service
+PartOf = pvc.target

 [Service]
 Type = oneshot
 RemainAfterExit = true
 WorkingDirectory = /usr/share/pvc
 TimeoutSec = 30min
 ExecStartPre = /bin/sleep 30
 ExecStart = /usr/bin/pvc -c local node unflush --wait
 ExecStop = /usr/bin/pvc -c local node flush --wait
-ExecStopPost = /bin/sleep 30
+ExecStopPost = /bin/sleep 5

 [Install]
-WantedBy = pvcd.target
+WantedBy = pvc.target
@@ -1,17 +0,0 @@
# Parallel Virtual Cluster node daemon unit file

[Unit]
Description = Parallel Virtual Cluster node daemon
After = network-online.target libvirtd.service zookeeper.service
PartOf = pvcd.target

[Service]
Type = simple
WorkingDirectory = /usr/share/pvc
Environment = PYTHONUNBUFFERED=true
Environment = PVCD_CONFIG_FILE=/etc/pvc/pvcd.yaml
ExecStart = /usr/share/pvc/pvcd.py
Restart = on-failure

[Install]
WantedBy = pvcd.target
File diff suppressed because it is too large
@@ -1,13 +0,0 @@
#!/bin/bash

for disk in $( sudo rbd list ${BLSE_STORAGE_POOL_VM} | grep "^${vm}" ); do
    echo -e " Disk: $disk"
    locks="$( sudo rbd lock list ${BLSE_STORAGE_POOL_VM}/${disk} | grep '^client' )"
    echo "${locks}"
    if [[ -n "${locks}" ]]; then
        echo -e " LOCK FOUND! Clearing."
        locker="$( awk '{ print $1 }' <<<"${locks}" )"
        id="$( awk '{ print $2" "$3 }' <<<"${locks}" )"
        sudo rbd lock remove ${BLSE_STORAGE_POOL_VM}/${disk} "${id}" "${locker}"
    fi
done
@@ -1,134 +0,0 @@
#!/usr/bin/env python3

# zkhandler.py - Secure versioned ZooKeeper updates
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2020 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

import kazoo.client
import uuid

#import pvcd.log as log

# Child list function
def listchildren(zk_conn, key):
    children = zk_conn.get_children(key)
    return children

# Key deletion function
def deletekey(zk_conn, key, recursive=True):
    zk_conn.delete(key, recursive=recursive)

# Data read function
def readdata(zk_conn, key):
    data_raw = zk_conn.get(key)
    data = data_raw[0].decode('utf8')
    meta = data_raw[1]
    return data

# Data write function
def writedata(zk_conn, kv):
    # Start up a transaction
    zk_transaction = zk_conn.transaction()

    # Proceed one KV pair at a time
    for key in sorted(kv):
        data = kv[key]
        if not data:
            data = ''

        # Check if this key already exists or not
        if not zk_conn.exists(key):
            # We're creating a new key
            zk_transaction.create(key, str(data).encode('utf8'))
        else:
            # We're updating a key with version validation
            orig_data = zk_conn.get(key)
            version = orig_data[1].version

            # Set what we expect the new version to be
            new_version = version + 1

            # Update the data
            zk_transaction.set_data(key, str(data).encode('utf8'))

            # Set up the check
            try:
                zk_transaction.check(key, new_version)
            except TypeError:
                print('Zookeeper key "{}" does not match expected version'.format(key))
                return False

    # Commit the transaction
    try:
        zk_transaction.commit()
        return True
    except Exception:
        return False

# Key rename function
def renamekey(zk_conn, kv):
    # This one is not transactional because, inexplicably, transactions don't
    # support either the recursive delete or recursive create operations that
    # we need. Why? No explanation in the docs that I can find.

    # Proceed one KV pair at a time
    for key in sorted(kv):
        old_name = key
        new_name = kv[key]

        old_data = zk_conn.get(old_name)[0]

        # Find the children of old_name recursively
        child_keys = list()
        def get_children(key):
            children = zk_conn.get_children(key)
            if not children:
                child_keys.append(key)
            else:
                for ckey in children:
                    get_children('{}/{}'.format(key, ckey))
        get_children(old_name)

        # Get the data out of each of the child keys
        child_data = dict()
        for ckey in child_keys:
            child_data[ckey] = zk_conn.get(ckey)[0]

        # Create the new parent key
        zk_conn.create(new_name, old_data, makepath=True)

        # For each child key, create the key and add the data
        for ckey in child_keys:
            new_ckey_name = ckey.replace(old_name, new_name)
            zk_conn.create(new_ckey_name, child_data[ckey], makepath=True)

        # Remove recursively the old key
        zk_conn.delete(old_name, recursive=True)

# Write lock function
def writelock(zk_conn, key):
    lock_id = str(uuid.uuid1())
    lock = zk_conn.WriteLock('{}'.format(key), lock_id)
    return lock

# Read lock function
def readlock(zk_conn, key):
    lock_id = str(uuid.uuid1())
    lock = zk_conn.ReadLock('{}'.format(key), lock_id)
    return lock
Some files were not shown because too many files have changed in this diff