Compare commits
13 Commits
Author | SHA1 | Date | |
---|---|---|---|
0ccfc41398 | |||
ab05e0f3db | |||
9291ce6ffc | |||
dd87951642 | |||
0e4bece441 | |||
b33c0ab0e2 | |||
094d25dafa | |||
150c61d226 | |||
f1c0c9325d | |||
26b0a8b5c1 | |||
f22f291c8b | |||
9100c63e99 | |||
aba567d6c9 |
10
README.md
10
README.md
@@ -20,6 +20,16 @@ To get started with PVC, please see the [About](https://parallelvirtualcluster.r
|
||||
|
||||
## Changelog
|
||||
|
||||
#### v0.9.13
|
||||
|
||||
* Adds nicer startup messages for daemons
|
||||
* Adds additional API field for stored_bytes to pool stats
|
||||
* Fixes sorting issues with snapshot lists
|
||||
* Fixes missing increment/decrement of snapshot_count on volumes
|
||||
* Fixes bad calls in pool element API endpoints
|
||||
* Fixes inconsistent bytes_tohuman behaviour in daemons
|
||||
* Adds validation and maximum volume size on creation (must be smaller than the pool free space)
|
||||
|
||||
#### v0.9.12
|
||||
|
||||
* Fixes a bug in the pvcnoded service unit file causing a Zookeeper startup race condition
|
||||
|
@@ -26,10 +26,31 @@ import pvcapid.flaskapi as pvc_api
|
||||
# Entrypoint
|
||||
##########################################################
|
||||
|
||||
# Version string for startup output
|
||||
version = '0.9.11'
|
||||
|
||||
if pvc_api.config['ssl_enabled']:
|
||||
context = (pvc_api.config['ssl_cert_file'], pvc_api.config['ssl_key_file'])
|
||||
else:
|
||||
context = None
|
||||
|
||||
print('Starting PVC API daemon at {}:{} with SSL={}, Authentication={}'.format(pvc_api.config['listen_address'], pvc_api.config['listen_port'], pvc_api.config['ssl_enabled'], pvc_api.config['auth_enabled']))
|
||||
# Print our startup messages
|
||||
print('')
|
||||
print('|--------------------------------------------------|')
|
||||
print('| ######## ## ## ###### |')
|
||||
print('| ## ## ## ## ## ## |')
|
||||
print('| ## ## ## ## ## |')
|
||||
print('| ######## ## ## ## |')
|
||||
print('| ## ## ## ## |')
|
||||
print('| ## ## ## ## ## |')
|
||||
print('| ## ### ###### |')
|
||||
print('|--------------------------------------------------|')
|
||||
print('| Parallel Virtual Cluster API daemon v{0: <11} |'.format(version))
|
||||
print('| API version: v{0: <34} |'.format(pvc_api.API_VERSION))
|
||||
print('| Listen: {0: <40} |'.format('{}:{}'.format(pvc_api.config['listen_address'], pvc_api.config['listen_port'])))
|
||||
print('| SSL: {0: <43} |'.format(str(pvc_api.config['ssl_enabled'])))
|
||||
print('| Authentication: {0: <32} |'.format(str(pvc_api.config['auth_enabled'])))
|
||||
print('|--------------------------------------------------|')
|
||||
print('')
|
||||
|
||||
pvc_api.app.run(pvc_api.config['listen_address'], pvc_api.config['listen_port'], threaded=True, ssl_context=context)
|
||||
|
@@ -52,16 +52,16 @@ def strtobool(stringv):
|
||||
|
||||
# Parse the configuration file
|
||||
try:
|
||||
pvc_config_file = os.environ['PVC_CONFIG_FILE']
|
||||
pvcapid_config_file = os.environ['PVC_CONFIG_FILE']
|
||||
except Exception:
|
||||
print('Error: The "PVC_CONFIG_FILE" environment variable must be set before starting pvcapid.')
|
||||
exit(1)
|
||||
|
||||
print('Starting PVC API daemon')
|
||||
print('Loading configuration from file "{}"'.format(pvcapid_config_file))
|
||||
|
||||
# Read in the config
|
||||
try:
|
||||
with open(pvc_config_file, 'r') as cfgfile:
|
||||
with open(pvcapid_config_file, 'r') as cfgfile:
|
||||
o_config = yaml.load(cfgfile, Loader=yaml.BaseLoader)
|
||||
except Exception as e:
|
||||
print('ERROR: Failed to parse configuration file: {}'.format(e))
|
||||
@@ -3337,12 +3337,15 @@ class API_Storage_Ceph_Pool_Root(Resource):
|
||||
id:
|
||||
type: integer
|
||||
description: The Ceph pool ID
|
||||
stored_bytes:
|
||||
type: integer
|
||||
description: The stored data size (in bytes, post-replicas)
|
||||
free_bytes:
|
||||
type: integer
|
||||
description: The total free space (in bytes)
|
||||
description: The total free space (in bytes. post-replicas)
|
||||
used_bytes:
|
||||
type: integer
|
||||
description: The total used space (in bytes)
|
||||
description: The total used space (in bytes, pre-replicas)
|
||||
used_percent:
|
||||
type: number
|
||||
description: The ratio of used space to free space
|
||||
@@ -3464,7 +3467,7 @@ class API_Storage_Ceph_Pool_Element(Resource):
|
||||
type: object
|
||||
id: Message
|
||||
"""
|
||||
return api_helper, api_helper.ceph_pool_list(
|
||||
return api_helper.ceph_pool_list(
|
||||
pool,
|
||||
is_fuzzy=False
|
||||
)
|
||||
@@ -3508,7 +3511,7 @@ class API_Storage_Ceph_Pool_Element(Resource):
|
||||
type: object
|
||||
id: Message
|
||||
"""
|
||||
api_helper.ceph_pool_add(
|
||||
return api_helper.ceph_pool_add(
|
||||
pool,
|
||||
reqargs.get('pgs', None),
|
||||
reqargs.get('replcfg', None)
|
||||
|
@@ -177,8 +177,8 @@ def upload_ova(pool, name, ova_size):
|
||||
pvc_common.stopZKConnection(zk_conn)
|
||||
|
||||
# Normalize the OVA size to bytes
|
||||
ova_size_bytes = int(pvc_ceph.format_bytes_fromhuman(ova_size)[:-1])
|
||||
ova_size = pvc_ceph.format_bytes_fromhuman(ova_size)
|
||||
ova_size_bytes = pvc_ceph.format_bytes_fromhuman(ova_size)
|
||||
ova_size = '{}B'.format(ova_size_bytes)
|
||||
|
||||
# Verify that the cluster has enough space to store the OVA volumes (2x OVA size, temporarily, 1x permanently)
|
||||
zk_conn = pvc_common.startZKConnection(config['coordinators'])
|
||||
@@ -274,7 +274,7 @@ def upload_ova(pool, name, ova_size):
|
||||
vm_volume_size = disk.get('capacity')
|
||||
|
||||
# Normalize the dev size to bytes
|
||||
dev_size = pvc_ceph.format_bytes_fromhuman(dev_size_raw)
|
||||
dev_size = '{}B'.format(pvc_ceph.format_bytes_fromhuman(dev_size_raw))
|
||||
|
||||
def cleanup_img_maps():
|
||||
zk_conn = pvc_common.startZKConnection(config['coordinators'])
|
||||
@@ -368,7 +368,7 @@ def upload_ova(pool, name, ova_size):
|
||||
vm_volume_size = disk.get('capacity')
|
||||
|
||||
# The function always return XXXXB, so strip off the B and convert to an integer
|
||||
vm_volume_size_bytes = int(pvc_ceph.format_bytes_fromhuman(vm_volume_size)[:-1])
|
||||
vm_volume_size_bytes = pvc_ceph.format_bytes_fromhuman(vm_volume_size)
|
||||
vm_volume_size_gb = math.ceil(vm_volume_size_bytes / 1024 / 1024 / 1024)
|
||||
|
||||
query = "INSERT INTO ova_volume (ova, pool, volume_name, volume_format, disk_id, disk_size_gb) VALUES (%s, %s, %s, %s, %s, %s);"
|
||||
|
@@ -17,6 +17,7 @@ $EDITOR ${changelog_file}
|
||||
changelog="$( cat ${changelog_file} | grep -v '^#' | sed 's/^*/ */' )"
|
||||
|
||||
sed -i "s,version = '${current_version}',version = '${new_version}'," node-daemon/pvcnoded/Daemon.py
|
||||
sed -i "s,version = '${current_version}',version = '${new_version}'," api-daemon/pvcapid/Daemon.py
|
||||
|
||||
readme_tmpdir=$( mktemp -d )
|
||||
cp README.md ${readme_tmpdir}/
|
||||
|
@@ -122,7 +122,7 @@ def format_bytes_fromhuman(datahuman):
|
||||
dataunit = 'B'
|
||||
datasize = int(datahuman)
|
||||
databytes = datasize * byte_unit_matrix[dataunit]
|
||||
return '{}B'.format(databytes)
|
||||
return databytes
|
||||
|
||||
|
||||
# Format ops sizes to/from human-readable units
|
||||
@@ -475,7 +475,17 @@ def getVolumeInformation(zk_conn, pool, volume):
|
||||
|
||||
|
||||
def add_volume(zk_conn, pool, name, size):
|
||||
# 1. Create the volume
|
||||
# 1. Verify the size of the volume
|
||||
pool_information = getPoolInformation(zk_conn, pool)
|
||||
size_bytes = format_bytes_fromhuman(size)
|
||||
if size_bytes >= int(pool_information['stats']['free_bytes']):
|
||||
return False, 'ERROR: Requested volume size is greater than the available free space in the pool'
|
||||
|
||||
# Add 'B' if the volume is in bytes
|
||||
if re.match(r'^[0-9]+$', size):
|
||||
size = '{}B'.format(size)
|
||||
|
||||
# 2. Create the volume
|
||||
retcode, stdout, stderr = common.run_os_command('rbd create --size {} --image-feature layering,exclusive-lock {}/{}'.format(size, pool, name))
|
||||
if retcode:
|
||||
return False, 'ERROR: Failed to create RBD volume "{}": {}'.format(name, stderr)
|
||||
@@ -545,7 +555,7 @@ def resize_volume(zk_conn, pool, name, size):
|
||||
target_lv_conn = libvirt.open(dest_lv)
|
||||
target_vm_conn = target_lv_conn.lookupByName(vm_info['name'])
|
||||
if target_vm_conn:
|
||||
target_vm_conn.blockResize(volume_id, int(format_bytes_fromhuman(size)[:-1]), libvirt.VIR_DOMAIN_BLOCK_RESIZE_BYTES)
|
||||
target_vm_conn.blockResize(volume_id, format_bytes_fromhuman(size), libvirt.VIR_DOMAIN_BLOCK_RESIZE_BYTES)
|
||||
target_lv_conn.close()
|
||||
except Exception:
|
||||
pass
|
||||
@@ -715,6 +725,16 @@ def add_snapshot(zk_conn, pool, volume, name):
|
||||
'/ceph/snapshots/{}/{}/{}/stats'.format(pool, volume, name): '{}'
|
||||
})
|
||||
|
||||
# 3. Update the count of snapshots on this volume
|
||||
volume_stats_raw = zkhandler.readdata(zk_conn, '/ceph/volumes/{}/{}/stats'.format(pool, volume))
|
||||
volume_stats = dict(json.loads(volume_stats_raw))
|
||||
# Format the size to something nicer
|
||||
volume_stats['snapshot_count'] = volume_stats['snapshot_count'] + 1
|
||||
volume_stats_raw = json.dumps(volume_stats)
|
||||
zkhandler.writedata(zk_conn, {
|
||||
'/ceph/volumes/{}/{}/stats'.format(pool, volume): volume_stats_raw
|
||||
})
|
||||
|
||||
return True, 'Created RBD snapshot "{}" of volume "{}" in pool "{}".'.format(name, volume, pool)
|
||||
|
||||
|
||||
@@ -751,6 +771,16 @@ def remove_snapshot(zk_conn, pool, volume, name):
|
||||
# 2. Delete snapshot from Zookeeper
|
||||
zkhandler.deletekey(zk_conn, '/ceph/snapshots/{}/{}/{}'.format(pool, volume, name))
|
||||
|
||||
# 3. Update the count of snapshots on this volume
|
||||
volume_stats_raw = zkhandler.readdata(zk_conn, '/ceph/volumes/{}/{}/stats'.format(pool, volume))
|
||||
volume_stats = dict(json.loads(volume_stats_raw))
|
||||
# Format the size to something nicer
|
||||
volume_stats['snapshot_count'] = volume_stats['snapshot_count'] - 1
|
||||
volume_stats_raw = json.dumps(volume_stats)
|
||||
zkhandler.writedata(zk_conn, {
|
||||
'/ceph/volumes/{}/{}/stats'.format(pool, volume): volume_stats_raw
|
||||
})
|
||||
|
||||
return True, 'Removed RBD snapshot "{}" of volume "{}" in pool "{}".'.format(name, volume, pool)
|
||||
|
||||
|
||||
@@ -783,4 +813,4 @@ def get_list_snapshot(zk_conn, pool, volume, limit, is_fuzzy=True):
|
||||
else:
|
||||
snapshot_list.append({'pool': pool_name, 'volume': volume_name, 'snapshot': snapshot_name})
|
||||
|
||||
return True, sorted(snapshot_list, key=lambda x: int(x['id']))
|
||||
return True, sorted(snapshot_list, key=lambda x: str(x['snapshot']))
|
||||
|
12
debian/changelog
vendored
12
debian/changelog
vendored
@@ -1,3 +1,15 @@
|
||||
pvc (0.9.13-0) unstable; urgency=high
|
||||
|
||||
* Adds nicer startup messages for daemons
|
||||
* Adds additional API field for stored_bytes to pool stats
|
||||
* Fixes sorting issues with snapshot lists
|
||||
* Fixes missing increment/decrement of snapshot_count on volumes
|
||||
* Fixes bad calls in pool element API endpoints
|
||||
* Fixes inconsistent bytes_tohuman behaviour in daemons
|
||||
* Adds validation and maximum volume size on creation (must be smaller than the pool free space)
|
||||
|
||||
-- Joshua M. Boniface <joshua@boniface.me> Wed, 17 Feb 2021 11:33:28 -0500
|
||||
|
||||
pvc (0.9.12-0) unstable; urgency=high
|
||||
|
||||
* Fixes a bug in the pvcnoded service unit file causing a Zookeeper startup race condition
|
||||
|
@@ -18,6 +18,16 @@ To get started with PVC, please see the [About](https://parallelvirtualcluster.r
|
||||
|
||||
## Changelog
|
||||
|
||||
#### v0.9.13
|
||||
|
||||
* Adds nicer startup messages for daemons
|
||||
* Adds additional API field for stored_bytes to pool stats
|
||||
* Fixes sorting issues with snapshot lists
|
||||
* Fixes missing increment/decrement of snapshot_count on volumes
|
||||
* Fixes bad calls in pool element API endpoints
|
||||
* Fixes inconsistent bytes_tohuman behaviour in daemons
|
||||
* Adds validation and maximum volume size on creation (must be smaller than the pool free space)
|
||||
|
||||
#### v0.9.12
|
||||
|
||||
* Fixes a bug in the pvcnoded service unit file causing a Zookeeper startup race condition
|
||||
|
@@ -621,7 +621,7 @@
|
||||
"stats": {
|
||||
"properties": {
|
||||
"free_bytes": {
|
||||
"description": "The total free space (in bytes)",
|
||||
"description": "The total free space (in bytes. post-replicas)",
|
||||
"type": "integer"
|
||||
},
|
||||
"id": {
|
||||
@@ -660,8 +660,12 @@
|
||||
"description": "The total read operations on the pool (pool-lifetime)",
|
||||
"type": "integer"
|
||||
},
|
||||
"stored_bytes": {
|
||||
"description": "The stored data size (in bytes, post-replicas)",
|
||||
"type": "integer"
|
||||
},
|
||||
"used_bytes": {
|
||||
"description": "The total used space (in bytes)",
|
||||
"description": "The total used space (in bytes, pre-replicas)",
|
||||
"type": "integer"
|
||||
},
|
||||
"used_percent": {
|
||||
|
@@ -54,7 +54,7 @@ import pvcnoded.CephInstance as CephInstance
|
||||
import pvcnoded.MetadataAPIInstance as MetadataAPIInstance
|
||||
|
||||
# Version string for startup output
|
||||
version = '0.9.12'
|
||||
version = '0.9.13'
|
||||
|
||||
###############################################################################
|
||||
# PVCD - node daemon startup program
|
||||
@@ -134,7 +134,7 @@ def readConfig(pvcnoded_config_file, myhostname):
|
||||
|
||||
with open(pvcnoded_config_file, 'r') as cfgfile:
|
||||
try:
|
||||
o_config = yaml.load(cfgfile)
|
||||
o_config = yaml.load(cfgfile, Loader=yaml.SafeLoader)
|
||||
except Exception as e:
|
||||
print('ERROR: Failed to parse configuration file: {}'.format(e))
|
||||
exit(1)
|
||||
@@ -331,16 +331,29 @@ if not os.path.exists(config['log_directory']):
|
||||
logger = log.Logger(config)
|
||||
|
||||
# Print our startup messages
|
||||
logger.out('Parallel Virtual Cluster node daemon v{}'.format(version))
|
||||
logger.out('FQDN: {}'.format(myfqdn))
|
||||
logger.out('Host: {}'.format(myhostname))
|
||||
logger.out('ID: {}'.format(mynodeid))
|
||||
logger.out('IPMI hostname: {}'.format(config['ipmi_hostname']))
|
||||
logger.out('Machine details:')
|
||||
logger.out(' CPUs: {}'.format(staticdata[0]))
|
||||
logger.out(' Arch: {}'.format(staticdata[3]))
|
||||
logger.out(' OS: {}'.format(staticdata[2]))
|
||||
logger.out(' Kernel: {}'.format(staticdata[1]))
|
||||
logger.out('')
|
||||
logger.out('|--------------------------------------------------|')
|
||||
logger.out('| ######## ## ## ###### |')
|
||||
logger.out('| ## ## ## ## ## ## |')
|
||||
logger.out('| ## ## ## ## ## |')
|
||||
logger.out('| ######## ## ## ## |')
|
||||
logger.out('| ## ## ## ## |')
|
||||
logger.out('| ## ## ## ## ## |')
|
||||
logger.out('| ## ### ###### |')
|
||||
logger.out('|--------------------------------------------------|')
|
||||
logger.out('| Parallel Virtual Cluster node daemon v{0: <10} |'.format(version))
|
||||
logger.out('| FQDN: {0: <42} |'.format(myfqdn))
|
||||
logger.out('| Host: {0: <42} |'.format(myhostname))
|
||||
logger.out('| ID: {0: <44} |'.format(mynodeid))
|
||||
logger.out('| IPMI hostname: {0: <33} |'.format(config['ipmi_hostname']))
|
||||
logger.out('| Machine details: |')
|
||||
logger.out('| CPUs: {0: <40} |'.format(staticdata[0]))
|
||||
logger.out('| Arch: {0: <40} |'.format(staticdata[3]))
|
||||
logger.out('| OS: {0: <42} |'.format(staticdata[2]))
|
||||
logger.out('| Kernel: {0: <38} |'.format(staticdata[1]))
|
||||
logger.out('|--------------------------------------------------|')
|
||||
logger.out('')
|
||||
|
||||
logger.out('Starting pvcnoded on host {}'.format(myfqdn), state='s')
|
||||
|
||||
# Define some colours for future messages if applicable
|
||||
@@ -1142,6 +1155,7 @@ def collect_ceph_stats(queue):
|
||||
# Assemble a useful data structure
|
||||
pool_df = {
|
||||
'id': pool['id'],
|
||||
'stored_bytes': pool['stats']['stored'],
|
||||
'free_bytes': pool['stats']['max_avail'],
|
||||
'used_bytes': pool['stats']['bytes_used'],
|
||||
'used_percent': pool['stats']['percent_used'],
|
||||
|
Reference in New Issue
Block a user