Compare commits
17 Commits
| SHA1 |
| --- |
| b36ec43a2d |
| 2ac31e0a14 |
| 938d67f96b |
| f58e95e4c1 |
| 2338aa64f4 |
| e8c6df49e6 |
| c208898b34 |
| 1d5b9c33b5 |
| 0820cb3c5b |
| 0f8e5c6536 |
| 593810e53e |
| 185615e6e8 |
| 3a5955b41c |
| f06e0ea750 |
| 8ecd2c5e80 |
| 256c537159 |
| a5d495cfaf |
@@ -20,6 +20,13 @@ To get started with PVC, please see the [About](https://parallelvirtualcluster.r

## Changelog

+#### v0.9.9
+
+* Adds documentation updates
+* Removes single-element list stripping and fixes surrounding bugs
+* Adds additional fields to some API endpoints for ease of parsing by clients
+* Fixes bugs with network configuration
+
#### v0.9.8

* Adds support for cluster backup/restore
@@ -1046,6 +1046,9 @@ class API_VM_Root(Resource):
              source:
                type: string
                description: The parent network bridge on the node
+             vni:
+               type: integer
+               description: The VNI (PVC network) of the network bridge
              model:
                type: string
                description: The virtual network device model
@@ -1971,7 +1974,7 @@ class API_Network_Root(Resource):
              id: Message
        """
        if reqargs.get('name_servers', None):
-            name_servers = reqargs.get('name_servers', None).split(',')
+            name_servers = ','.join(reqargs.get('name_servers', None))
        else:
            name_servers = ''
        return api_helper.net_add(
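The `name_servers` handling above flips from splitting a string to joining a list. A minimal sketch of why, assuming the Flask-RESTful parser registers `name_servers` with `action='append'` (an assumption; the parser setup is not shown in this diff) so repeated query arguments arrive as a Python list:

```python
# Hypothetical illustration; the action='append' parser setup is an assumption.
# A request like ?name_servers=10.0.0.1&name_servers=10.0.0.2 then yields:
name_servers_arg = ['10.0.0.1', '10.0.0.2']

# The old code assumed a plain string and called .split(','), which raises
# AttributeError on a list; the new code joins the list into the
# comma-separated string that the backend helper expects.
name_servers = ','.join(name_servers_arg)
print(name_servers)  # -> '10.0.0.1,10.0.0.2'
```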
@@ -2098,7 +2101,7 @@ class API_Network_Element(Resource):
              id: Message
        """
        if reqargs.get('name_servers', None):
-            name_servers = reqargs.get('name_servers', None).split(',')
+            name_servers = ','.join(reqargs.get('name_servers', None))
        else:
            name_servers = ''
        return api_helper.net_add(
@@ -2195,7 +2198,7 @@ class API_Network_Element(Resource):
              id: Message
        """
        if reqargs.get('name_servers', None):
-            name_servers = reqargs.get('name_servers', None).split(',')
+            name_servers = ','.join(reqargs.get('name_servers', None))
        else:
            name_servers = ''
        return api_helper.net_modify(
@@ -3316,6 +3319,9 @@ class API_Storage_Ceph_Pool_Root(Resource):
              name:
                type: string
                description: The name of the pool
+             volume_count:
+               type: integer
+               description: The number of volumes in the pool
              stats:
                type: object
                properties:
@@ -205,10 +205,6 @@ def node_list(limit=None, daemon_state=None, coordinator_state=None, domain_stat
            'message': retdata
        }

-    # If this is a single element, strip it out of the list
-    if isinstance(retdata, list) and len(retdata) == 1:
-        retdata = retdata[0]
-
    return retdata, retcode
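This hunk and the similar ones below remove the single-element list stripping, so list endpoints now always return a JSON list even when exactly one object matches. A sketch of what this simplifies on the client side (the payload shape here is invented for illustration):

```python
import json

# Hypothetical client-side parsing; the payload shape is an assumption.
def parse_names(payload: str):
    data = json.loads(payload)
    # With stripping removed, a list endpoint always returns a list, even for
    # a single match, so no isinstance() normalization is needed:
    return [item['name'] for item in data]

print(parse_names('[{"name": "node1"}]'))  # -> ['node1']
```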
@@ -394,10 +390,6 @@ def vm_state(vm):
    retflag, retdata = pvc_vm.get_list(zk_conn, None, None, vm, is_fuzzy=False)
    pvc_common.stopZKConnection(zk_conn)

-    # If this is a single element, strip it out of the list
-    if isinstance(retdata, list) and len(retdata) == 1:
-        retdata = retdata[0]
-
    if retflag:
        if retdata:
            retcode = 200
@@ -427,10 +419,6 @@ def vm_node(vm):
    retflag, retdata = pvc_vm.get_list(zk_conn, None, None, vm, is_fuzzy=False)
    pvc_common.stopZKConnection(zk_conn)

-    # If this is a single element, strip it out of the list
-    if isinstance(retdata, list) and len(retdata) == 1:
-        retdata = retdata[0]
-
    if retflag:
        if retdata:
            retcode = 200
@@ -490,10 +478,6 @@ def vm_list(node=None, state=None, limit=None, is_fuzzy=True):
    retflag, retdata = pvc_vm.get_list(zk_conn, node, state, limit, is_fuzzy)
    pvc_common.stopZKConnection(zk_conn)

-    # If this is a single element, strip it out of the list
-    if isinstance(retdata, list) and len(retdata) == 1:
-        retdata = retdata[0]
-
    if retflag:
        if retdata:
            retcode = 200
@@ -545,10 +529,6 @@ def get_vm_meta(vm):
    retflag, retdata = pvc_vm.get_list(zk_conn, None, None, vm, is_fuzzy=False)
    pvc_common.stopZKConnection(zk_conn)

-    # If this is a single element, strip it out of the list
-    if isinstance(retdata, list) and len(retdata) == 1:
-        retdata = retdata[0]
-
    if retflag:
        if retdata:
            retcode = 200
@@ -820,11 +800,7 @@ def vm_flush_locks(vm):
    retflag, retdata = pvc_vm.get_list(zk_conn, None, None, vm, is_fuzzy=False)
    pvc_common.stopZKConnection(zk_conn)

-    # If this is a single element, strip it out of the list
-    if isinstance(retdata, list) and len(retdata) == 1:
-        retdata = retdata[0]
-
-    if retdata['state'] not in ['stop', 'disable']:
+    if retdata[0].get('state') not in ['stop', 'disable']:
        return {"message": "VM must be stopped to flush locks"}, 400

    zk_conn = pvc_common.startZKConnection(config['coordinators'])
@@ -853,10 +829,6 @@ def net_list(limit=None, is_fuzzy=True):
    retflag, retdata = pvc_network.get_list(zk_conn, limit, is_fuzzy)
    pvc_common.stopZKConnection(zk_conn)

-    # If this is a single element, strip it out of the list
-    if isinstance(retdata, list) and len(retdata) == 1:
-        retdata = retdata[0]
-
    if retflag:
        if retdata:
            retcode = 200
@@ -1029,10 +1001,6 @@ def net_acl_list(network, limit=None, direction=None, is_fuzzy=True):
            'message': retdata
        }

-    # If this is a single element, strip it out of the list
-    if isinstance(retdata, list) and len(retdata) == 1:
-        retdata = retdata[0]
-
    return retdata, retcode
@@ -1281,10 +1249,6 @@ def ceph_pool_list(limit=None, is_fuzzy=True):
    retflag, retdata = pvc_ceph.get_list_pool(zk_conn, limit, is_fuzzy)
    pvc_common.stopZKConnection(zk_conn)

-    # If this is a single element, strip it out of the list
-    if isinstance(retdata, list) and len(retdata) == 1:
-        retdata = retdata[0]
-
    if retflag:
        if retdata:
            retcode = 200
@@ -1348,10 +1312,6 @@ def ceph_volume_list(pool=None, limit=None, is_fuzzy=True):
    retflag, retdata = pvc_ceph.get_list_volume(zk_conn, pool, limit, is_fuzzy)
    pvc_common.stopZKConnection(zk_conn)

-    # If this is a single element, strip it out of the list
-    if isinstance(retdata, list) and len(retdata) == 1:
-        retdata = retdata[0]
-
    if retflag:
        if retdata:
            retcode = 200
@@ -1623,10 +1583,6 @@ def ceph_volume_snapshot_list(pool=None, volume=None, limit=None, is_fuzzy=True)
    retflag, retdata = pvc_ceph.get_list_snapshot(zk_conn, pool, volume, limit, is_fuzzy)
    pvc_common.stopZKConnection(zk_conn)

-    # If this is a single element, strip it out of the list
-    if isinstance(retdata, list) and len(retdata) == 1:
-        retdata = retdata[0]
-
    if retflag:
        if retdata:
            retcode = 200
@@ -164,7 +164,16 @@ def ceph_osd_info(config, osd):
    response = call_api(config, 'get', '/storage/ceph/osd/{osd}'.format(osd=osd))

    if response.status_code == 200:
-        return True, response.json()
+        if isinstance(response.json(), list) and len(response.json()) != 1:
+            # No exact match; return not found
+            return False, "OSD not found."
+        else:
+            # Return a single instance if the response is a list
+            if isinstance(response.json(), list):
+                return True, response.json()[0]
+            # This shouldn't happen, but is here just in case
+            else:
+                return True, response.json()
    else:
        return False, response.json().get('message', '')
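The same exact-match unwrapping is repeated verbatim in each `*_info` helper below; a sketch of the shared logic as a standalone function (the helper name and signature are my own, not part of this changeset):

```python
# Hypothetical consolidation of the repeated pattern; not part of the PR.
def extract_single(response_json, not_found_msg):
    """Return (True, item) for an exact single match, else (False, message)."""
    if isinstance(response_json, list):
        if len(response_json) != 1:
            # Zero or multiple fuzzy matches: treat as not found
            return False, not_found_msg
        # Exactly one match: unwrap it from the list
        return True, response_json[0]
    # Already a single object; shouldn't happen, but handled just in case
    return True, response_json

print(extract_single([{'id': 0}], "OSD not found."))  # -> (True, {'id': 0})
print(extract_single([], "OSD not found."))           # -> (False, 'OSD not found.')
```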
@@ -300,9 +309,6 @@ def format_list_osd(osd_list):
    # Handle empty list
    if not osd_list:
        osd_list = list()
-    # Handle single-item list
-    if not isinstance(osd_list, list):
-        osd_list = [osd_list]

    osd_list_output = []
@@ -555,7 +561,16 @@ def ceph_pool_info(config, pool):
    response = call_api(config, 'get', '/storage/ceph/pool/{pool}'.format(pool=pool))

    if response.status_code == 200:
-        return True, response.json()
+        if isinstance(response.json(), list) and len(response.json()) != 1:
+            # No exact match; return not found
+            return False, "Pool not found."
+        else:
+            # Return a single instance if the response is a list
+            if isinstance(response.json(), list):
+                return True, response.json()[0]
+            # This shouldn't happen, but is here just in case
+            else:
+                return True, response.json()
    else:
        return False, response.json().get('message', '')
@@ -628,9 +643,6 @@ def format_list_pool(pool_list):
    # Handle empty list
    if not pool_list:
        pool_list = list()
-    # Handle single-entry list
-    if not isinstance(pool_list, list):
-        pool_list = [pool_list]

    pool_list_output = []
@@ -835,7 +847,16 @@ def ceph_volume_info(config, pool, volume):
    response = call_api(config, 'get', '/storage/ceph/volume/{pool}/{volume}'.format(volume=volume, pool=pool))

    if response.status_code == 200:
-        return True, response.json()
+        if isinstance(response.json(), list) and len(response.json()) != 1:
+            # No exact match; return not found
+            return False, "Volume not found."
+        else:
+            # Return a single instance if the response is a list
+            if isinstance(response.json(), list):
+                return True, response.json()[0]
+            # This shouldn't happen, but is here just in case
+            else:
+                return True, response.json()
    else:
        return False, response.json().get('message', '')
@@ -989,9 +1010,6 @@ def format_list_volume(volume_list):
    # Handle empty list
    if not volume_list:
        volume_list = list()
-    # Handle single-entry list
-    if not isinstance(volume_list, list):
-        volume_list = [volume_list]

    volume_list_output = []
@@ -1112,7 +1130,16 @@ def ceph_snapshot_info(config, pool, volume, snapshot):
    response = call_api(config, 'get', '/storage/ceph/snapshot/{pool}/{volume}/{snapshot}'.format(snapshot=snapshot, volume=volume, pool=pool))

    if response.status_code == 200:
-        return True, response.json()
+        if isinstance(response.json(), list) and len(response.json()) != 1:
+            # No exact match; return not found
+            return False, "Snapshot not found."
+        else:
+            # Return a single instance if the response is a list
+            if isinstance(response.json(), list):
+                return True, response.json()[0]
+            # This shouldn't happen, but is here just in case
+            else:
+                return True, response.json()
    else:
        return False, response.json().get('message', '')
@@ -1209,9 +1236,6 @@ def format_list_snapshot(snapshot_list):
    # Handle empty list
    if not snapshot_list:
        snapshot_list = list()
-    # Handle single-entry list
-    if not isinstance(snapshot_list, list):
-        snapshot_list = [snapshot_list]

    snapshot_list_output = []
@@ -67,7 +67,16 @@ def net_info(config, net):
    response = call_api(config, 'get', '/network/{net}'.format(net=net))

    if response.status_code == 200:
-        return True, response.json()
+        if isinstance(response.json(), list) and len(response.json()) != 1:
+            # No exact match; return not found
+            return False, "Network not found."
+        else:
+            # Return a single instance if the response is a list
+            if isinstance(response.json(), list):
+                return True, response.json()[0]
+            # This shouldn't happen, but is here just in case
+            else:
+                return True, response.json()
    else:
        return False, response.json().get('message', '')
@@ -148,7 +157,7 @@ def net_modify(config, net, description, domain, name_servers, ip4_network, ip4_
    if ip6_gateway is not None:
        params['ip6_gateway'] = ip6_gateway
    if dhcp4_flag is not None:
-        params['dhcp4_flag'] = dhcp4_flag
+        params['dhcp4'] = dhcp4_flag
    if dhcp4_start is not None:
        params['dhcp4_start'] = dhcp4_start
    if dhcp4_end is not None:
@@ -196,7 +205,16 @@ def net_dhcp_info(config, net, mac):
    response = call_api(config, 'get', '/network/{net}/lease/{mac}'.format(net=net, mac=mac))

    if response.status_code == 200:
-        return True, response.json()
+        if isinstance(response.json(), list) and len(response.json()) != 1:
+            # No exact match; return not found
+            return False, "Lease not found."
+        else:
+            # Return a single instance if the response is a list
+            if isinstance(response.json(), list):
+                return True, response.json()[0]
+            # This shouldn't happen, but is here just in case
+            else:
+                return True, response.json()
    else:
        return False, response.json().get('message', '')
@@ -281,7 +299,16 @@ def net_acl_info(config, net, description):
    response = call_api(config, 'get', '/network/{net}/acl/{description}'.format(net=net, description=description))

    if response.status_code == 200:
-        return True, response.json()
+        if isinstance(response.json(), list) and len(response.json()) != 1:
+            # No exact match; return not found
+            return False, "ACL not found."
+        else:
+            # Return a single instance if the response is a list
+            if isinstance(response.json(), list):
+                return True, response.json()[0]
+            # This shouldn't happen, but is here just in case
+            else:
+                return True, response.json()
    else:
        return False, response.json().get('message', '')
@@ -440,10 +467,6 @@ def format_list(config, network_list):
    if not network_list:
        return "No network found"

-    # Handle single-element lists
-    if not isinstance(network_list, list):
-        network_list = [network_list]
-
    network_list_output = []

    # Determine optimal column widths
@@ -617,9 +640,6 @@ def format_list_acl(acl_list):
    # Handle when we get an empty entry
    if not acl_list:
        acl_list = list()
-    # Handle when we get a single entry
-    if isinstance(acl_list, dict):
-        acl_list = [acl_list]

    acl_list_output = []
@@ -81,7 +81,16 @@ def node_info(config, node):
    response = call_api(config, 'get', '/node/{node}'.format(node=node))

    if response.status_code == 200:
-        return True, response.json()
+        if isinstance(response.json(), list) and len(response.json()) != 1:
+            # No exact match, return not found
+            return False, "Node not found."
+        else:
+            # Return a single instance if the response is a list
+            if isinstance(response.json(), list):
+                return True, response.json()[0]
+            # This shouldn't happen, but is here just in case
+            else:
+                return True, response.json()
    else:
        return False, response.json().get('message', '')
@@ -186,10 +195,6 @@ def format_info(node_information, long_output):


def format_list(node_list, raw):
-    # Handle single-element lists
-    if not isinstance(node_list, list):
-        node_list = [node_list]
-
    if raw:
        ainformation = list()
        for node in sorted(item['name'] for item in node_list):
@@ -42,7 +42,16 @@ def template_info(config, template, template_type):
    response = call_api(config, 'get', '/provisioner/template/{template_type}/{template}'.format(template_type=template_type, template=template))

    if response.status_code == 200:
-        return True, response.json()
+        if isinstance(response.json(), list) and len(response.json()) != 1:
+            # No exact match; return not found
+            return False, "Template not found."
+        else:
+            # Return a single instance if the response is a list
+            if isinstance(response.json(), list):
+                return True, response.json()[0]
+            # This shouldn't happen, but is here just in case
+            else:
+                return True, response.json()
    else:
        return False, response.json().get('message', '')
@@ -171,7 +180,16 @@ def userdata_info(config, userdata):
    response = call_api(config, 'get', '/provisioner/userdata/{userdata}'.format(userdata=userdata))

    if response.status_code == 200:
-        return True, response.json()[0]
+        if isinstance(response.json(), list) and len(response.json()) != 1:
+            # No exact match; return not found
+            return False, "Userdata not found."
+        else:
+            # Return a single instance if the response is a list
+            if isinstance(response.json(), list):
+                return True, response.json()[0]
+            # This shouldn't happen, but is here just in case
+            else:
+                return True, response.json()
    else:
        return False, response.json().get('message', '')
@@ -294,7 +312,16 @@ def script_info(config, script):
    response = call_api(config, 'get', '/provisioner/script/{script}'.format(script=script))

    if response.status_code == 200:
-        return True, response.json()[0]
+        if isinstance(response.json(), list) and len(response.json()) != 1:
+            # No exact match; return not found
+            return False, "Script not found."
+        else:
+            # Return a single instance if the response is a list
+            if isinstance(response.json(), list):
+                return True, response.json()[0]
+            # This shouldn't happen, but is here just in case
+            else:
+                return True, response.json()
    else:
        return False, response.json().get('message', '')
@@ -417,7 +444,16 @@ def ova_info(config, name):
    response = call_api(config, 'get', '/provisioner/ova/{name}'.format(name=name))

    if response.status_code == 200:
-        return True, response.json()[0]
+        if isinstance(response.json(), list) and len(response.json()) != 1:
+            # No exact match; return not found
+            return False, "OVA not found."
+        else:
+            # Return a single instance if the response is a list
+            if isinstance(response.json(), list):
+                return True, response.json()[0]
+            # This shouldn't happen, but is here just in case
+            else:
+                return True, response.json()
    else:
        return False, response.json().get('message', '')
@@ -504,7 +540,16 @@ def profile_info(config, profile):
    response = call_api(config, 'get', '/provisioner/profile/{profile}'.format(profile=profile))

    if response.status_code == 200:
-        return True, response.json()[0]
+        if isinstance(response.json(), list) and len(response.json()) != 1:
+            # No exact match; return not found
+            return False, "Profile not found."
+        else:
+            # Return a single instance if the response is a list
+            if isinstance(response.json(), list):
+                return True, response.json()[0]
+            # This shouldn't happen, but is here just in case
+            else:
+                return True, response.json()
    else:
        return False, response.json().get('message', '')
@@ -41,16 +41,16 @@ def vm_info(config, vm):
    response = call_api(config, 'get', '/vm/{vm}'.format(vm=vm))

    if response.status_code == 200:
-        if isinstance(response.json(), list) and len(response.json()) > 1:
+        if isinstance(response.json(), list) and len(response.json()) != 1:
            # No exact match; return not found
            return False, "VM not found."
        else:
            # Return a single instance if the response is a list
            if isinstance(response.json(), list):
-                response = response.json()[0]
+                return True, response.json()[0]
            # This shouldn't happen, but is here just in case
            else:
-                response = response.json()
-            return True, response
+                return True, response.json()
    else:
        return False, response.json().get('message', '')
@@ -1138,7 +1138,7 @@ def format_info(config, domain_information, long_output):
    formatted_node_autostart = domain_information['node_autostart']

    if not domain_information.get('migration_method'):
-        formatted_migration_method = "none"
+        formatted_migration_method = "any"
    else:
        formatted_migration_method = domain_information['migration_method']
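A condensed view of the display logic this hunk changes: an unset `migration_method` means any method may be used, so it now renders as "any" instead of "none":

```python
# Mirrors the changed branch above; domain_information is a plain dict here.
def format_migration_method(domain_information):
    if not domain_information.get('migration_method'):
        return "any"
    return domain_information['migration_method']

print(format_migration_method({}))                            # -> 'any'
print(format_migration_method({'migration_method': 'live'}))  # -> 'live'
```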
@@ -1211,10 +1211,6 @@ def format_info(config, domain_information, long_output):


def format_list(config, vm_list, raw):
-    # Handle single-element lists
-    if not isinstance(vm_list, list):
-        vm_list = [vm_list]
-
    # Function to strip the "br" off of nets and return a nicer list
    def getNiceNetID(domain_information):
        # Network list
@@ -1601,38 +1601,38 @@ def net_add(vni, description, nettype, domain, ip_network, ip_gateway, ip6_netwo
@click.option(
    '-i', '--ipnet', 'ip4_network',
    default=None,
-    help='CIDR-format IPv4 network address for subnet.'
+    help='CIDR-format IPv4 network address for subnet; disable with "".'
)
@click.option(
    '-i6', '--ipnet6', 'ip6_network',
    default=None,
-    help='CIDR-format IPv6 network address for subnet.'
+    help='CIDR-format IPv6 network address for subnet; disable with "".'
)
@click.option(
    '-g', '--gateway', 'ip4_gateway',
    default=None,
-    help='Default IPv4 gateway address for subnet.'
+    help='Default IPv4 gateway address for subnet; disable with "".'
)
@click.option(
    '-g6', '--gateway6', 'ip6_gateway',
    default=None,
-    help='Default IPv6 gateway address for subnet.'
+    help='Default IPv6 gateway address for subnet; disable with "".'
)
@click.option(
    '--dhcp/--no-dhcp', 'dhcp_flag',
    is_flag=True,
    default=None,
-    help='Enable/disable DHCP for clients on subnet.'
+    help='Enable/disable DHCPv4 for clients on subnet (DHCPv6 is always enabled if DHCPv6 network is set).'
)
@click.option(
    '--dhcp-start', 'dhcp_start',
    default=None,
-    help='DHCP range start address.'
+    help='DHCPv4 range start address.'
)
@click.option(
    '--dhcp-end', 'dhcp_end',
    default=None,
-    help='DHCP range end address.'
+    help='DHCPv4 range end address.'
)
@click.argument(
    'vni'
@@ -4144,7 +4144,7 @@ def task_restore(filename, confirm_flag):


###############################################################################
-# pvc init
+# pvc task init
###############################################################################
@click.command(name='init', short_help='Initialize a new cluster.')
@click.option(
@@ -347,9 +347,11 @@ def getPoolInformation(zk_conn, pool):
    # Parse the stats data
    pool_stats_raw = zkhandler.readdata(zk_conn, '/ceph/pools/{}/stats'.format(pool))
    pool_stats = dict(json.loads(pool_stats_raw))
+    volume_count = len(getCephVolumes(zk_conn, pool))

    pool_information = {
        'name': pool,
+        'volume_count': volume_count,
        'stats': pool_stats
    }
    return pool_information
@@ -27,6 +27,7 @@ import shlex
import subprocess
import kazoo.client
from json import loads
+from re import match as re_match

from distutils.util import strtobool
@@ -359,6 +360,7 @@ def getDomainNetworks(parsed_xml, stats_data):
        net_wr_drops = net_stats.get('wr_drops', 0)
        net_obj = {
            'type': net_type,
+            'vni': re_match(r'[vm]*br([0-9a-z]+)', net_bridge).group(1),
            'mac': net_mac,
            'source': net_bridge,
            'model': net_model,
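The added `vni` field is derived from the bridge device name with the regex imported above; a quick illustration of what it extracts:

```python
# Illustration of the VNI extraction added above; bridge names are examples.
from re import match as re_match

for net_bridge in ('vmbr100', 'br200', 'brcluster'):
    print(re_match(r'[vm]*br([0-9a-z]+)', net_bridge).group(1))
# -> 100, 200, cluster
```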
@@ -292,23 +292,23 @@ def modify_network(zk_conn, vni, description=None, domain=None, name_servers=Non
                   dhcp4_flag=None, dhcp4_start=None, dhcp4_end=None):
    # Add the modified parameters to Zookeeper
    zk_data = dict()
-    if description:
+    if description is not None:
        zk_data.update({'/networks/{}'.format(vni): description})
-    if domain:
+    if domain is not None:
        zk_data.update({'/networks/{}/domain'.format(vni): domain})
-    if name_servers:
+    if name_servers is not None:
        zk_data.update({'/networks/{}/name_servers'.format(vni): name_servers})
-    if ip4_network:
+    if ip4_network is not None:
        zk_data.update({'/networks/{}/ip4_network'.format(vni): ip4_network})
-    if ip4_gateway:
+    if ip4_gateway is not None:
        zk_data.update({'/networks/{}/ip4_gateway'.format(vni): ip4_gateway})
-    if ip6_network:
+    if ip6_network is not None:
        zk_data.update({'/networks/{}/ip6_network'.format(vni): ip6_network})
+    if ip6_network is not None:
+        if ip6_network:
+            zk_data.update({'/networks/{}/dhcp6_flag'.format(vni): 'True'})
+        else:
+            zk_data.update({'/networks/{}/dhcp6_flag'.format(vni): 'False'})
-    if ip6_gateway:
+    if ip6_gateway is not None:
        zk_data.update({'/networks/{}/ip6_gateway'.format(vni): ip6_gateway})
    else:
        # If we're changing the network, but don't also specify the gateway,
@@ -317,11 +317,11 @@ def modify_network(zk_conn, vni, description=None, domain=None, name_servers=Non
            ip6_netpart, ip6_maskpart = ip6_network.split('/')
            ip6_gateway = '{}1'.format(ip6_netpart)
            zk_data.update({'/networks/{}/ip6_gateway'.format(vni): ip6_gateway})
-    if dhcp4_flag:
+    if dhcp4_flag is not None:
        zk_data.update({'/networks/{}/dhcp4_flag'.format(vni): dhcp4_flag})
-    if dhcp4_start:
+    if dhcp4_start is not None:
        zk_data.update({'/networks/{}/dhcp4_start'.format(vni): dhcp4_start})
-    if dhcp4_end:
+    if dhcp4_end is not None:
        zk_data.update({'/networks/{}/dhcp4_end'.format(vni): dhcp4_end})

    zkhandler.writedata(zk_conn, zk_data)
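The repeated swap from `if x:` to `if x is not None:` matters because an empty string is now a meaningful "disable this setting" value (see the updated CLI help text above); a two-line contrast:

```python
# An empty string means "clear this setting" and must still reach Zookeeper.
ip4_gateway = ''  # e.g. the user passed --gateway "" to disable the gateway

if ip4_gateway:
    print('old truthiness check: skipped, so the setting was never cleared')

if ip4_gateway is not None:
    print('new None check: the empty value is written, clearing the gateway')
```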
debian/changelog
@@ -1,3 +1,12 @@
+pvc (0.9.9-0) unstable; urgency=high
+
+  * Adds documentation updates
+  * Removes single-element list stripping and fixes surrounding bugs
+  * Adds additional fields to some API endpoints for ease of parsing by clients
+  * Fixes bugs with network configuration
+
+ -- Joshua M. Boniface <joshua@boniface.me>  Wed, 09 Dec 2020 02:20:20 -0500
+
pvc (0.9.8-0) unstable; urgency=high

  * Adds support for cluster backup/restore
@@ -53,15 +53,23 @@ All the components are designed to be run on top of Debian GNU/Linux, specifical

## Cluster Architecture

-A PVC cluster is based around "nodes", which are physical servers on which the various daemons, storage, networks, and virtual machines run. Each node is self-contained and is able to perform any and all cluster functions if needed; there is no segmentation of function between different types of physical hosts.
+A PVC cluster is based around "nodes", which are physical servers on which the various daemons, storage, networks, and virtual machines run. Each node is self-contained and is able to perform any and all cluster functions if needed; there is no segmentation of function between different types of physical hosts. Ideally, all nodes in a cluster will be identical in specifications, but in some situations mismatched nodes are acceptable, with limitations.

-A limited number of nodes, called "coordinators", are statically configured to provide additional services for the cluster. For instance, all databases, FRRouting instances, and Ceph management daemons run only on the set of cluster coordinators. At cluster bootstrap, 1 (testing-only), 3 (small clusters), or 5 (large clusters) nodes may be chosen as the coordinators. Other nodes can then be added as "hypervisor" nodes, which then provide only block device (storage) and VM (compute) functionality by connecting to the set of coordinators. This limits the scaling problem of the databases while ensuring there is still maximum redundancy and resiliency for the core cluster services. Which nodes are designated as coordinators can be changed should the administrator so desire, simply by installing the required software on additional nodes, though this is not recommended (the Ceph system in particular is cumbersome to reconfigure).
+A subset of the nodes, called "coordinators", are statically configured to provide additional services for the cluster. For instance, all databases, FRRouting instances, and Ceph management daemons run only on the set of cluster coordinators. At cluster bootstrap, 1 (testing-only), 3 (small clusters), or 5 (large clusters) nodes may be chosen as the coordinators. Other nodes can then be added as "hypervisor" nodes, which then provide only block device (storage) and VM (compute) functionality by connecting to the set of coordinators. This limits the scaling problem of the databases while ensuring there is still maximum redundancy and resiliency for the core cluster services.
+
+Additional nodes can be added to the cluster either as coordinators, or as hypervisors, by adding them to the Ansible configuration and running it against the full set of nodes. Note that the number of coordinators must always be odd, and more than 5 coordinators are normally unnecessary and can cause issues with the database; it is thus normally advisable to add any nodes beyond the initial set as hypervisors instead of coordinators. Nodes can be removed from service, but this is a manual process and should not be attempted unless absolutely required; the Ceph subsystem in particular is sensitive to changes in the coordinator nodes.

During runtime, one coordinator is elected the "primary" for the cluster. This designation can shift dynamically in response to cluster events, or be manually migrated by an administrator. The coordinator takes on a number of roles for which only one host may be active at once, for instance to provide DHCP services to managed client networks or to interface with the API.

-Nodes are networked together via a set of statically-configured networks. At a minimum, 2 discrete networks are required, with an optional 3rd. The "upstream" network is the primary network for the nodes, and provides functions such as upstream Internet access, routing to and from the cluster nodes, and management via the API; it may be either a firewalled public or NAT'd RFC1918 network, but should never be exposed directly to the Internet. The "cluster" network is an unrouted RFC1918 network which provides inter-node communication for managed client network traffic (VXLANs), cross-node routing, VM migration and failover, and database replication and access. Finally, though optionally collapsed with the "cluster" network, the "storage" network is another unrouted RFC1918 network which provides a dedicated logical and/or physical link between the nodes for storage traffic, including VM block device storage traffic, inter-OSD replication traffic, and Ceph heartbeat traffic, thus allowing it to be completely isolated from the other networks for maximum performance. With each network is a single "floating" IP address which follows the primary coordinator, providing a single interface to the cluster. Once configured, the cluster is then able to create additional networks of two kinds, "bridged" traditional vLANs and "managed" routed VXLANs, to provide network access to VMs.
+Nodes are networked together via a set of statically-configured networks. At a minimum, 2 discrete networks are required, with an optional 3rd.

-Further information about the general cluster architecture, including important considerations for node specifications/sizing and network configuration, can be found at the [cluster architecture page](/cluster-architecture).
+* The "upstream" network is the primary network for the nodes, and provides functions such as upstream Internet access, routing to and from the cluster nodes, and management via the API; it may be either a firewalled public or NAT'd RFC1918 network, but should never be exposed directly to the Internet.
+* The "cluster" network is an unrouted RFC1918 network which provides inter-node communication for managed client network traffic (VXLANs), cross-node routing, VM migration and failover, and database replication and access.
+* The "storage" network is another unrouted RFC1918 network which provides a dedicated logical and/or physical link between the nodes for storage traffic, including VM block device storage traffic, inter-OSD replication traffic, and Ceph heartbeat traffic, thus allowing it to be completely isolated from the other networks for maximum performance. This network can be optionally colocated with the "cluster" network, by specifying the same device for both, and can be further combined by specifying the same IP for both to completely collapse the "cluster" and "storage" networks. This may be ideal to simplify management of small clusters.
+
+Within each network is a single "floating" IP address which follows the primary coordinator, providing a single interface to the cluster. Once configured, the cluster is then able to create additional networks of two kinds, "bridged" traditional vLANs and "managed" routed VXLANs, to provide network access to VMs.
+
+Further information about the general cluster architecture, including important considerations for node specifications/sizing and network configuration, [can be found at the cluster architecture page](/cluster-architecture). It is imperative that potential PVC administrators read this document thoroughly to understand the specific requirements of PVC and avoid potential missteps in obtaining and deploying their cluster.

## Clients
@@ -64,35 +64,37 @@ For memory provisioning of VMs, PVC will warn the administrator, via a Degraded

### Operating System and Architecture

-As an underlying OS, only Debian 10 "Buster" is supported by PVC. This is the operating system installed by the PVC [node installer](https://github.com/parallelvirtualcluster/pvc-installer) and expected by the PVC [Ansible configuration system](https://github.com/parallelvirtualcluster/pvc-ansible). Ubuntu or other Debian-derived distributions may work, but are not officially supported. PVC also makes use of a custom repository to provide the PVC software and an updated version of Ceph beyond what is available in the base operating system, and this is only compatible officially with Debian 10 "Buster".
+As an underlying OS, only Debian GNU/Linux 10.x "Buster" is supported by PVC. This is the operating system installed by the PVC [node installer](https://github.com/parallelvirtualcluster/pvc-installer) and expected by the PVC [Ansible configuration system](https://github.com/parallelvirtualcluster/pvc-ansible). Ubuntu or other Debian-derived distributions may work, but are not officially supported. PVC also makes use of a custom repository to provide the PVC software and an updated version of Ceph beyond what is available in the base operating system, and this is only compatible officially with Debian 10 "Buster". PVC will, in the future, upgrade to future versions of Debian based on their release schedule and testing; releases may be skipped for official support if required. As a general rule, using the current versions of the official node installer and Ansible repository is the preferred and only supported method for deploying PVC.

-Currently, only the `amd64` (Intel 64 or AMD64) architecture is officially supported by PVC. Given the cross-platform nature of Python and the various software components in Debian, it may work on `armhf` or `arm64` systems as well, however this has not been tested by the author.
+Currently, only the `amd64` (Intel 64 or AMD64) architecture is officially supported by PVC. Given the cross-platform nature of Python and the various software components in Debian, it may work on `armhf` or `arm64` systems as well, however this has not been tested by the author and is not officially supported at this time.

## Storage Layout: Ceph and OSDs

-The Ceph subsystem of PVC, if enabled, creates a "hyperconverged" cluster whereby storage and VM hypervisor functions are collocated onto the same physical servers. The performance of the storage must be taken into account when sizing the nodes as mentioned above.
+PVC makes use of Ceph, a distributed, replicated, self-healing, and self-managing storage system to provide shared VM storage. While a PVC administrator is not required to understand Ceph for day-to-day administration, and PVC provides interfaces to most of the common storage functions required to operate a cluster, at least some knowledge of Ceph is advisable.

-The Ceph system is laid out similar to the other daemons. The Ceph Monitor and Manager functions are delegated to the Coordinators over the storage network, with all nodes connecting to these hosts to obtain the CRUSH maps and select OSD disks. OSDs are then distributed on all hosts, including non-coordinator hypervisors, and communicate with clients and each other over the storage network.
+The Ceph subsystem of PVC creates a "hyperconverged" cluster whereby storage and VM hypervisor functions are collocated onto the same physical servers; PVC does not differentiate between "storage" and "compute" nodes, and while storage support can be disabled and an external Ceph cluster used, this is not recommended. The performance of the storage must be taken into account when sizing the nodes as mentioned above.

-Disks must be balanced across all nodes. Therefore, adding 1 disk to 1 node is not sufficient; 1 disk must be added to all nodes at the same time for the available space to increase. Ideally, disk sizes should also be identical across all storage disks, though the weight of each disk can be configured when added to the cluster. Generally speaking, fewer larger disks are preferable to many smaller disks to minimize storage resource utilization, however slightly more storage performance can be gained from using many small disks; the administrator should therefore always aim to choose the biggest disks they can and grow by adding more identical disks as space or performance needs grow.
+Ceph on PVC is laid out similar to the other daemons. The Ceph Monitor and Manager functions are delegated to the Coordinators over the storage network, with all nodes connecting to these hosts to obtain the CRUSH maps and select OSD disks. OSDs are then distributed on all hosts, potentially including non-coordinator hypervisors if desired, and communicate with clients and each other over the storage network.

-PVC Ceph pools make use of the replication mechanism of Ceph to store multiple copies of each object, thus ensuring that data is always available even when a host is unavailable. Only "replica"-based Ceph redundancy is supported by PVC; erasure coded pools are not supported due to major performance impacts related to rewrites and random I/O.
+Disks must be balanced across all storage-containing nodes. For instance, adding 1 disk to 1 node is not sufficient to increase storage space; 1 disk must be added to all storage-containing nodes, based on the configured replication scheme of the various pools (see below), at the same time for the available space to increase. Ideally, disk sizes should also be identical across all storage disks, though the weight of each disk can be configured when added to the cluster. Generally speaking, fewer larger disks are preferable to many smaller disks to minimize storage resource utilization, however slightly more storage performance can be gained from using many small disks, if the other cluster hardware, and specifically CPUs, are performant enough. The administrator should therefore always aim to choose the biggest disks they can and grow by adding more identical disks as space or performance needs grow.

-The default replication level for a new pool is `copies=3, mincopies=2`. This will store 3 copies of each object, with a host-level failure domain, and will allow I/O as long as 2 copies are available. Thus, in a cluster of any size, all data is fully available even if a single host becomes unavailable. It will however use 3x the space for each piece of data stored, which must be considered when sizing the disk space for the cluster: a pool in this configuration, running on 3 nodes each with a single 400GB disk, will effectively have 400GB of total space available for use. As mentioned above, new disks must also be added in groups across nodes equal to the total number of `copies` to ensure new space is usable.
+PVC Ceph pools make use of the replication mechanism of Ceph to store multiple copies of each object, thus ensuring that data is always available even when a host is unavailable. Only "replica"-based Ceph redundancy is supported by PVC; erasure coded pools are not supported due to major performance impacts related to rewrites and random I/O as well as management overhead.

-Non-default values can also be set at pool creation time. For instance, one could create a `copies=3, mincopies=1` pool, which would allow I/O with two hosts down but leaves the cluster susceptible to a write hole should a disk fail in this state. Alternatively, for more resilience, one could create a `copies=4, mincopies=3` pool, which will allow 2 hosts to fail without a write hole, but would consume 4x the space for each piece of data stored and require new disks to be added in groups of 4 instead. Practically any combination of values is possible, however these 3 are the most relevant for most use-cases, and for most, especially small, clusters, the default is sufficient to provide solid redundancy and guard against host failures until the administrator can respond.
+The default replication level for a new pool is `copies=3, mincopies=2`. This will store 3 copies of each object, with a host-level failure domain, and will allow I/O as long as 2 copies are available. Thus, in a cluster of any size, all data is fully available even if a single host becomes unavailable. It will however use 3x the space for each piece of data stored, which must be considered when sizing the disk space for the cluster: a pool in this configuration, running on 3 nodes each with a single 400GB disk, will effectively have 400GB of total space available for use. As mentioned above, new disks must also be added in groups across nodes equal to the total number of `copies` to ensure new space is usable; for instance in a `copies=3` scheme, at least 3 disks must thus be added to different hosts at the same time for the available space to grow.

-Replication levels cannot be changed within PVC once a pool is created, however they can be changed via manual Ceph commands on a coordinator should the administrator require this. In any case, the administrator should carefully consider sizing, failure domains, and performance when selecting storage devices to ensure the right level of resiliency versus data usage for their use-case and cluster size.
+Non-default values can also be set at pool creation time. For instance, one could create a `copies=3, mincopies=1` pool, which would allow I/O with two hosts down, but leaves the cluster susceptible to a write hole should a disk fail in this state; this configuration is not recommended in most situations. Alternatively, for additional resilience, one could create a `copies=4, mincopies=2` pool, which would also allow 2 hosts to fail, without a write hole, but would consume 4x the space for each piece of data stored and require new disks to be added in groups of 4 instead. Practically any combination of values is possible, however these 3 are the most relevant for most use-cases, and for most, especially small, clusters, the default is sufficient to provide solid redundancy and guard against host failures until the administrator can respond.
+
+Replication levels cannot be changed within PVC once a pool is created, however they can be changed via manual Ceph commands on a coordinator should the administrator require this, though discussion of this process is outside of the scope of this documentation. The administrator should carefully consider sizing, failure domains, and performance when first selecting storage devices and creating pools, to ensure the right level of resiliency versus data usage for their use-case and planned cluster size.

## Physical network considerations

-At a minimum, a production PVC cluster should use at least two 1Gbps Ethernet interfaces, connected in an LACP or active-backup bond on one or more switches. On top of this bond, the various cluster networks are configured as 802.1q vLANs. PVC is able to support configurations without 802.1q vLAN support using multiple physical interfaces and no bridged client networks, but this is strongly discouraged due to the added complexity this introduces; the switches chosen for the cluster should include these requirements as a minimum.
+At a minimum, a production PVC cluster should use at least two 1Gbps Ethernet interfaces, connected in an LACP or active-backup bond on one or more switches. On top of this bond, the various cluster networks should be configured as 802.1q vLANs. PVC is able to support configurations without bonding or 802.1q vLAN support, using multiple physical interfaces and no bridged client networks, but this is strongly discouraged due to the added complexity this introduces; the switches chosen for the cluster should include these requirements as a minimum.

-More advanced physical network layouts are also possible. For instance, one could have two isolated networks. On the first network, each node has two 10Gbps Ethernet interfaces, which are combined in a bond across two redundant switch fabrics and that handle the upstream and cluster networks. On the second network, each node has an additional two 10Gbps, which are also combined in a bond across the redundant switch fabrics and handle the storage network. This configuration could support up to 10Gbps of aggregate client traffic while also supporting 10Gbps of aggregate storage traffic. Even more complex network configurations are possible if the cluster requires such performance. See the [Example Configurations](#example-configurations) section for some examples.
+More advanced physical network layouts are also possible. For instance, one could have two isolated networks. On the first network, each node has two 10Gbps Ethernet interfaces, which are combined in a bond across two redundant switch fabrics and that handle the upstream and cluster networks. On the second network, each node has an additional two 10Gbps, which are also combined in a bond across the redundant switch fabrics and handle the storage network. This configuration could support up to 10Gbps of aggregate client traffic while also supporting 10Gbps of aggregate storage traffic. Even more complex network configurations are possible if the cluster requires such performance. See the [Example Configurations](#example-configurations) section for some basic topology examples.

Only Ethernet networks are supported by PVC. More exotic interconnects such as Infiniband are not supported by default, and must be manually set up with Ethernet (e.g. EoIB) layers on top to be usable with PVC.

-PVC manages the IP addressing of all nodes itself and creates the required addresses during node daemon startup; thus, the on-boot network configuration of each interface should be set to "manual" with no IP addresses configured.
+PVC manages the IP addressing of all nodes itself and creates the required addresses during node daemon startup; thus, the on-boot network configuration of each interface should be set to "manual" with no IP addresses configured. This can be ignored safely, however, and the addresses specified manually in the networking configurations. PVC nodes use a split (`/etc/network/interfaces.d/<iface>`) network configuration model.

## Network Layout: Considering the required networks
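A quick worked version of the sizing arithmetic in the replication paragraphs above, using the document's own numbers:

```python
# Usable capacity of a replicated pool: raw space divided by the copy count.
nodes = 3
disk_per_node_gb = 400
copies = 3

raw_gb = nodes * disk_per_node_gb  # 1200 GB of raw space
usable_gb = raw_gb / copies        # 400.0 GB effectively usable with copies=3
print(raw_gb, usable_gb)
```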
@@ -18,6 +18,13 @@ To get started with PVC, please see the [About](https://parallelvirtualcluster.r

## Changelog

+#### v0.9.9
+
+* Adds documentation updates
+* Removes single-element list stripping and fixes surrounding bugs
+* Adds additional fields to some API endpoints for ease of parsing by clients
+* Fixes bugs with network configuration
+
#### v0.9.8

* Adds support for cluster backup/restore
@@ -36,7 +36,7 @@ The PVC role configures all the dependencies of PVC, including storage, networki

* Install and configure FRRouting.

-* Install and configure the main PVC daemon and API client, including initializing the PVC cluster (`pvc init`).
+* Install and configure the main PVC daemon and API client, including initializing the PVC cluster (`pvc task init`).

## Completion
@@ -10,6 +10,9 @@
        },
        "type": "object"
    },
+   "Cluster Data": {
+       "type": "object"
+   },
    "ClusterStatus": {
        "properties": {
            "health": {
@@ -156,6 +159,10 @@
    },
    "VMMetadata": {
        "properties": {
+           "migration_method": {
+               "description": "The preferred migration method (live, shutdown, none)",
+               "type": "string"
+           },
            "name": {
                "description": "The name of the VM",
                "type": "string"
@@ -671,6 +678,10 @@
                }
            },
            "type": "object"
        },
+       "volume_count": {
+           "description": "The number of volumes in the pool",
+           "type": "integer"
+       }
    },
    "type": "object"
@@ -954,6 +965,10 @@
        "description": "Internal provisioner template ID",
        "type": "integer"
    },
+   "migration_method": {
+       "description": "The preferred migration method (live, shutdown, none)",
+       "type": "string"
+   },
    "name": {
        "description": "Template name",
        "type": "string"
@@ -1158,6 +1173,10 @@
        "description": "Whether the VM has been migrated, either \"no\" or \"from <last_node>\"",
        "type": "string"
    },
+   "migration_method": {
+       "description": "The preferred migration method (live, shutdown, none)",
+       "type": "string"
+   },
    "name": {
        "description": "The name of the VM",
        "type": "string"
@@ -1198,6 +1217,10 @@
        "description": "The PVC network type",
        "type": "string"
    },
+   "vni": {
+       "description": "The VNI (PVC network) of the network bridge",
+       "type": "integer"
+   },
    "wr_bytes": {
        "description": "The number of write bytes on the interface",
        "type": "integer"
@@ -1397,9 +1420,38 @@
                ]
            }
        },
+       "/api/v1/backup": {
+           "get": {
+               "description": "",
+               "responses": {
+                   "200": {
+                       "description": "OK",
+                       "schema": {
+                           "$ref": "#/definitions/Cluster Data"
+                       }
+                   },
+                   "400": {
+                       "description": "Bad request"
+                   }
+               },
+               "summary": "Back up the Zookeeper data of a cluster in JSON format",
+               "tags": [
+                   "root"
+               ]
+           }
+       },
        "/api/v1/initialize": {
            "post": {
-               "description": "",
+               "description": "Note: Normally used only once during cluster bootstrap; checks for the existence of the \"/primary_node\" key before proceeding and returns 400 if found",
+               "parameters": [
+                   {
+                       "description": "A confirmation string to ensure that the API consumer really means it",
+                       "in": "query",
+                       "name": "yes-i-really-mean-it",
+                       "required": true,
+                       "type": "string"
+                   }
+               ],
                "responses": {
                    "200": {
                        "description": "OK",
@@ -3933,6 +3985,13 @@
                    "name": "node_autostart",
                    "required": false,
                    "type": "boolean"
                },
+               {
+                   "description": "The preferred migration method (live, shutdown, none)",
+                   "in": "query",
+                   "name": "migration_method",
+                   "required": false,
+                   "type": "string"
+               }
            ],
            "responses": {
@@ -4056,6 +4115,13 @@
                    "name": "node_autostart",
                    "required": false,
                    "type": "boolean"
                },
+               {
+                   "description": "The preferred migration method (live, shutdown, none)",
+                   "in": "query",
+                   "name": "migration_method",
+                   "required": false,
+                   "type": "string"
+               }
            ],
            "responses": {
@@ -4127,6 +4193,12 @@
                    "in": "query",
                    "name": "node_autostart",
                    "type": "boolean"
                },
+               {
+                   "description": "The preferred migration method (live, shutdown, none)",
+                   "in": "query",
+                   "name": "migration_method",
+                   "type": "string"
+               }
            ],
            "responses": {
@@ -4319,6 +4391,51 @@
                ]
            }
        },
+       "/api/v1/restore": {
+           "post": {
+               "description": "",
+               "parameters": [
+                   {
+                       "description": "A confirmation string to ensure that the API consumer really means it",
+                       "in": "query",
+                       "name": "yes-i-really-mean-it",
+                       "required": true,
+                       "type": "string"
+                   },
+                   {
+                       "description": "The raw JSON cluster backup data",
+                       "in": "query",
+                       "name": "cluster_data",
+                       "required": true,
+                       "type": "string"
+                   }
+               ],
+               "responses": {
+                   "200": {
+                       "description": "OK",
+                       "schema": {
+                           "$ref": "#/definitions/Message"
+                       }
+                   },
+                   "400": {
+                       "description": "Bad request",
+                       "schema": {
+                           "$ref": "#/definitions/Message"
+                       }
+                   },
+                   "500": {
+                       "description": "Restore error or code failure",
+                       "schema": {
+                           "$ref": "#/definitions/Message"
+                       }
+                   }
+               },
+               "summary": "Restore a backup over the cluster; destroys the existing data",
+               "tags": [
+                   "root"
+               ]
+           }
+       },
        "/api/v1/status": {
            "get": {
                "description": "",
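The new backup and restore endpoints can be exercised directly over HTTP; a sketch with the `requests` library, where the host, port, and confirmation value are assumptions rather than anything specified in this diff:

```python
# Hypothetical usage of the new endpoints; the host/port and the confirmation
# string value are assumptions.
import json
import requests

api = 'http://pvc.local:7370/api/v1'

# Back up the cluster's Zookeeper data as JSON
backup = requests.get('{}/backup'.format(api)).json()

# Restore it; both query parameters are required per the schema above
resp = requests.post('{}/restore'.format(api), params={
    'yes-i-really-mean-it': 'yes',
    'cluster_data': json.dumps(backup),
})
print(resp.status_code)
```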
@@ -5463,6 +5580,19 @@
                    "name": "autostart",
                    "required": false,
                    "type": "boolean"
                },
+               {
+                   "default": "none",
+                   "description": "The preferred migration method (live, shutdown, none)",
+                   "enum": [
+                       "live",
+                       "shutdown",
+                       "none"
+                   ],
+                   "in": "query",
+                   "name": "migration_method",
+                   "required": false,
+                   "type": "string"
+               }
            ],
            "responses": {
@@ -5587,6 +5717,19 @@
                    "name": "autostart",
                    "required": false,
                    "type": "boolean"
                },
+               {
+                   "default": "none",
+                   "description": "The preferred migration method (live, shutdown, none)",
+                   "enum": [
+                       "live",
+                       "shutdown",
+                       "none"
+                   ],
+                   "in": "query",
+                   "name": "migration_method",
+                   "required": false,
+                   "type": "string"
+               }
            ],
            "responses": {
@@ -5758,6 +5901,19 @@
                    "name": "profile",
                    "required": false,
                    "type": "string"
                },
+               {
+                   "default": "none",
+                   "description": "The preferred migration method (live, shutdown, none)",
+                   "enum": [
+                       "live",
+                       "shutdown",
+                       "none"
+                   ],
+                   "in": "query",
+                   "name": "migration_method",
+                   "required": false,
+                   "type": "string"
+               }
            ],
            "responses": {
@@ -54,7 +54,7 @@ import pvcnoded.CephInstance as CephInstance
import pvcnoded.MetadataAPIInstance as MetadataAPIInstance

# Version string for startup output
-version = '0.9.8'
+version = '0.9.9'

###############################################################################
# PVCD - node daemon startup program