Massive rejigger into single daemon
Completely restructure the daemon code to move the 4 discrete daemons into a single daemon that can be run on every hypervisor. Introduce the idea of a static list of "coordinator" nodes which are configured at install time to run Zookeeper and FRR in router mode, and which are allowed to take on client network management duties (gateway, DHCP, DNS, etc.) while also allowing them to run VMs (i.e. no dedicated "router" nodes required).
This commit is contained in:
@ -1,232 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# router.py - PVC client function library, router management
|
||||
# Part of the Parallel Virtual Cluster (PVC) system
|
||||
#
|
||||
# Copyright (C) 2018 Joshua M. Boniface <joshua@boniface.me>
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
#
|
||||
###############################################################################
|
||||
|
||||
import os
|
||||
import socket
|
||||
import time
|
||||
import uuid
|
||||
import re
|
||||
import tempfile
|
||||
import subprocess
|
||||
import difflib
|
||||
import colorama
|
||||
import click
|
||||
import lxml.objectify
|
||||
import configparser
|
||||
import kazoo.client
|
||||
|
||||
import client_lib.ansiiprint as ansiiprint
|
||||
import client_lib.zkhandler as zkhandler
|
||||
import client_lib.common as common
|
||||
|
||||
def getInformationFromRouter(zk_conn, router_name, long_output):
    """
    Build a human-readable, ANSI-coloured information report for a router.

    Args:
        zk_conn: Active kazoo Zookeeper connection.
        router_name: Name of the router under /routers.
        long_output: When truthy, append architecture/OS/kernel details.

    Returns:
        A single newline-joined string ready for printing.
    """
    router_daemon_state = zk_conn.get('/routers/{}/daemonstate'.format(router_name))[0].decode('ascii')
    router_network_state = zk_conn.get('/routers/{}/networkstate'.format(router_name))[0].decode('ascii')
    # Read staticdata once instead of once per field; the space-separated
    # fields are "<cpus> <kernel> <os> <arch>".
    router_static_data = zk_conn.get('/routers/{}/staticdata'.format(router_name))[0].decode('ascii').split()
    router_cpu_count = router_static_data[0]
    router_kernel = router_static_data[1]
    router_os = router_static_data[2]
    router_arch = router_static_data[3]
    router_cpu_load = zk_conn.get('/routers/{}/cpuload'.format(router_name))[0].decode('ascii').split()[0]

    # Map daemon state to its display colour; unknown states fall back to blue.
    daemon_state_colours = {
        'run': ansiiprint.green(),
        'stop': ansiiprint.red(),
        'init': ansiiprint.yellow(),
        'dead': ansiiprint.red() + ansiiprint.bold(),
    }
    daemon_state_colour = daemon_state_colours.get(router_daemon_state, ansiiprint.blue())

    # Only the primary router is highlighted green.
    if router_network_state == 'primary':
        network_state_colour = ansiiprint.green()
    else:
        network_state_colour = ansiiprint.blue()

    # Format a nice output; do this line-by-line then concat the elements at the end
    ainformation = []
    ainformation.append('{}Router information:{}'.format(ansiiprint.bold(), ansiiprint.end()))
    ainformation.append('')
    # Basic information
    ainformation.append('{}Name:{} {}'.format(ansiiprint.purple(), ansiiprint.end(), router_name))
    ainformation.append('{}Daemon State:{} {}{}{}'.format(ansiiprint.purple(), ansiiprint.end(), daemon_state_colour, router_daemon_state, ansiiprint.end()))
    ainformation.append('{}Network State:{} {}{}{}'.format(ansiiprint.purple(), ansiiprint.end(), network_state_colour, router_network_state, ansiiprint.end()))
    ainformation.append('{}CPUs:{} {}'.format(ansiiprint.purple(), ansiiprint.end(), router_cpu_count))
    ainformation.append('{}Load:{} {}'.format(ansiiprint.purple(), ansiiprint.end(), router_cpu_load))
    if long_output:
        ainformation.append('')
        ainformation.append('{}Architecture:{} {}'.format(ansiiprint.purple(), ansiiprint.end(), router_arch))
        ainformation.append('{}Operating System:{} {}'.format(ansiiprint.purple(), ansiiprint.end(), router_os))
        ainformation.append('{}Kernel Version:{} {}'.format(ansiiprint.purple(), ansiiprint.end(), router_kernel))

    # Join it all together
    information = '\n'.join(ainformation)
    return information
|
||||
|
||||
#
|
||||
# Direct Functions
|
||||
#
|
||||
def secondary_router(zk_conn, router):
    """
    Place a router into secondary (standby) network mode.

    Returns:
        (success, message) tuple; message is empty on success.
    """
    # Bail out early if the named router does not exist
    if not common.verifyRouter(zk_conn, router):
        return False, 'ERROR: No router named "{}" is present in the cluster.'.format(router)

    # Only demote a router that currently holds the primary role
    current_state = zkhandler.readdata(zk_conn, '/routers/{}/networkstate'.format(router))
    if current_state != 'primary':
        click.echo('Router {} is already in secondary mode.'.format(router))
        return True, ''

    click.echo('Setting router {} in secondary mode.'.format(router))
    # Clearing /routers relinquishes the primary role cluster-wide
    zkhandler.writedata(zk_conn, {'/routers': 'none'})
    return True, ''
|
||||
|
||||
def primary_router(zk_conn, router):
    """
    Promote a router to primary network mode.

    Returns:
        (success, message) tuple; message is empty on success.
    """
    # Bail out early if the named router does not exist
    if not common.verifyRouter(zk_conn, router):
        return False, 'ERROR: No router named "{}" is present in the cluster.'.format(router)

    # Only promote a router that is currently in the secondary role
    current_state = zkhandler.readdata(zk_conn, '/routers/{}/networkstate'.format(router))
    if current_state != 'secondary':
        click.echo('Router {} is already in primary mode.'.format(router))
        return True, ''

    click.echo('Setting router {} in primary mode.'.format(router))
    # Writing the router name to /routers claims the primary role
    zkhandler.writedata(zk_conn, {'/routers': router})
    return True, ''
|
||||
|
||||
def get_info(zk_conn, router, long_output):
    """
    Print a formatted information report for a single router.

    Returns:
        (success, message) tuple; message is empty on success.
    """
    # The router must exist before we try to query it
    if not common.verifyRouter(zk_conn, router):
        return False, 'ERROR: No router named "{}" is present in the cluster.'.format(router)

    # Build the pretty report and emit it
    click.echo(getInformationFromRouter(zk_conn, router, long_output))
    return True, ''
|
||||
|
||||
def get_list(zk_conn, limit):
    """
    Print a sorted, formatted list of routers and their state.

    Args:
        zk_conn: Active kazoo Zookeeper connection.
        limit: Optional regular expression filtering router names; patterns
            not explicitly anchored are treated as fuzzy (substring) matches.

    Returns:
        (success, message) tuple; message carries any regex error.
    """
    # Normalize the limit ONCE, outside the per-router loop. The original
    # code mutated `limit` on every iteration, re-wrapping it in '.*' for
    # each router examined.
    if limit is not None:
        # Implicitly assume fuzzy limits unless the caller anchored them
        if re.match(r'\^.*', limit) is None:
            limit = '.*' + limit
        if re.match(r'.*\$', limit) is None:
            limit = limit + '.*'

    # Match our limit
    router_list = []
    full_router_list = zk_conn.get_children('/routers')
    for router in full_router_list:
        if limit is not None:
            try:
                if re.match(limit, router) is not None:
                    router_list.append(router)
            except Exception as e:
                return False, 'Regex Error: {}'.format(e)
        else:
            router_list.append(router)

    router_list_output = []
    router_daemon_state = {}
    router_network_state = {}
    router_cpu_count = {}
    router_cpu_load = {}

    # Gather information for printing
    for router_name in router_list:
        router_daemon_state[router_name] = zk_conn.get('/routers/{}/daemonstate'.format(router_name))[0].decode('ascii')
        router_network_state[router_name] = zk_conn.get('/routers/{}/networkstate'.format(router_name))[0].decode('ascii')
        router_cpu_count[router_name] = zk_conn.get('/routers/{}/staticdata'.format(router_name))[0].decode('ascii').split()[0]
        router_cpu_load[router_name] = zk_conn.get('/routers/{}/cpuload'.format(router_name))[0].decode('ascii').split()[0]

    # Determine optimal column widths
    # Dynamic columns: router_name (each padded by one trailing space)
    router_name_length = max((len(name) + 1 for name in router_list), default=0)

    # Format the string (header)
    router_list_output.append(
        '{bold}{router_name: <{router_name_length}} \
State: {daemon_state_colour}{router_daemon_state: <7}{end_colour} {network_state_colour}{router_network_state: <10}{end_colour} \
Resources: {router_cpu_count: <5} {router_cpu_load: <6}{end_bold}'.format(
            router_name_length=router_name_length,
            bold=ansiiprint.bold(),
            end_bold=ansiiprint.end(),
            daemon_state_colour='',
            network_state_colour='',
            end_colour='',
            router_name='Name',
            router_daemon_state='Daemon',
            router_network_state='Network',
            router_cpu_count='CPUs',
            router_cpu_load='Load'
        )
    )

    # Format the string (elements); unknown daemon states fall back to blue
    daemon_state_colours = {
        'run': ansiiprint.green(),
        'stop': ansiiprint.red(),
        'init': ansiiprint.yellow(),
        'dead': ansiiprint.red() + ansiiprint.bold(),
    }
    for router_name in router_list:
        daemon_state_colour = daemon_state_colours.get(router_daemon_state[router_name], ansiiprint.blue())

        if router_network_state[router_name] == 'primary':
            network_state_colour = ansiiprint.green()
        else:
            network_state_colour = ansiiprint.blue()

        router_list_output.append(
            '{bold}{router_name: <{router_name_length}} \
{daemon_state_colour}{router_daemon_state: <7}{end_colour} {network_state_colour}{router_network_state: <10}{end_colour} \
{router_cpu_count: <5} {router_cpu_load: <6}{end_bold}'.format(
                router_name_length=router_name_length,
                bold='',
                end_bold='',
                daemon_state_colour=daemon_state_colour,
                network_state_colour=network_state_colour,
                end_colour=ansiiprint.end(),
                router_name=router_name,
                router_daemon_state=router_daemon_state[router_name],
                router_network_state=router_network_state[router_name],
                router_cpu_count=router_cpu_count[router_name],
                router_cpu_load=router_cpu_load[router_name]
            )
        )

    click.echo('\n'.join(sorted(router_list_output)))

    return True, ''
|
@ -78,9 +78,9 @@ def getDomainMainDetails(parsed_xml):
|
||||
dmemory = str(parsed_xml.memory)
|
||||
dmemory_unit = str(parsed_xml.memory.attrib['unit'])
|
||||
if dmemory_unit == 'KiB':
|
||||
dmemory = str(int(dmemory) * 1024)
|
||||
dmemory = int(int(dmemory) / 1024)
|
||||
elif dmemory_unit == 'GiB':
|
||||
dmemory = str(int(dmemory) / 1024)
|
||||
dmemory = int(int(dmemory) * 1024)
|
||||
dvcpu = str(parsed_xml.vcpu)
|
||||
try:
|
||||
dvcputopo = '{}/{}/{}'.format(parsed_xml.cpu.topology.attrib['sockets'], parsed_xml.cpu.topology.attrib['cores'], parsed_xml.cpu.topology.attrib['threads'])
|
||||
@ -185,106 +185,106 @@ def verifyRouter(zk_conn, router):
|
||||
|
||||
|
||||
#
# Get the list of valid target nodes
#
def getNodes(zk_conn, dom_uuid):
    """
    Return node names that are valid migration targets for a domain.

    A node qualifies when its daemon state is 'run', its domain state is
    'ready', and it is not the node currently hosting the domain.

    Args:
        zk_conn: Active kazoo Zookeeper connection.
        dom_uuid: UUID of the domain being migrated.
    """
    valid_node_list = []
    full_node_list = zk_conn.get_children('/nodes')

    # The domain may not exist (or may have no node recorded); in that
    # case no node is excluded as "current". Narrowed from a bare except
    # so KeyboardInterrupt/SystemExit are not swallowed.
    try:
        current_node = zk_conn.get('/domains/{}/node'.format(dom_uuid))[0].decode('ascii')
    except Exception:
        current_node = None

    for node in full_node_list:
        daemon_state = zk_conn.get('/nodes/{}/daemonstate'.format(node))[0].decode('ascii')
        domain_state = zk_conn.get('/nodes/{}/domainstate'.format(node))[0].decode('ascii')

        # Never migrate a domain onto the node it already occupies
        if node == current_node:
            continue

        # Skip nodes that are not healthy and accepting domains
        if daemon_state != 'run' or domain_state != 'ready':
            continue

        valid_node_list.append(node)

    return valid_node_list
|
||||
|
||||
#
# Find a migration target
#
def findTargetNode(zk_conn, search_field, dom_uuid):
    """
    Select a migration target node for a domain using the given strategy.

    Args:
        zk_conn: Active kazoo Zookeeper connection.
        search_field: Strategy name: 'mem', 'load', 'vcpus', or 'vms'.
        dom_uuid: UUID of the domain being migrated.

    Returns:
        The chosen node name, or None for an unrecognised strategy.
    """
    if search_field == 'mem':
        return findTargetNodeMem(zk_conn, dom_uuid)
    if search_field == 'load':
        return findTargetNodeLoad(zk_conn, dom_uuid)
    if search_field == 'vcpus':
        return findTargetNodeVCPUs(zk_conn, dom_uuid)
    if search_field == 'vms':
        return findTargetNodeVMs(zk_conn, dom_uuid)
    # Unknown strategy: no target
    return None
|
||||
|
||||
# via free memory (relative to allocated memory)
def findTargetNodeMem(zk_conn, dom_uuid):
    """
    Pick the valid target node with the most unallocated memory headroom.

    Headroom is total memory (used + free) minus memory already allocated
    to domains, so overprovisioned nodes rank lower.

    Returns:
        The best node name, or None when no node has positive headroom.
    """
    most_allocfree = 0
    target_node = None

    node_list = getNodes(zk_conn, dom_uuid)
    for node in node_list:
        memalloc = int(zk_conn.get('/nodes/{}/memalloc'.format(node))[0].decode('ascii'))
        memused = int(zk_conn.get('/nodes/{}/memused'.format(node))[0].decode('ascii'))
        memfree = int(zk_conn.get('/nodes/{}/memfree'.format(node))[0].decode('ascii'))
        memtotal = memused + memfree
        allocfree = memtotal - memalloc

        if allocfree > most_allocfree:
            most_allocfree = allocfree
            target_node = node

    return target_node
|
||||
|
||||
# via load average
def findTargetNodeLoad(zk_conn, dom_uuid):
    """
    Pick the valid target node with the lowest CPU load average.

    Returns:
        The least-loaded node name, or None if every candidate's load is
        at or above the 9999 sentinel (or there are no candidates).
    """
    least_load = 9999
    target_node = None

    node_list = getNodes(zk_conn, dom_uuid)
    for node in node_list:
        load = float(zk_conn.get('/nodes/{}/cpuload'.format(node))[0].decode('ascii'))

        if load < least_load:
            least_load = load
            target_node = node

    return target_node
|
||||
|
||||
# via total vCPUs
def findTargetNodeVCPUs(zk_conn, dom_uuid):
    """
    Pick the valid target node with the fewest allocated vCPUs.

    Returns:
        The node name with the lowest vcpualloc, or None if every
        candidate is at or above the 9999 sentinel (or there are none).
    """
    least_vcpus = 9999
    target_node = None

    node_list = getNodes(zk_conn, dom_uuid)
    for node in node_list:
        vcpus = int(zk_conn.get('/nodes/{}/vcpualloc'.format(node))[0].decode('ascii'))

        if vcpus < least_vcpus:
            least_vcpus = vcpus
            target_node = node

    return target_node
|
||||
|
||||
# via total VMs
def findTargetNodeVMs(zk_conn, dom_uuid):
    """
    Pick the valid target node hosting the fewest domains (VMs).

    Returns:
        The node name with the lowest domainscount, or None if every
        candidate is at or above the 9999 sentinel (or there are none).
    """
    least_vms = 9999
    target_node = None

    node_list = getNodes(zk_conn, dom_uuid)
    for node in node_list:
        vms = int(zk_conn.get('/nodes/{}/domainscount'.format(node))[0].decode('ascii'))

        if vms < least_vms:
            least_vms = vms
            target_node = node

    return target_node
|
||||
|
@ -35,30 +35,26 @@ import configparser
|
||||
import kazoo.client
|
||||
|
||||
import client_lib.ansiiprint as ansiiprint
|
||||
import client_lib.zkhandler as zkhandler
|
||||
import client_lib.common as common
|
||||
import client_lib.vm as pvc_vm
|
||||
|
||||
def getInformationFromNode(zk_conn, node_name, long_output):
|
||||
node_daemon_state = zk_conn.get('/nodes/{}/daemonstate'.format(node_name))[0].decode('ascii')
|
||||
node_domain_state = zk_conn.get('/nodes/{}/domainstate'.format(node_name))[0].decode('ascii')
|
||||
node_cpu_count = zk_conn.get('/nodes/{}/staticdata'.format(node_name))[0].decode('ascii').split()[0]
|
||||
node_kernel = zk_conn.get('/nodes/{}/staticdata'.format(node_name))[0].decode('ascii').split()[1]
|
||||
node_os = zk_conn.get('/nodes/{}/staticdata'.format(node_name))[0].decode('ascii').split()[2]
|
||||
node_arch = zk_conn.get('/nodes/{}/staticdata'.format(node_name))[0].decode('ascii').split()[3]
|
||||
node_mem_used = zk_conn.get('/nodes/{}/memused'.format(node_name))[0].decode('ascii')
|
||||
node_mem_free = zk_conn.get('/nodes/{}/memfree'.format(node_name))[0].decode('ascii')
|
||||
node_mem_total = int(node_mem_used) + int(node_mem_free)
|
||||
node_load = zk_conn.get('/nodes/{}/cpuload'.format(node_name))[0].decode('ascii')
|
||||
node_domains_count = zk_conn.get('/nodes/{}/domainscount'.format(node_name))[0].decode('ascii')
|
||||
node_running_domains = zk_conn.get('/nodes/{}/runningdomains'.format(node_name))[0].decode('ascii').split()
|
||||
node_mem_allocated = 0
|
||||
for domain in node_running_domains:
|
||||
try:
|
||||
parsed_xml = common.getDomainXML(zk_conn, domain)
|
||||
duuid, dname, ddescription, dmemory, dvcpu, dvcputopo = common.getDomainMainDetails(parsed_xml)
|
||||
node_mem_allocated += int(dmemory)
|
||||
except AttributeError:
|
||||
click.echo('Error: Domain {} does not exist.'.format(domain))
|
||||
node_daemon_state = zkhandler.readdata(zk_conn, '/nodes/{}/daemonstate'.format(node_name))
|
||||
node_router_state = zkhandler.readdata(zk_conn, '/nodes/{}/routerstate'.format(node_name))
|
||||
node_domain_state = zkhandler.readdata(zk_conn, '/nodes/{}/domainstate'.format(node_name))
|
||||
node_static_data = zkhandler.readdata(zk_conn, '/nodes/{}/staticdata'.format(node_name)).split()
|
||||
node_cpu_count = node_static_data[0]
|
||||
node_kernel = node_static_data[1]
|
||||
node_os = node_static_data[2]
|
||||
node_arch = node_static_data[3]
|
||||
node_mem_allocated = int(zkhandler.readdata(zk_conn, '/nodes/{}/memalloc'.format(node_name)))
|
||||
node_mem_used = int(zkhandler.readdata(zk_conn, '/nodes/{}/memused'.format(node_name)))
|
||||
node_mem_free = int(zkhandler.readdata(zk_conn, '/nodes/{}/memfree'.format(node_name)))
|
||||
node_mem_total = node_mem_used + node_mem_free
|
||||
node_load = zkhandler.readdata(zk_conn, '/nodes/{}/cpuload'.format(node_name))
|
||||
node_domains_count = zkhandler.readdata(zk_conn, '/nodes/{}/domainscount'.format(node_name))
|
||||
node_running_domains = zkhandler.readdata(zk_conn, '/nodes/{}/runningdomains'.format(node_name)).split()
|
||||
|
||||
if node_daemon_state == 'run':
|
||||
daemon_state_colour = ansiiprint.green()
|
||||
@ -71,6 +67,13 @@ def getInformationFromNode(zk_conn, node_name, long_output):
|
||||
else:
|
||||
daemon_state_colour = ansiiprint.blue()
|
||||
|
||||
if node_router_state == 'primary':
|
||||
router_state_colour = ansiiprint.green()
|
||||
elif node_router_state == 'secondary':
|
||||
router_state_colour = ansiiprint.blue()
|
||||
else:
|
||||
router_state_colour = ansiiprint.purple()
|
||||
|
||||
if node_domain_state == 'ready':
|
||||
domain_state_colour = ansiiprint.green()
|
||||
else:
|
||||
@ -83,6 +86,7 @@ def getInformationFromNode(zk_conn, node_name, long_output):
|
||||
# Basic information
|
||||
ainformation.append('{}Name:{} {}'.format(ansiiprint.purple(), ansiiprint.end(), node_name))
|
||||
ainformation.append('{}Daemon State:{} {}{}{}'.format(ansiiprint.purple(), ansiiprint.end(), daemon_state_colour, node_daemon_state, ansiiprint.end()))
|
||||
ainformation.append('{}Router State:{} {}{}{}'.format(ansiiprint.purple(), ansiiprint.end(), router_state_colour, node_router_state, ansiiprint.end()))
|
||||
ainformation.append('{}Domain State:{} {}{}{}'.format(ansiiprint.purple(), ansiiprint.end(), domain_state_colour, node_domain_state, ansiiprint.end()))
|
||||
ainformation.append('{}Active VM Count:{} {}'.format(ansiiprint.purple(), ansiiprint.end(), node_domains_count))
|
||||
if long_output == True:
|
||||
@ -105,6 +109,50 @@ def getInformationFromNode(zk_conn, node_name, long_output):
|
||||
#
|
||||
# Direct Functions
|
||||
#
|
||||
def secondary_node(zk_conn, node):
    """
    Relinquish the primary router role from a coordinator node.

    Returns:
        (success, message) tuple; message is empty on success.
    """
    # Bail out early if the named node does not exist
    if not common.verifyNode(zk_conn, node):
        return False, 'ERROR: No node named "{}" is present in the cluster.'.format(node)

    # Ensure node is a coordinator; plain hypervisors never hold router roles
    daemon_mode = zkhandler.readdata(zk_conn, '/nodes/{}/daemonmode'.format(node))
    if daemon_mode == 'hypervisor':
        return False, 'ERROR: Cannot change router mode on non-coordinator node "{}"'.format(node)

    # Only demote a node that currently holds the primary role
    current_state = zkhandler.readdata(zk_conn, '/nodes/{}/routerstate'.format(node))
    if current_state != 'primary':
        click.echo('Node {} is already in secondary router mode.'.format(node))
        return True, ''

    click.echo('Setting node {} in secondary router mode.'.format(node))
    # Clearing /primary_node relinquishes the primary role cluster-wide
    zkhandler.writedata(zk_conn, {'/primary_node': 'none'})
    return True, ''
|
||||
|
||||
def primary_node(zk_conn, node):
    """
    Promote a coordinator node to the primary router role.

    Returns:
        (success, message) tuple; message is empty on success.
    """
    # Bail out early if the named node does not exist
    if not common.verifyNode(zk_conn, node):
        return False, 'ERROR: No node named "{}" is present in the cluster.'.format(node)

    # Ensure node is a coordinator; plain hypervisors never hold router roles
    daemon_mode = zkhandler.readdata(zk_conn, '/nodes/{}/daemonmode'.format(node))
    if daemon_mode == 'hypervisor':
        return False, 'ERROR: Cannot change router mode on non-coordinator node "{}"'.format(node)

    # Only promote a node that is currently in the secondary role
    current_state = zkhandler.readdata(zk_conn, '/nodes/{}/routerstate'.format(node))
    if current_state != 'secondary':
        click.echo('Node {} is already in primary router mode.'.format(node))
        return True, ''

    click.echo('Setting node {} in primary router mode.'.format(node))
    # Writing the node name to /primary_node claims the primary role
    zkhandler.writedata(zk_conn, {'/primary_node': node})
    return True, ''
|
||||
|
||||
def flush_node(zk_conn, node, wait):
|
||||
# Verify node is valid
|
||||
if not common.verifyNode(zk_conn, node):
|
||||
@ -113,14 +161,14 @@ def flush_node(zk_conn, node, wait):
|
||||
click.echo('Flushing hypervisor {} of running VMs.'.format(node))
|
||||
|
||||
# Add the new domain to Zookeeper
|
||||
transaction = zk_conn.transaction()
|
||||
transaction.set_data('/nodes/{}/domainstate'.format(node), 'flush'.encode('ascii'))
|
||||
results = transaction.commit()
|
||||
zkhandler.writedata(zk_conn, {
|
||||
'/nodes/{}/domainstate'.format(node): 'flush'
|
||||
})
|
||||
|
||||
if wait == True:
|
||||
while True:
|
||||
time.sleep(1)
|
||||
node_state = zk_conn.get('/nodes/{}/domainstate'.format(node))[0].decode('ascii')
|
||||
node_state = zkhandler.readdata(zk_conn, '/nodes/{}/domainstate'.format(node))
|
||||
if node_state == "flushed":
|
||||
break
|
||||
|
||||
@ -134,9 +182,9 @@ def ready_node(zk_conn, node):
|
||||
click.echo('Restoring hypervisor {} to active service.'.format(node))
|
||||
|
||||
# Add the new domain to Zookeeper
|
||||
transaction = zk_conn.transaction()
|
||||
transaction.set_data('/nodes/{}/domainstate'.format(node), 'unflush'.encode('ascii'))
|
||||
results = transaction.commit()
|
||||
zkhandler.writedata(zk_conn, {
|
||||
'/nodes/{}/domainstate'.format(node): 'unflush'
|
||||
})
|
||||
|
||||
return True, ''
|
||||
|
||||
@ -186,6 +234,7 @@ def get_list(zk_conn, limit):
|
||||
|
||||
node_list_output = []
|
||||
node_daemon_state = {}
|
||||
node_router_state = {}
|
||||
node_domain_state = {}
|
||||
node_cpu_count = {}
|
||||
node_mem_used = {}
|
||||
@ -198,55 +247,69 @@ def get_list(zk_conn, limit):
|
||||
|
||||
# Gather information for printing
|
||||
for node_name in node_list:
|
||||
node_daemon_state[node_name] = zk_conn.get('/nodes/{}/daemonstate'.format(node_name))[0].decode('ascii')
|
||||
node_domain_state[node_name] = zk_conn.get('/nodes/{}/domainstate'.format(node_name))[0].decode('ascii')
|
||||
node_cpu_count[node_name] = zk_conn.get('/nodes/{}/staticdata'.format(node_name))[0].decode('ascii').split()[0]
|
||||
node_mem_used[node_name] = zk_conn.get('/nodes/{}/memused'.format(node_name))[0].decode('ascii')
|
||||
node_mem_free[node_name] = zk_conn.get('/nodes/{}/memfree'.format(node_name))[0].decode('ascii')
|
||||
node_mem_total[node_name] = int(node_mem_used[node_name]) + int(node_mem_free[node_name])
|
||||
node_load[node_name] = zk_conn.get('/nodes/{}/cpuload'.format(node_name))[0].decode('ascii')
|
||||
node_domains_count[node_name] = zk_conn.get('/nodes/{}/domainscount'.format(node_name))[0].decode('ascii')
|
||||
node_running_domains[node_name] = zk_conn.get('/nodes/{}/runningdomains'.format(node_name))[0].decode('ascii').split()
|
||||
node_mem_allocated[node_name] = 0
|
||||
for domain in node_running_domains[node_name]:
|
||||
try:
|
||||
parsed_xml = common.getDomainXML(zk_conn, domain)
|
||||
duuid, dname, ddescription, dmemory, dvcpu, dvcputopo = common.getDomainMainDetails(parsed_xml)
|
||||
node_mem_allocated[node_name] += int(dmemory)
|
||||
except AttributeError:
|
||||
click.echo('Error: Domain {} does not exist.'.format(domain))
|
||||
node_daemon_state[node_name] = zkhandler.readdata(zk_conn, '/nodes/{}/daemonstate'.format(node_name))
|
||||
node_router_state[node_name] = zkhandler.readdata(zk_conn, '/nodes/{}/routerstate'.format(node_name))
|
||||
node_domain_state[node_name] = zkhandler.readdata(zk_conn, '/nodes/{}/domainstate'.format(node_name))
|
||||
node_cpu_count[node_name] = zkhandler.readdata(zk_conn, '/nodes/{}/staticdata'.format(node_name)).split()[0]
|
||||
node_mem_allocated[node_name] = int(zkhandler.readdata(zk_conn, '/nodes/{}/memalloc'.format(node_name)))
|
||||
node_mem_used[node_name] = int(zkhandler.readdata(zk_conn, '/nodes/{}/memused'.format(node_name)))
|
||||
node_mem_free[node_name] = int(zkhandler.readdata(zk_conn, '/nodes/{}/memfree'.format(node_name)))
|
||||
node_mem_total[node_name] = node_mem_used[node_name] + node_mem_free[node_name]
|
||||
node_load[node_name] = zkhandler.readdata(zk_conn, '/nodes/{}/cpuload'.format(node_name))
|
||||
node_domains_count[node_name] = zkhandler.readdata(zk_conn, '/nodes/{}/domainscount'.format(node_name))
|
||||
node_running_domains[node_name] = zkhandler.readdata(zk_conn, '/nodes/{}/runningdomains'.format(node_name)).split()
|
||||
|
||||
# Determine optimal column widths
|
||||
# Dynamic columns: node_name, hypervisor, migrated
|
||||
node_name_length = 0
|
||||
# Dynamic columns: node_name, daemon_state, network_state, domain_state, load
|
||||
node_name_length = 5
|
||||
daemon_state_length = 7
|
||||
router_state_length = 7
|
||||
domain_state_length = 7
|
||||
for node_name in node_list:
|
||||
# node_name column
|
||||
_node_name_length = len(node_name) + 1
|
||||
if _node_name_length > node_name_length:
|
||||
node_name_length = _node_name_length
|
||||
# daemon_state column
|
||||
_daemon_state_length = len(node_daemon_state[node_name]) + 1
|
||||
if _daemon_state_length > daemon_state_length:
|
||||
daemon_state_length = _daemon_state_length
|
||||
# router_state column
|
||||
_router_state_length = len(node_router_state[node_name]) + 1
|
||||
if _router_state_length > router_state_length:
|
||||
router_state_length = _router_state_length
|
||||
# domain_state column
|
||||
_domain_state_length = len(node_domain_state[node_name]) + 1
|
||||
if _domain_state_length > domain_state_length:
|
||||
domain_state_length = _domain_state_length
|
||||
|
||||
# Format the string (header)
|
||||
node_list_output.append(
|
||||
'{bold}{node_name: <{node_name_length}} \
|
||||
State: {daemon_state_colour}{node_daemon_state: <7}{end_colour} {domain_state_colour}{node_domain_state: <8}{end_colour} \
|
||||
State: {daemon_state_colour}{node_daemon_state: <{daemon_state_length}}{end_colour} {router_state_colour}{node_router_state: <{router_state_length}}{end_colour} {domain_state_colour}{node_domain_state: <{domain_state_length}}{end_colour} \
|
||||
Resources: {node_domains_count: <4} {node_cpu_count: <5} {node_load: <6} \
|
||||
RAM (MiB): {node_mem_total: <6} {node_mem_used: <6} {node_mem_free: <6} {node_mem_allocated: <6}{end_bold}'.format(
|
||||
node_name_length=node_name_length,
|
||||
daemon_state_length=daemon_state_length,
|
||||
router_state_length=router_state_length,
|
||||
domain_state_length=domain_state_length,
|
||||
bold=ansiiprint.bold(),
|
||||
end_bold=ansiiprint.end(),
|
||||
daemon_state_colour='',
|
||||
router_state_colour='',
|
||||
domain_state_colour='',
|
||||
end_colour='',
|
||||
node_name='Name',
|
||||
node_daemon_state='Daemon',
|
||||
node_domain_state='Domains',
|
||||
node_router_state='Router',
|
||||
node_domain_state='Domain',
|
||||
node_domains_count='VMs',
|
||||
node_cpu_count='CPUs',
|
||||
node_load='Load',
|
||||
node_mem_total='Total',
|
||||
node_mem_used='Used',
|
||||
node_mem_free='Free',
|
||||
node_mem_allocated='VMs',
|
||||
node_mem_allocated='VMs'
|
||||
)
|
||||
)
|
||||
|
||||
@ -263,7 +326,14 @@ RAM (MiB): {node_mem_total: <6} {node_mem_used: <6} {node_mem_free: <6} {node_me
|
||||
else:
|
||||
daemon_state_colour = ansiiprint.blue()
|
||||
|
||||
if node_mem_allocated[node_name] >= node_mem_total[node_name]:
|
||||
if node_router_state[node_name] == 'primary':
|
||||
router_state_colour = ansiiprint.green()
|
||||
elif node_router_state[node_name] == 'secondary':
|
||||
router_state_colour = ansiiprint.blue()
|
||||
else:
|
||||
router_state_colour = ansiiprint.purple()
|
||||
|
||||
if node_mem_allocated[node_name] != 0 and node_mem_allocated[node_name] >= node_mem_total[node_name]:
|
||||
node_domain_state[node_name] = 'overprov'
|
||||
domain_state_colour = ansiiprint.yellow()
|
||||
elif node_domain_state[node_name] == 'ready':
|
||||
@ -273,17 +343,22 @@ RAM (MiB): {node_mem_total: <6} {node_mem_used: <6} {node_mem_free: <6} {node_me
|
||||
|
||||
node_list_output.append(
|
||||
'{bold}{node_name: <{node_name_length}} \
|
||||
{daemon_state_colour}{node_daemon_state: <7}{end_colour} {domain_state_colour}{node_domain_state: <8}{end_colour} \
|
||||
{daemon_state_colour}{node_daemon_state: <{daemon_state_length}}{end_colour} {router_state_colour}{node_router_state: <{router_state_length}}{end_colour} {domain_state_colour}{node_domain_state: <{domain_state_length}}{end_colour} \
|
||||
{node_domains_count: <4} {node_cpu_count: <5} {node_load: <6} \
|
||||
{node_mem_total: <6} {node_mem_used: <6} {node_mem_free: <6} {node_mem_allocated: <6}{end_bold}'.format(
|
||||
node_name_length=node_name_length,
|
||||
daemon_state_length=daemon_state_length,
|
||||
router_state_length=router_state_length,
|
||||
domain_state_length=domain_state_length,
|
||||
bold='',
|
||||
end_bold='',
|
||||
daemon_state_colour=daemon_state_colour,
|
||||
router_state_colour=router_state_colour,
|
||||
domain_state_colour=domain_state_colour,
|
||||
end_colour=ansiiprint.end(),
|
||||
node_name=node_name,
|
||||
node_daemon_state=node_daemon_state[node_name],
|
||||
node_router_state=node_router_state[node_name],
|
||||
node_domain_state=node_domain_state[node_name],
|
||||
node_domains_count=node_domains_count[node_name],
|
||||
node_cpu_count=node_cpu_count[node_name],
|
@ -44,25 +44,22 @@ def getInformationFromXML(zk_conn, uuid, long_output):
|
||||
# Obtain the contents of the XML from Zookeeper
|
||||
try:
|
||||
dstate = zk_conn.get('/domains/{}/state'.format(uuid))[0].decode('ascii')
|
||||
dhypervisor = zk_conn.get('/domains/{}/hypervisor'.format(uuid))[0].decode('ascii')
|
||||
dlasthypervisor = zk_conn.get('/domains/{}/lasthypervisor'.format(uuid))[0].decode('ascii')
|
||||
dnode = zk_conn.get('/domains/{}/node'.format(uuid))[0].decode('ascii')
|
||||
dlastnode = zk_conn.get('/domains/{}/lastnode'.format(uuid))[0].decode('ascii')
|
||||
except:
|
||||
return None
|
||||
|
||||
if dlasthypervisor == '':
|
||||
dlasthypervisor = 'N/A'
|
||||
if dlastnode == '':
|
||||
dlastnode = 'N/A'
|
||||
|
||||
try:
|
||||
parsed_xml = common.getDomainXML(zk_conn, uuid)
|
||||
duuid, dname, ddescription, dmemory, dvcpu, dvcputopo = common.getDomainMainDetails(parsed_xml)
|
||||
except AttributeError:
|
||||
click.echo('Error: Domain {} does not exist.'.format(domain))
|
||||
parsed_xml = common.getDomainXML(zk_conn, uuid)
|
||||
duuid, dname, ddescription, dmemory, dvcpu, dvcputopo = common.getDomainMainDetails(parsed_xml)
|
||||
dnets = common.getDomainNetworks(parsed_xml)
|
||||
|
||||
if long_output == True:
|
||||
dtype, darch, dmachine, dconsole, demulator = common.getDomainExtraDetails(parsed_xml)
|
||||
dfeatures = common.getDomainCPUFeatures(parsed_xml)
|
||||
ddisks = common.getDomainDisks(parsed_xml)
|
||||
dnets = common.getDomainNetworks(parsed_xml)
|
||||
dcontrollers = common.getDomainControllers(parsed_xml)
|
||||
|
||||
# Format a nice output; do this line-by-line then concat the elements at the end
|
||||
@ -98,8 +95,17 @@ def getInformationFromXML(zk_conn, uuid, long_output):
|
||||
'unmigrate': ansiiprint.blue()
|
||||
}
|
||||
ainformation.append('{}State:{} {}{}{}'.format(ansiiprint.purple(), ansiiprint.end(), dstate_colour[dstate], dstate, ansiiprint.end()))
|
||||
ainformation.append('{}Active Hypervisor:{} {}'.format(ansiiprint.purple(), ansiiprint.end(), dhypervisor))
|
||||
ainformation.append('{}Last Hypervisor:{} {}'.format(ansiiprint.purple(), ansiiprint.end(), dlasthypervisor))
|
||||
ainformation.append('{}Current Node:{} {}'.format(ansiiprint.purple(), ansiiprint.end(), dnode))
|
||||
ainformation.append('{}Previous Node:{} {}'.format(ansiiprint.purple(), ansiiprint.end(), dlastnode))
|
||||
|
||||
# Network list
|
||||
net_list = []
|
||||
for net in dnets:
|
||||
# Split out just the numerical (VNI) part of the brXXXX name
|
||||
net_vni = re.findall(r'\d+', net['source'])[0]
|
||||
net_list.append(net_vni)
|
||||
ainformation.append('')
|
||||
ainformation.append('{}Networks:{} {}'.format(ansiiprint.purple(), ansiiprint.end(), ', '.join(net_list)))
|
||||
|
||||
if long_output == True:
|
||||
# Disk list
|
||||
@ -112,7 +118,6 @@ def getInformationFromXML(zk_conn, uuid, long_output):
|
||||
ainformation.append('{0}Disks:{1} {2}ID Type {3: <{width}} Dev Bus{4}'.format(ansiiprint.purple(), ansiiprint.end(), ansiiprint.bold(), 'Name', ansiiprint.end(), width=name_length))
|
||||
for disk in ddisks:
|
||||
ainformation.append(' {0: <3} {1: <5} {2: <{width}} {3: <4} {4: <5}'.format(ddisks.index(disk), disk['type'], disk['name'], disk['dev'], disk['bus'], width=name_length))
|
||||
# Network list
|
||||
ainformation.append('')
|
||||
ainformation.append('{}Interfaces:{} {}ID Type Source Model MAC{}'.format(ansiiprint.purple(), ansiiprint.end(), ansiiprint.bold(), ansiiprint.end()))
|
||||
for net in dnets:
|
||||
@ -193,25 +198,25 @@ def getDomainName(zk_conn, domain):
|
||||
#
|
||||
# Direct functions
|
||||
#
|
||||
def define_vm(zk_conn, config_data, target_hypervisor, selector):
|
||||
def define_vm(zk_conn, config_data, target_node, selector):
|
||||
# Parse the XML data
|
||||
parsed_xml = lxml.objectify.fromstring(config_data)
|
||||
dom_uuid = parsed_xml.uuid.text
|
||||
dom_name = parsed_xml.name.text
|
||||
click.echo('Adding new VM with Name "{}" and UUID "{}" to database.'.format(dom_name, dom_uuid))
|
||||
|
||||
if target_hypervisor == None:
|
||||
target_hypervisor = common.findTargetHypervisor(zk_conn, selector, dom_uuid)
|
||||
if target_node == None:
|
||||
target_node = common.findTargetNode(zk_conn, selector, dom_uuid)
|
||||
|
||||
# Verify node is valid
|
||||
common.verifyNode(zk_conn, target_hypervisor)
|
||||
common.verifyNode(zk_conn, target_node)
|
||||
|
||||
# Add the new domain to Zookeeper
|
||||
transaction = zk_conn.transaction()
|
||||
transaction.create('/domains/{}'.format(dom_uuid), dom_name.encode('ascii'))
|
||||
transaction.create('/domains/{}/state'.format(dom_uuid), 'stop'.encode('ascii'))
|
||||
transaction.create('/domains/{}/hypervisor'.format(dom_uuid), target_hypervisor.encode('ascii'))
|
||||
transaction.create('/domains/{}/lasthypervisor'.format(dom_uuid), ''.encode('ascii'))
|
||||
transaction.create('/domains/{}/node'.format(dom_uuid), target_node.encode('ascii'))
|
||||
transaction.create('/domains/{}/lastnode'.format(dom_uuid), ''.encode('ascii'))
|
||||
transaction.create('/domains/{}/failedreason'.format(dom_uuid), ''.encode('ascii'))
|
||||
transaction.create('/domains/{}/xml'.format(dom_uuid), config_data.encode('ascii'))
|
||||
results = transaction.commit()
|
||||
@ -251,7 +256,7 @@ def undefine_vm(zk_conn, domain):
|
||||
transaction.set_data('/domains/{}/state'.format(dom_uuid), 'stop'.encode('ascii'))
|
||||
transaction.commit()
|
||||
|
||||
# Wait for 3 seconds to allow state to flow to all hypervisors
|
||||
# Wait for 3 seconds to allow state to flow to all nodes
|
||||
click.echo('Waiting for cluster to update.')
|
||||
time.sleep(1)
|
||||
except:
|
||||
@ -343,43 +348,43 @@ def stop_vm(zk_conn, domain):
|
||||
|
||||
return True, ''
|
||||
|
||||
def move_vm(zk_conn, domain, target_hypervisor, selector):
|
||||
def move_vm(zk_conn, domain, target_node, selector):
|
||||
# Validate and obtain alternate passed value
|
||||
dom_uuid = getDomainUUID(zk_conn, domain)
|
||||
if dom_uuid == None:
|
||||
common.stopZKConnection(zk_conn)
|
||||
return False, 'ERROR: Could not find VM "{}" in the cluster!'.format(domain)
|
||||
|
||||
current_hypervisor = zk_conn.get('/domains/{}/hypervisor'.format(dom_uuid))[0].decode('ascii')
|
||||
current_node = zk_conn.get('/domains/{}/node'.format(dom_uuid))[0].decode('ascii')
|
||||
|
||||
if target_hypervisor == None:
|
||||
target_hypervisor = common.findTargetHypervisor(zk_conn, selector, dom_uuid)
|
||||
if target_node == None:
|
||||
target_node = common.findTargetNode(zk_conn, selector, dom_uuid)
|
||||
else:
|
||||
if target_hypervisor == current_hypervisor:
|
||||
if target_node == current_node:
|
||||
common.stopZKConnection(zk_conn)
|
||||
return False, 'ERROR: VM "{}" is already running on hypervisor "{}".'.format(dom_uuid, current_hypervisor)
|
||||
return False, 'ERROR: VM "{}" is already running on node "{}".'.format(dom_uuid, current_node)
|
||||
|
||||
# Verify node is valid
|
||||
common.verifyNode(zk_conn, target_hypervisor)
|
||||
common.verifyNode(zk_conn, target_node)
|
||||
|
||||
current_vm_state = zk_conn.get('/domains/{}/state'.format(dom_uuid))[0].decode('ascii')
|
||||
if current_vm_state == 'start':
|
||||
click.echo('Permanently migrating VM "{}" to hypervisor "{}".'.format(dom_uuid, target_hypervisor))
|
||||
click.echo('Permanently migrating VM "{}" to node "{}".'.format(dom_uuid, target_node))
|
||||
transaction = zk_conn.transaction()
|
||||
transaction.set_data('/domains/{}/state'.format(dom_uuid), 'migrate'.encode('ascii'))
|
||||
transaction.set_data('/domains/{}/hypervisor'.format(dom_uuid), target_hypervisor.encode('ascii'))
|
||||
transaction.set_data('/domains/{}/lasthypervisor'.format(dom_uuid), ''.encode('ascii'))
|
||||
transaction.set_data('/domains/{}/node'.format(dom_uuid), target_node.encode('ascii'))
|
||||
transaction.set_data('/domains/{}/lastnode'.format(dom_uuid), ''.encode('ascii'))
|
||||
transaction.commit()
|
||||
else:
|
||||
click.echo('Permanently moving VM "{}" to hypervisor "{}".'.format(dom_uuid, target_hypervisor))
|
||||
click.echo('Permanently moving VM "{}" to node "{}".'.format(dom_uuid, target_node))
|
||||
transaction = zk_conn.transaction()
|
||||
transaction.set_data('/domains/{}/hypervisor'.format(dom_uuid), target_hypervisor.encode('ascii'))
|
||||
transaction.set_data('/domains/{}/lasthypervisor'.format(dom_uuid), ''.encode('ascii'))
|
||||
transaction.set_data('/domains/{}/node'.format(dom_uuid), target_node.encode('ascii'))
|
||||
transaction.set_data('/domains/{}/lastnode'.format(dom_uuid), ''.encode('ascii'))
|
||||
transaction.commit()
|
||||
|
||||
return True, ''
|
||||
|
||||
def migrate_vm(zk_conn, domain, target_hypervisor, selector, force_migrate):
|
||||
def migrate_vm(zk_conn, domain, target_node, selector, force_migrate):
|
||||
# Validate and obtain alternate passed value
|
||||
dom_uuid = getDomainUUID(zk_conn, domain)
|
||||
if dom_uuid == None:
|
||||
@ -393,32 +398,32 @@ def migrate_vm(zk_conn, domain, target_hypervisor, selector, force_migrate):
|
||||
else:
|
||||
target_state = 'migrate'
|
||||
|
||||
current_hypervisor = zk_conn.get('/domains/{}/hypervisor'.format(dom_uuid))[0].decode('ascii')
|
||||
last_hypervisor = zk_conn.get('/domains/{}/lasthypervisor'.format(dom_uuid))[0].decode('ascii')
|
||||
current_node = zk_conn.get('/domains/{}/node'.format(dom_uuid))[0].decode('ascii')
|
||||
last_node = zk_conn.get('/domains/{}/lastnode'.format(dom_uuid))[0].decode('ascii')
|
||||
|
||||
if last_hypervisor != '' and force_migrate != True:
|
||||
if last_node != '' and force_migrate != True:
|
||||
click.echo('ERROR: VM "{}" has been previously migrated.'.format(dom_uuid))
|
||||
click.echo('> Last hypervisor: {}'.format(last_hypervisor))
|
||||
click.echo('> Current hypervisor: {}'.format(current_hypervisor))
|
||||
click.echo('Run `vm unmigrate` to restore the VM to its previous hypervisor, or use `--force` to override this check.')
|
||||
click.echo('> Last node: {}'.format(last_node))
|
||||
click.echo('> Current node: {}'.format(current_node))
|
||||
click.echo('Run `vm unmigrate` to restore the VM to its previous node, or use `--force` to override this check.')
|
||||
common.stopZKConnection(zk_conn)
|
||||
return False, ''
|
||||
|
||||
if target_hypervisor == None:
|
||||
target_hypervisor = findTargetHypervisor(zk_conn, selector, dom_uuid)
|
||||
if target_node == None:
|
||||
target_node = findTargetNode(zk_conn, selector, dom_uuid)
|
||||
else:
|
||||
if target_hypervisor == current_hypervisor:
|
||||
if target_node == current_node:
|
||||
common.stopZKConnection(zk_conn)
|
||||
return False, 'ERROR: VM "{}" is already running on hypervisor "{}".'.format(dom_uuid, current_hypervisor)
|
||||
return False, 'ERROR: VM "{}" is already running on node "{}".'.format(dom_uuid, current_node)
|
||||
|
||||
# Verify node is valid
|
||||
common.verifyNode(zk_conn, target_hypervisor)
|
||||
common.verifyNode(zk_conn, target_node)
|
||||
|
||||
click.echo('Migrating VM "{}" to hypervisor "{}".'.format(dom_uuid, target_hypervisor))
|
||||
click.echo('Migrating VM "{}" to node "{}".'.format(dom_uuid, target_node))
|
||||
transaction = zk_conn.transaction()
|
||||
transaction.set_data('/domains/{}/state'.format(dom_uuid), target_state.encode('ascii'))
|
||||
transaction.set_data('/domains/{}/hypervisor'.format(dom_uuid), target_hypervisor.encode('ascii'))
|
||||
transaction.set_data('/domains/{}/lasthypervisor'.format(dom_uuid), current_hypervisor.encode('ascii'))
|
||||
transaction.set_data('/domains/{}/node'.format(dom_uuid), target_node.encode('ascii'))
|
||||
transaction.set_data('/domains/{}/lastnode'.format(dom_uuid), current_node.encode('ascii'))
|
||||
transaction.commit()
|
||||
|
||||
return True, ''
|
||||
@ -437,17 +442,17 @@ def unmigrate_vm(zk_conn, domain):
|
||||
else:
|
||||
target_state = 'migrate'
|
||||
|
||||
target_hypervisor = zk_conn.get('/domains/{}/lasthypervisor'.format(dom_uuid))[0].decode('ascii')
|
||||
target_node = zk_conn.get('/domains/{}/lastnode'.format(dom_uuid))[0].decode('ascii')
|
||||
|
||||
if target_hypervisor == '':
|
||||
if target_node == '':
|
||||
common.stopZKConnection(zk_conn)
|
||||
return False, 'ERROR: VM "{}" has not been previously migrated.'.format(dom_uuid)
|
||||
|
||||
click.echo('Unmigrating VM "{}" back to hypervisor "{}".'.format(dom_uuid, target_hypervisor))
|
||||
click.echo('Unmigrating VM "{}" back to node "{}".'.format(dom_uuid, target_node))
|
||||
transaction = zk_conn.transaction()
|
||||
transaction.set_data('/domains/{}/state'.format(dom_uuid), target_state.encode('ascii'))
|
||||
transaction.set_data('/domains/{}/hypervisor'.format(dom_uuid), target_hypervisor.encode('ascii'))
|
||||
transaction.set_data('/domains/{}/lasthypervisor'.format(dom_uuid), ''.encode('ascii'))
|
||||
transaction.set_data('/domains/{}/node'.format(dom_uuid), target_node.encode('ascii'))
|
||||
transaction.set_data('/domains/{}/lastnode'.format(dom_uuid), ''.encode('ascii'))
|
||||
transaction.commit()
|
||||
|
||||
return True, ''
|
||||
@ -473,16 +478,16 @@ def get_info(zk_conn, domain, long_output):
|
||||
|
||||
return True, ''
|
||||
|
||||
def get_list(zk_conn, hypervisor, limit):
|
||||
if hypervisor != None:
|
||||
def get_list(zk_conn, node, limit):
|
||||
if node != None:
|
||||
# Verify node is valid
|
||||
common.verifyNode(zk_conn, hypervisor)
|
||||
common.verifyNode(zk_conn, node)
|
||||
|
||||
full_vm_list = zk_conn.get_children('/domains')
|
||||
vm_list = []
|
||||
vm_list_output = []
|
||||
|
||||
vm_hypervisor = {}
|
||||
vm_node = {}
|
||||
vm_state = {}
|
||||
vm_migrated = {}
|
||||
vm_uuid = {}
|
||||
@ -490,13 +495,14 @@ def get_list(zk_conn, hypervisor, limit):
|
||||
vm_description = {}
|
||||
vm_memory = {}
|
||||
vm_vcpu = {}
|
||||
vm_nets = {}
|
||||
|
||||
# If we're limited, remove other nodes' VMs
|
||||
for vm in full_vm_list:
|
||||
|
||||
# Check we don't match the limit
|
||||
name = zkhandler.readdata(zk_conn, '/domains/{}'.format(vm))
|
||||
vm_hypervisor[vm] = zkhandler.readdata(zk_conn, '/domains/{}/hypervisor'.format(vm))
|
||||
vm_node[vm] = zkhandler.readdata(zk_conn, '/domains/{}/node'.format(vm))
|
||||
if limit != None:
|
||||
try:
|
||||
# Implcitly assume fuzzy limits
|
||||
@ -506,72 +512,85 @@ def get_list(zk_conn, hypervisor, limit):
|
||||
limit = limit + '.*'
|
||||
|
||||
if re.match(limit, vm) != None:
|
||||
if hypervisor == None:
|
||||
if node == None:
|
||||
vm_list.append(vm)
|
||||
else:
|
||||
if vm_hypervisor[vm] == hypervisor:
|
||||
if vm_node[vm] == node:
|
||||
vm_list.append(vm)
|
||||
|
||||
if re.match(limit, name) != None:
|
||||
if hypervisor == None:
|
||||
if node == None:
|
||||
vm_list.append(vm)
|
||||
else:
|
||||
if vm_hypervisor[vm] == hypervisor:
|
||||
if vm_node[vm] == node:
|
||||
vm_list.append(vm)
|
||||
except Exception as e:
|
||||
return False, 'Regex Error: {}'.format(e)
|
||||
else:
|
||||
# Check hypervisor to avoid unneeded ZK calls
|
||||
if hypervisor == None:
|
||||
# Check node to avoid unneeded ZK calls
|
||||
if node == None:
|
||||
vm_list.append(vm)
|
||||
else:
|
||||
if vm_hypervisor[vm] == hypervisor:
|
||||
if vm_node[vm] == node:
|
||||
vm_list.append(vm)
|
||||
|
||||
# Gather information for printing
|
||||
for vm in vm_list:
|
||||
vm_state[vm] = zk_conn.get('/domains/{}/state'.format(vm))[0].decode('ascii')
|
||||
vm_lasthypervisor = zk_conn.get('/domains/{}/lasthypervisor'.format(vm))[0].decode('ascii')
|
||||
if vm_lasthypervisor != '':
|
||||
vm_migrated[vm] = 'from {}'.format(vm_lasthypervisor)
|
||||
vm_lastnode = zk_conn.get('/domains/{}/lastnode'.format(vm))[0].decode('ascii')
|
||||
if vm_lastnode != '':
|
||||
vm_migrated[vm] = 'from {}'.format(vm_lastnode)
|
||||
else:
|
||||
vm_migrated[vm] = 'no'
|
||||
|
||||
try:
|
||||
vm_xml = common.getDomainXML(zk_conn, vm)
|
||||
vm_uuid[vm], vm_name[vm], vm_description[vm], vm_memory[vm], vm_vcpu[vm], vm_vcputopo = common.getDomainMainDetails(vm_xml)
|
||||
dnets = common.getDomainNetworks(vm_xml)
|
||||
net_list = []
|
||||
for net in dnets:
|
||||
# Split out just the numerical (VNI) part of the brXXXX name
|
||||
net_vni = re.findall(r'\d+', net['source'])[0]
|
||||
net_list.append(net_vni)
|
||||
vm_nets[vm] = ','.join(net_list)
|
||||
except AttributeError:
|
||||
click.echo('Error: Domain {} does not exist.'.format(domain))
|
||||
|
||||
# Determine optimal column widths
|
||||
# Dynamic columns: node_name, hypervisor, migrated
|
||||
vm_name_length = 0
|
||||
vm_hypervisor_length = 0
|
||||
vm_migrated_length = 0
|
||||
# Dynamic columns: node_name, node, migrated
|
||||
vm_name_length = 10
|
||||
vm_node_length = 8
|
||||
vm_nets_length = 9
|
||||
vm_migrated_length = 10
|
||||
for vm in vm_list:
|
||||
# vm_name column
|
||||
_vm_name_length = len(vm_name[vm]) + 1
|
||||
if _vm_name_length > vm_name_length:
|
||||
vm_name_length = _vm_name_length
|
||||
# vm_hypervisor column
|
||||
_vm_hypervisor_length = len(vm_hypervisor[vm]) + 1
|
||||
if _vm_hypervisor_length > vm_hypervisor_length:
|
||||
vm_hypervisor_length = _vm_hypervisor_length
|
||||
# vm_node column
|
||||
_vm_node_length = len(vm_node[vm]) + 1
|
||||
if _vm_node_length > vm_node_length:
|
||||
vm_node_length = _vm_node_length
|
||||
# vm_nets column
|
||||
_vm_nets_length = len(vm_nets[vm]) + 1
|
||||
if _vm_nets_length > vm_nets_length:
|
||||
vm_nets_length = _vm_nets_length
|
||||
# vm_migrated column
|
||||
_vm_migrated_length = len(vm_migrated[vm]) + 1
|
||||
if _vm_migrated_length > vm_migrated_length:
|
||||
vm_migrated_length = _vm_migrated_length
|
||||
|
||||
# Format the string (header)
|
||||
vm_list_header = ansiiprint.bold() + 'Name UUID State RAM [MiB] vCPUs Hypervisor Migrated?' + ansiiprint.end()
|
||||
vm_list_output.append(
|
||||
'{bold}{vm_name: <{vm_name_length}} {vm_uuid: <37} \
|
||||
{vm_state_colour}{vm_state: <8}{end_colour} \
|
||||
{vm_networks: <{vm_nets_length}} \
|
||||
{vm_memory: <10} {vm_vcpu: <6} \
|
||||
{vm_hypervisor: <{vm_hypervisor_length}} \
|
||||
{vm_node: <{vm_node_length}} \
|
||||
{vm_migrated: <{vm_migrated_length}}{end_bold}'.format(
|
||||
vm_name_length=vm_name_length,
|
||||
vm_hypervisor_length=vm_hypervisor_length,
|
||||
vm_node_length=vm_node_length,
|
||||
vm_nets_length=vm_nets_length,
|
||||
vm_migrated_length=vm_migrated_length,
|
||||
bold=ansiiprint.bold(),
|
||||
end_bold=ansiiprint.end(),
|
||||
@ -580,9 +599,10 @@ def get_list(zk_conn, hypervisor, limit):
|
||||
vm_name='Name',
|
||||
vm_uuid='UUID',
|
||||
vm_state='State',
|
||||
vm_networks='Networks',
|
||||
vm_memory='RAM (MiB)',
|
||||
vm_vcpu='vCPUs',
|
||||
vm_hypervisor='Hypervisor',
|
||||
vm_node='Node',
|
||||
vm_migrated='Migrated'
|
||||
)
|
||||
)
|
||||
@ -605,11 +625,13 @@ def get_list(zk_conn, hypervisor, limit):
|
||||
vm_list_output.append(
|
||||
'{bold}{vm_name: <{vm_name_length}} {vm_uuid: <37} \
|
||||
{vm_state_colour}{vm_state: <8}{end_colour} \
|
||||
{vm_networks: <{vm_nets_length}} \
|
||||
{vm_memory: <10} {vm_vcpu: <6} \
|
||||
{vm_hypervisor: <{vm_hypervisor_length}} \
|
||||
{vm_node: <{vm_node_length}} \
|
||||
{vm_migrated: <{vm_migrated_length}}{end_bold}'.format(
|
||||
vm_name_length=vm_name_length,
|
||||
vm_hypervisor_length=vm_hypervisor_length,
|
||||
vm_node_length=vm_node_length,
|
||||
vm_nets_length=vm_nets_length,
|
||||
vm_migrated_length=vm_migrated_length,
|
||||
bold='',
|
||||
end_bold='',
|
||||
@ -618,9 +640,10 @@ def get_list(zk_conn, hypervisor, limit):
|
||||
vm_name=vm_name[vm],
|
||||
vm_uuid=vm_uuid[vm],
|
||||
vm_state=vm_state[vm],
|
||||
vm_networks=vm_nets[vm],
|
||||
vm_memory=vm_memory[vm],
|
||||
vm_vcpu=vm_vcpu[vm],
|
||||
vm_hypervisor=vm_hypervisor[vm],
|
||||
vm_node=vm_node[vm],
|
||||
vm_migrated=vm_migrated[vm]
|
||||
)
|
||||
)
|
Reference in New Issue
Block a user