Lint: E202 whitespace before '}'

commit 3f242cd437
parent b7daa8e1f6
Date: 2020-11-07 12:57:42 -05:00

16 changed files with 411 additions and 411 deletions
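
For reference, E202 is the pycodestyle (flake8) check for whitespace immediately before a closing bracket: ')', ']', or '}'. The pattern corrected throughout this commit looks like the following minimal sketch; the snippet is illustrative only and not taken from the repository:

    # E202: whitespace before '}'
    node_state = {'daemonstate': 'run' }   # flagged by flake8/pycodestyle as E202
    node_state = {'daemonstate': 'run'}    # clean

Offending lines can be listed before committing with, for example: flake8 --select=E202 .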


@@ -557,7 +557,7 @@ def cleanup():
     logger.out('Terminating pvcnoded and cleaning up', state='s')
 
     # Set shutdown state in Zookeeper
-    zkhandler.writedata(zk_conn, {'/nodes/{}/daemonstate'.format(myhostname): 'shutdown' })
+    zkhandler.writedata(zk_conn, {'/nodes/{}/daemonstate'.format(myhostname): 'shutdown'})
 
     # Waiting for any flushes to complete
     logger.out('Waiting for any active flushes', state='s')
@@ -599,7 +599,7 @@ def cleanup():
     node_keepalive()
 
     # Set stop state in Zookeeper
-    zkhandler.writedata(zk_conn, {'/nodes/{}/daemonstate'.format(myhostname): 'stop' })
+    zkhandler.writedata(zk_conn, {'/nodes/{}/daemonstate'.format(myhostname): 'stop'})
 
     # Forcibly terminate dnsmasq because it gets stuck sometimes
     common.run_os_command('killall dnsmasq')
@@ -690,7 +690,7 @@ if current_primary and current_primary != 'none':
 else:
     if config['daemon_mode'] == 'coordinator':
         logger.out('No primary node found; creating with us as primary.', state='i')
-        zkhandler.writedata(zk_conn, {'/primary_node': myhostname })
+        zkhandler.writedata(zk_conn, {'/primary_node': myhostname})
 
 ###############################################################################
 # PHASE 7a - Ensure IPMI is reachable and working
@@ -1042,7 +1042,7 @@ def collect_ceph_stats(queue):
         logger.out("Getting health stats from monitor", state='d', prefix='ceph-thread')
 
     # Get Ceph cluster health for local status output
-    command = {"prefix": "health", "format": "json" }
+    command = {"prefix": "health", "format": "json"}
     try:
         health_status = json.loads(ceph_conn.mon_command(json.dumps(command), b'', timeout=1)[1])
         ceph_health = health_status['status']
@@ -1062,7 +1062,7 @@ def collect_ceph_stats(queue):
         if debug:
             logger.out("Set ceph health information in zookeeper (primary only)", state='d', prefix='ceph-thread')
 
-        command = {"prefix": "status", "format": "pretty" }
+        command = {"prefix": "status", "format": "pretty"}
         ceph_status = ceph_conn.mon_command(json.dumps(command), b'', timeout=1)[1].decode('ascii')
         try:
             zkhandler.writedata(zk_conn, {
@@ -1076,7 +1076,7 @@ def collect_ceph_stats(queue):
             logger.out("Set ceph rados df information in zookeeper (primary only)", state='d', prefix='ceph-thread')
 
         # Get rados df info
-        command = {"prefix": "df", "format": "pretty" }
+        command = {"prefix": "df", "format": "pretty"}
         ceph_df = ceph_conn.mon_command(json.dumps(command), b'', timeout=1)[1].decode('ascii')
         try:
             zkhandler.writedata(zk_conn, {
@@ -1090,7 +1090,7 @@ def collect_ceph_stats(queue):
             logger.out("Set pool information in zookeeper (primary only)", state='d', prefix='ceph-thread')
 
         # Get pool info
-        command = {"prefix": "df", "format": "json" }
+        command = {"prefix": "df", "format": "json"}
         try:
             ceph_pool_df_raw = json.loads(ceph_conn.mon_command(json.dumps(command), b'', timeout=1)[1])['pools']
         except Exception as e:
@@ -1161,7 +1161,7 @@ def collect_ceph_stats(queue):
     # Parse the dump data
     osd_dump = dict()
 
-    command = {"prefix": "osd dump", "format": "json" }
+    command = {"prefix": "osd dump", "format": "json"}
     try:
         retcode, stdout, stderr = common.run_os_command('ceph osd dump --format json --connect-timeout 2', timeout=2)
         osd_dump_raw = json.loads(stdout)['osds']
@@ -1187,7 +1187,7 @@ def collect_ceph_stats(queue):
     osd_df = dict()
 
-    command = {"prefix": "osd df", "format": "json" }
+    command = {"prefix": "osd df", "format": "json"}
     try:
         osd_df_raw = json.loads(ceph_conn.mon_command(json.dumps(command), b'', timeout=1)[1])['nodes']
     except Exception as e:
@@ -1214,7 +1214,7 @@ def collect_ceph_stats(queue):
     osd_status = dict()
 
-    command = {"prefix": "osd status", "format": "pretty" }
+    command = {"prefix": "osd status", "format": "pretty"}
     try:
         osd_status_raw = ceph_conn.mon_command(json.dumps(command), b'', timeout=1)[1].decode('ascii')
     except Exception as e:
@@ -1341,7 +1341,7 @@ def collect_vm_stats(queue):
                         raise
                     except Exception:
                         # Toggle a state "change"
-                        zkhandler.writedata(zk_conn, {'/domains/{}/state'.format(domain): instance.getstate() })
+                        zkhandler.writedata(zk_conn, {'/domains/{}/state'.format(domain): instance.getstate()})
         elif instance.getnode() == this_node.name:
             memprov += instance.getmemory()
@@ -1469,7 +1469,7 @@ def node_keepalive():
     past_state = zkhandler.readdata(zk_conn, '/nodes/{}/daemonstate'.format(this_node.name))
     if past_state != 'run':
         this_node.daemon_state = 'run'
-        zkhandler.writedata(zk_conn, {'/nodes/{}/daemonstate'.format(this_node.name): 'run' })
+        zkhandler.writedata(zk_conn, {'/nodes/{}/daemonstate'.format(this_node.name): 'run'})
     else:
         this_node.daemon_state = 'run'
@@ -1638,7 +1638,7 @@ def node_keepalive():
                 fence_thread = Thread(target=fencing.fenceNode, args=(node_name, zk_conn, config, logger), kwargs={})
                 fence_thread.start()
                 # Write the updated data after we start the fence thread
-                zkhandler.writedata(zk_conn, {'/nodes/{}/daemonstate'.format(node_name): 'dead' })
+                zkhandler.writedata(zk_conn, {'/nodes/{}/daemonstate'.format(node_name): 'dead'})
 
     if debug:
         logger.out("Keepalive finished", state='d', prefix='main-thread')


@@ -646,8 +646,8 @@ class NodeInstance(object):
             if target_node is None:
                 self.logger.out('Failed to find migration target for VM "{}"; shutting down and setting autostart flag'.format(dom_uuid), state='e')
-                zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(dom_uuid): 'shutdown' })
-                zkhandler.writedata(self.zk_conn, {'/domains/{}/node_autostart'.format(dom_uuid): 'True' })
+                zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(dom_uuid): 'shutdown'})
+                zkhandler.writedata(self.zk_conn, {'/domains/{}/node_autostart'.format(dom_uuid): 'True'})
             else:
                 self.logger.out('Migrating VM "{}" to node "{}"'.format(dom_uuid, target_node), state='i')
                 zkhandler.writedata(self.zk_conn, {
@@ -665,8 +665,8 @@ class NodeInstance(object):
                 break
             time.sleep(0.2)
 
-        zkhandler.writedata(self.zk_conn, {'/nodes/{}/runningdomains'.format(self.name): '' })
-        zkhandler.writedata(self.zk_conn, {'/nodes/{}/domainstate'.format(self.name): 'flushed' })
+        zkhandler.writedata(self.zk_conn, {'/nodes/{}/runningdomains'.format(self.name): ''})
+        zkhandler.writedata(self.zk_conn, {'/nodes/{}/domainstate'.format(self.name): 'flushed'})
         self.flush_thread = None
         self.flush_stopper = False
         return
@@ -714,7 +714,7 @@ class NodeInstance(object):
             while zkhandler.readdata(self.zk_conn, '/domains/{}/state'.format(dom_uuid)) in ['migrate', 'unmigrate', 'shutdown']:
                 time.sleep(0.1)
 
-        zkhandler.writedata(self.zk_conn, {'/nodes/{}/domainstate'.format(self.name): 'ready' })
+        zkhandler.writedata(self.zk_conn, {'/nodes/{}/domainstate'.format(self.name): 'ready'})
         self.flush_thread = None
         self.flush_stopper = False
         return


@@ -86,7 +86,7 @@ class VMConsoleWatcherInstance(object):
         self.fetch_lines()
         # Update Zookeeper with the new loglines if they changed
         if self.loglines != self.last_loglines:
-            zkhandler.writedata(self.zk_conn, {'/domains/{}/consolelog'.format(self.domuuid): self.loglines })
+            zkhandler.writedata(self.zk_conn, {'/domains/{}/consolelog'.format(self.domuuid): self.loglines})
             self.last_loglines = self.loglines
 
     def fetch_lines(self):


@@ -185,7 +185,7 @@ class VMInstance(object):
                 # Add the domain to the domain_list array
                 self.this_node.domain_list.append(self.domuuid)
                 # Push the change up to Zookeeper
-                zkhandler.writedata(self.zk_conn, {'/nodes/{}/runningdomains'.format(self.this_node.name): ' '.join(self.this_node.domain_list) })
+                zkhandler.writedata(self.zk_conn, {'/nodes/{}/runningdomains'.format(self.this_node.name): ' '.join(self.this_node.domain_list)})
             except Exception as e:
                 self.logger.out('Error adding domain to list: {}'.format(e), state='e')
@@ -195,7 +195,7 @@ class VMInstance(object):
                 # Remove the domain from the domain_list array
                 self.this_node.domain_list.remove(self.domuuid)
                 # Push the change up to Zookeeper
-                zkhandler.writedata(self.zk_conn, {'/nodes/{}/runningdomains'.format(self.this_node.name): ' '.join(self.this_node.domain_list) })
+                zkhandler.writedata(self.zk_conn, {'/nodes/{}/runningdomains'.format(self.this_node.name): ' '.join(self.this_node.domain_list)})
             except Exception as e:
                 self.logger.out('Error removing domain from list: {}'.format(e), state='e')
@@ -225,7 +225,7 @@ class VMInstance(object):
         if curstate == libvirt.VIR_DOMAIN_RUNNING:
             # If it is running just update the model
             self.addDomainToList()
-            zkhandler.writedata(self.zk_conn, {'/domains/{}/failedreason'.format(self.domuuid): '' })
+            zkhandler.writedata(self.zk_conn, {'/domains/{}/failedreason'.format(self.domuuid): ''})
         else:
             # Or try to create it
             try:
@@ -235,11 +235,11 @@ class VMInstance(object):
                 self.addDomainToList()
                 self.logger.out('Successfully started VM', state='o', prefix='Domain {}'.format(self.domuuid))
                 self.dom = dom
-                zkhandler.writedata(self.zk_conn, {'/domains/{}/failedreason'.format(self.domuuid): '' })
+                zkhandler.writedata(self.zk_conn, {'/domains/{}/failedreason'.format(self.domuuid): ''})
             except libvirt.libvirtError as e:
                 self.logger.out('Failed to create VM', state='e', prefix='Domain {}'.format(self.domuuid))
-                zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'fail' })
-                zkhandler.writedata(self.zk_conn, {'/domains/{}/failedreason'.format(self.domuuid): str(e) })
+                zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'fail'})
+                zkhandler.writedata(self.zk_conn, {'/domains/{}/failedreason'.format(self.domuuid): str(e)})
                 self.dom = None
 
         lv_conn.close()
@@ -264,7 +264,7 @@ class VMInstance(object):
         self.start_vm()
         self.addDomainToList()
 
-        zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'start' })
+        zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'start'})
         lv_conn.close()
 
         self.inrestart = False
@@ -295,7 +295,7 @@ class VMInstance(object):
         self.removeDomainFromList()
 
         if self.inrestart is False:
-            zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'stop' })
+            zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'stop'})
 
         self.logger.out('Successfully stopped VM', state='o', prefix='Domain {}'.format(self.domuuid))
         self.dom = None
@@ -329,7 +329,7 @@ class VMInstance(object):
             if lvdomstate != libvirt.VIR_DOMAIN_RUNNING:
                 self.removeDomainFromList()
-                zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'stop' })
+                zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'stop'})
                 self.logger.out('Successfully shutdown VM', state='o', prefix='Domain {}'.format(self.domuuid))
                 self.dom = None
 
                 # Stop the log watcher
@@ -338,7 +338,7 @@ class VMInstance(object):
             if tick >= self.config['vm_shutdown_timeout']:
                 self.logger.out('Shutdown timeout ({}s) expired, forcing off'.format(self.config['vm_shutdown_timeout']), state='e', prefix='Domain {}'.format(self.domuuid))
-                zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'stop' })
+                zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'stop'})
                 break
 
         self.inshutdown = False
@@ -349,7 +349,7 @@ class VMInstance(object):
         if self.inrestart:
             # Wait to prevent race conditions
             time.sleep(1)
-            zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'start' })
+            zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'start'})
 
     # Migrate the VM to a target host
     def migrate_vm(self, force_live=False, force_shutdown=False):
@@ -458,7 +458,7 @@ class VMInstance(object):
         def migrate_shutdown():
             self.logger.out('Shutting down VM for offline migration', state='i', prefix='Domain {}'.format(self.domuuid))
-            zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'shutdown' })
+            zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'shutdown'})
             while zkhandler.readdata(self.zk_conn, '/domains/{}/state'.format(self.domuuid)) != 'stop':
                 time.sleep(0.5)
             return True
@@ -551,7 +551,7 @@ class VMInstance(object):
         self.logger.out('Receiving VM migration from node "{}"'.format(self.node), state='i', prefix='Domain {}'.format(self.domuuid))
 
         # Ensure our lock key is populated
-        zkhandler.writedata(self.zk_conn, {'/locks/domain_migrate/{}'.format(self.domuuid): self.domuuid })
+        zkhandler.writedata(self.zk_conn, {'/locks/domain_migrate/{}'.format(self.domuuid): self.domuuid})
 
         # Synchronize nodes A (I am writer)
         lock = zkhandler.writelock(self.zk_conn, '/locks/domain_migrate/{}'.format(self.domuuid))
@@ -601,11 +601,11 @@ class VMInstance(object):
             if lvdomstate == libvirt.VIR_DOMAIN_RUNNING:
                 # VM has been received and started
                 self.addDomainToList()
-                zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'start' })
+                zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'start'})
                 self.logger.out('Successfully received migrated VM', state='o', prefix='Domain {}'.format(self.domuuid))
             else:
                 # The receive somehow failed
-                zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'fail' })
+                zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'fail'})
         else:
             if self.node == self.this_node.name:
                 if self.state in ['start']:
@@ -613,7 +613,7 @@ class VMInstance(object):
                     self.logger.out('Receive aborted via state change', state='w', prefix='Domain {}'.format(self.domuuid))
                 elif self.state in ['stop']:
                     # The send was shutdown-based
-                    zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'start' })
+                    zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'start'})
                 else:
                     # The send failed or was aborted
                     self.logger.out('Migrate aborted or failed; VM in state {}'.format(self.state), state='w', prefix='Domain {}'.format(self.domuuid))
@@ -622,7 +622,7 @@ class VMInstance(object):
         lock.release()
         self.logger.out('Released write lock for synchronization phase D', state='o', prefix='Domain {}'.format(self.domuuid))
 
-        zkhandler.writedata(self.zk_conn, {'/locks/domain_migrate/{}'.format(self.domuuid): '' })
+        zkhandler.writedata(self.zk_conn, {'/locks/domain_migrate/{}'.format(self.domuuid): ''})
 
         self.inreceive = False
         return
@@ -681,7 +681,7 @@ class VMInstance(object):
             elif self.state == "migrate" or self.state == "migrate-live":
                 # Start the log watcher
                 self.console_log_instance.start()
-                zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'start' })
+                zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'start'})
                 # Add domain to running list
                 self.addDomainToList()
             # VM should be restarted
@@ -704,7 +704,7 @@ class VMInstance(object):
                 self.receive_migrate()
             # VM should be restarted (i.e. started since it isn't running)
             if self.state == "restart":
-                zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'start' })
+                zkhandler.writedata(self.zk_conn, {'/domains/{}/state'.format(self.domuuid): 'start'})
             # VM should be shut down; ensure it's gone from this node's domain_list
             elif self.state == "shutdown":
                 self.removeDomainFromList()


@@ -144,7 +144,7 @@ def findTargetNode(zk_conn, config, logger, dom_uuid):
             node_limit = ''
     except Exception:
         node_limit = ''
-        zkhandler.writedata(zk_conn, {'/domains/{}/node_limit'.format(dom_uuid): '' })
+        zkhandler.writedata(zk_conn, {'/domains/{}/node_limit'.format(dom_uuid): ''})
 
     # Determine VM search field
     try:
@@ -155,7 +155,7 @@ def findTargetNode(zk_conn, config, logger, dom_uuid):
     # If our search field is invalid, use and set the default (for next time)
     if search_field is None or search_field == 'None':
         search_field = config['migration_target_selector']
-        zkhandler.writedata(zk_conn, {'/domains/{}/node_selector'.format(dom_uuid): config['migration_target_selector'] })
+        zkhandler.writedata(zk_conn, {'/domains/{}/node_selector'.format(dom_uuid): config['migration_target_selector']})
 
     if config['debug']:
         logger.out('Migrating VM {} with selector {}'.format(dom_uuid, search_field), state='d', prefix='node-flush')


@@ -62,9 +62,9 @@ def fenceNode(node_name, zk_conn, config, logger):
     # Force into secondary network state if needed
     if node_name in config['coordinators']:
         logger.out('Forcing secondary status for node "{}"'.format(node_name), state='i')
-        zkhandler.writedata(zk_conn, {'/nodes/{}/routerstate'.format(node_name): 'secondary' })
+        zkhandler.writedata(zk_conn, {'/nodes/{}/routerstate'.format(node_name): 'secondary'})
         if zkhandler.readdata(zk_conn, '/primary_node') == node_name:
-            zkhandler.writedata(zk_conn, {'/primary_node': 'none' })
+            zkhandler.writedata(zk_conn, {'/primary_node': 'none'})
 
     # If the fence succeeded and successful_fence is migrate
     if fence_status and config['successful_fence'] == 'migrate':
@@ -82,7 +82,7 @@ def migrateFromFencedNode(zk_conn, node_name, config, logger):
     dead_node_running_domains = zkhandler.readdata(zk_conn, '/nodes/{}/runningdomains'.format(node_name)).split()
 
     # Set the node to a custom domainstate so we know what's happening
-    zkhandler.writedata(zk_conn, {'/nodes/{}/domainstate'.format(node_name): 'fence-flush' })
+    zkhandler.writedata(zk_conn, {'/nodes/{}/domainstate'.format(node_name): 'fence-flush'})
 
     # Migrate a VM after a flush
     def fence_migrate_vm(dom_uuid):
@@ -109,7 +109,7 @@ def migrateFromFencedNode(zk_conn, node_name, config, logger):
         fence_migrate_vm(dom_uuid)
 
     # Set node in flushed state for easy remigrating when it comes back
-    zkhandler.writedata(zk_conn, {'/nodes/{}/domainstate'.format(node_name): 'flushed' })
+    zkhandler.writedata(zk_conn, {'/nodes/{}/domainstate'.format(node_name): 'flushed'})
 
 #
 # Perform an IPMI fence

@@ -44,25 +44,25 @@ class Logger(object):
     # Format maps
     format_map_colourized = {
         # Colourized formatting with chevron prompts (log_colours = True)
-        'o': {'colour': fmt_green, 'prompt': '>>> ' },
-        'e': {'colour': fmt_red, 'prompt': '>>> ' },
-        'w': {'colour': fmt_yellow, 'prompt': '>>> ' },
-        't': {'colour': fmt_purple, 'prompt': '>>> ' },
-        'i': {'colour': fmt_blue, 'prompt': '>>> ' },
-        's': {'colour': fmt_cyan, 'prompt': '>>> ' },
-        'd': {'colour': fmt_white, 'prompt': '>>> ' },
-        'x': {'colour': last_colour, 'prompt': last_prompt }
+        'o': {'colour': fmt_green, 'prompt': '>>> '},
+        'e': {'colour': fmt_red, 'prompt': '>>> '},
+        'w': {'colour': fmt_yellow, 'prompt': '>>> '},
+        't': {'colour': fmt_purple, 'prompt': '>>> '},
+        'i': {'colour': fmt_blue, 'prompt': '>>> '},
+        's': {'colour': fmt_cyan, 'prompt': '>>> '},
+        'd': {'colour': fmt_white, 'prompt': '>>> '},
+        'x': {'colour': last_colour, 'prompt': last_prompt}
     }
     format_map_textual = {
         # Uncolourized formatting with text prompts (log_colours = False)
-        'o': {'colour': '', 'prompt': 'ok: ' },
-        'e': {'colour': '', 'prompt': 'failed: ' },
-        'w': {'colour': '', 'prompt': 'warning: ' },
-        't': {'colour': '', 'prompt': 'tick: ' },
-        'i': {'colour': '', 'prompt': 'info: ' },
-        's': {'colour': '', 'prompt': 'system: ' },
-        'd': {'colour': '', 'prompt': 'debug: ' },
-        'x': {'colour': '', 'prompt': last_prompt }
+        'o': {'colour': '', 'prompt': 'ok: '},
+        'e': {'colour': '', 'prompt': 'failed: '},
+        'w': {'colour': '', 'prompt': 'warning: '},
+        't': {'colour': '', 'prompt': 'tick: '},
+        'i': {'colour': '', 'prompt': 'info: '},
+        's': {'colour': '', 'prompt': 'system: '},
+        'd': {'colour': '', 'prompt': 'debug: '},
+        'x': {'colour': '', 'prompt': last_prompt}
     }
 
     # Initialization of instance