#!/usr/bin/env python3

# ceph.py - PVC CLI client function library, Ceph cluster functions
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2021 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

import math

from json import dumps, loads

from requests_toolbelt.multipart.encoder import (
    MultipartEncoder,
    MultipartEncoderMonitor,
)

import pvc.cli_lib.ansiprint as ansiprint
from pvc.cli_lib.common import UploadProgressBar, call_api


#
# Supplemental functions
#

# Matrix of human-to-byte values
byte_unit_matrix = {
    "B": 1,
    "K": 1024,
    "M": 1024 * 1024,
    "G": 1024 * 1024 * 1024,
    "T": 1024 * 1024 * 1024 * 1024,
    "P": 1024 * 1024 * 1024 * 1024 * 1024,
}

# Matrix of human-to-metric values
ops_unit_matrix = {
    "": 1,
    "K": 1000,
    "M": 1000 * 1000,
    "G": 1000 * 1000 * 1000,
    "T": 1000 * 1000 * 1000 * 1000,
    "P": 1000 * 1000 * 1000 * 1000 * 1000,
}


# Format byte sizes to/from human-readable units
def format_bytes_tohuman(databytes):
    datahuman = ""
    for unit in sorted(byte_unit_matrix, key=byte_unit_matrix.get, reverse=True):
        new_bytes = int(math.ceil(databytes / byte_unit_matrix[unit]))
        # Round up if 5 or more digits
        if new_bytes > 9999:
            # We can jump down another level
            continue
        else:
            # We're at the end, display with this size
            datahuman = "{}{}".format(new_bytes, unit)

    return datahuman


def format_bytes_fromhuman(datahuman):
    # Trim off human-readable character
    dataunit = datahuman[-1]
    datasize = int(datahuman[:-1])
    databytes = datasize * byte_unit_matrix[dataunit]
    return "{}B".format(databytes)
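
# Example usage (illustrative values, not from the source): the tohuman helper
# ends on the most granular unit that keeps the value at four digits or fewer,
# and fromhuman inverts a unit-suffixed string back to a byte count string:
#   format_bytes_tohuman(53687091200)  ->  "50G"
#   format_bytes_fromhuman("50G")      ->  "53687091200B"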


# Format ops sizes to/from human-readable units
def format_ops_tohuman(dataops):
    datahuman = ""
    for unit in sorted(ops_unit_matrix, key=ops_unit_matrix.get, reverse=True):
        new_ops = int(math.ceil(dataops / ops_unit_matrix[unit]))
        # Round up if 6 or more digits
        if new_ops > 99999:
            # We can jump down another level
            continue
        else:
            # We're at the end, display with this size
            datahuman = "{}{}".format(new_ops, unit)

    return datahuman


def format_ops_fromhuman(datahuman):
    # Trim off human-readable character
    dataunit = datahuman[-1]
    datasize = int(datahuman[:-1])
    dataops = datasize * ops_unit_matrix[dataunit]
    return "{}".format(dataops)
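
# Example usage (illustrative values; the ops threshold is five digits, and
# the metric matrix uses powers of 1000 rather than 1024):
#   format_ops_tohuman(1250000)    ->  "1250K"
#   format_ops_fromhuman("1250K")  ->  "1250000"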


def format_pct_tohuman(datapct):
    datahuman = "{0:.1f}".format(float(datapct * 100.0))
    return datahuman
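
# Example usage (illustrative value; the input is a 0-1 ratio, rendered as a
# one-decimal percentage):
#   format_pct_tohuman(0.254)  ->  "25.4"

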
#
# Status functions
#
def ceph_status(config):
    """
    Get status of the Ceph cluster
    API endpoint: GET /api/v1/storage/ceph/status
    API arguments:
    API schema: {json_data_object}
    """
    response = call_api(config, "get", "/storage/ceph/status")

    if response.status_code == 200:
        return True, response.json()
    else:
        return False, response.json().get("message", "")
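
# Usage sketch (assumes a CLI `config` dict as built by the CLI entrypoints;
# every query helper in this module follows the same (retstatus, data) shape):
#   retstatus, status_data = ceph_status(config)
#   if retstatus:
#       print(format_raw_output(status_data))
#   else:
#       print(status_data)  # on failure, data is the API error message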


def ceph_util(config):
    """
    Get utilization of the Ceph cluster
    API endpoint: GET /api/v1/storage/ceph/utilization
    API arguments:
    API schema: {json_data_object}
    """
    response = call_api(config, "get", "/storage/ceph/utilization")

    if response.status_code == 200:
        return True, response.json()
    else:
        return False, response.json().get("message", "")


def format_raw_output(status_data):
    ainformation = list()
    ainformation.append(
        "{bold}Ceph cluster {stype} (primary node {end}{blue}{primary}{end}{bold}){end}\n".format(
            bold=ansiprint.bold(),
            end=ansiprint.end(),
            blue=ansiprint.blue(),
            stype=status_data["type"],
            primary=status_data["primary_node"],
        )
    )
    ainformation.append(status_data["ceph_data"])
    ainformation.append("")

    return "\n".join(ainformation)


#
# OSD DB VG functions
#
def ceph_osd_db_vg_add(config, node, device):
    """
    Add new Ceph OSD database volume group
    API endpoint: POST /api/v1/storage/ceph/osddb
    API arguments: node={node}, device={device}
    API schema: {"message":"{data}"}
    """
    params = {"node": node, "device": device}
    response = call_api(config, "post", "/storage/ceph/osddb", params=params)

    if response.status_code == 200:
        retstatus = True
    else:
        retstatus = False

    return retstatus, response.json().get("message", "")
#
# OSD functions
#
def ceph_osd_info(config, osd):
    """
    Get information about Ceph OSD
    API endpoint: GET /api/v1/storage/ceph/osd/{osd}
    API arguments:
    API schema: {json_data_object}
    """
    response = call_api(config, "get", "/storage/ceph/osd/{osd}".format(osd=osd))

    if response.status_code == 200:
        if isinstance(response.json(), list) and len(response.json()) != 1:
            # No exact match; return not found
            return False, "OSD not found."
        else:
            # Return a single instance if the response is a list
            if isinstance(response.json(), list):
                return True, response.json()[0]
            # This shouldn't happen, but is here just in case
            else:
                return True, response.json()
    else:
        return False, response.json().get("message", "")


def ceph_osd_list(config, limit):
    """
    Get list information about Ceph OSDs (limited by {limit})
    API endpoint: GET /api/v1/storage/ceph/osd
    API arguments: limit={limit}
    API schema: [{json_data_object},{json_data_object},etc.]
    """
    params = dict()
    if limit:
        params["limit"] = limit

    response = call_api(config, "get", "/storage/ceph/osd", params=params)

    if response.status_code == 200:
        return True, response.json()
    else:
        return False, response.json().get("message", "")


def ceph_osd_add(config, node, device, weight, ext_db_flag, ext_db_ratio):
    """
    Add new Ceph OSD
    API endpoint: POST /api/v1/storage/ceph/osd
    API arguments: node={node}, device={device}, weight={weight}, ext_db={ext_db_flag}, ext_db_ratio={ext_db_ratio}
    API schema: {"message":"{data}"}
    """
    params = {
        "node": node,
        "device": device,
        "weight": weight,
        "ext_db": ext_db_flag,
        "ext_db_ratio": ext_db_ratio,
    }
    response = call_api(config, "post", "/storage/ceph/osd", params=params)

    if response.status_code == 200:
        retstatus = True
    else:
        retstatus = False

    return retstatus, response.json().get("message", "")
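
# Usage sketch (illustrative values; ext_db_ratio is the fraction of the OSD
# size to allocate for the external database volume when ext_db_flag is set):
#   retstatus, message = ceph_osd_add(
#       config, "hv1", "/dev/sdb", weight=1.0, ext_db_flag=True, ext_db_ratio=0.05
#   )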


def ceph_osd_remove(config, osdid):
    """
    Remove Ceph OSD
    API endpoint: DELETE /api/v1/storage/ceph/osd/{osdid}
    API arguments:
    API schema: {"message":"{data}"}
    """
    params = {"yes-i-really-mean-it": "yes"}
    response = call_api(
        config, "delete", "/storage/ceph/osd/{osdid}".format(osdid=osdid), params=params
    )

    if response.status_code == 200:
        retstatus = True
    else:
        retstatus = False

    return retstatus, response.json().get("message", "")


def ceph_osd_state(config, osdid, state):
    """
    Set state of Ceph OSD
    API endpoint: POST /api/v1/storage/ceph/osd/{osdid}/state
    API arguments: state={state}
    API schema: {"message":"{data}"}
    """
    params = {"state": state}
    response = call_api(
        config,
        "post",
        "/storage/ceph/osd/{osdid}/state".format(osdid=osdid),
        params=params,
    )

    if response.status_code == 200:
        retstatus = True
    else:
        retstatus = False

    return retstatus, response.json().get("message", "")


def ceph_osd_option(config, option, action):
    """
    Set cluster option of Ceph OSDs
    API endpoint: POST /api/v1/storage/ceph/option
    API arguments: option={option}, action={action}
    API schema: {"message":"{data}"}
    """
    params = {"option": option, "action": action}
    response = call_api(config, "post", "/storage/ceph/option", params=params)

    if response.status_code == 200:
        retstatus = True
    else:
        retstatus = False

    return retstatus, response.json().get("message", "")
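
# Usage sketch (illustrative; "set"/"unset" toggle cluster-wide OSD flags,
# for example noout during maintenance windows):
#   retstatus, message = ceph_osd_option(config, "noout", "set")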


def getOutputColoursOSD(osd_information):
    # Set the UP status
    if osd_information["stats"]["up"] == 1:
        osd_up_flag = "Yes"
        osd_up_colour = ansiprint.green()
    else:
        osd_up_flag = "No"
        osd_up_colour = ansiprint.red()
    # Set the IN status
    if osd_information["stats"]["in"] == 1:
        osd_in_flag = "Yes"
        osd_in_colour = ansiprint.green()
    else:
        osd_in_flag = "No"
        osd_in_colour = ansiprint.red()
    return osd_up_flag, osd_up_colour, osd_in_flag, osd_in_colour


def format_list_osd(osd_list):
    # Handle empty list
    if not osd_list:
        osd_list = list()

    osd_list_output = []
    osd_id_length = 3
    osd_node_length = 5
    osd_device_length = 6
    osd_db_device_length = 9
    osd_up_length = 4
    osd_in_length = 4
    osd_size_length = 5
    osd_weight_length = 3
    osd_reweight_length = 5
    osd_pgs_length = 4
    osd_used_length = 5
    osd_free_length = 6
    osd_util_length = 6
    osd_var_length = 5
    osd_wrops_length = 4
    osd_wrdata_length = 5
    osd_rdops_length = 4
    osd_rddata_length = 5
    for osd_information in osd_list:
        try:
            # If this happens, the node hasn't checked in fully yet, so just ignore it
            if osd_information["stats"]["node"] == "|":
                continue
        except KeyError:
            continue
        # Deal with the size to human readable
        osd_information["stats"]["size"] = osd_information["stats"]["kb"] * 1024
        for datatype in "size", "wr_data", "rd_data":
            databytes = osd_information["stats"][datatype]
            if isinstance(databytes, int):
                databytes_formatted = format_bytes_tohuman(databytes)
            else:
                databytes_formatted = databytes
            osd_information["stats"][datatype] = databytes_formatted
        for datatype in "wr_ops", "rd_ops":
            dataops = osd_information["stats"][datatype]
            if isinstance(dataops, int):
                dataops_formatted = format_ops_tohuman(dataops)
            else:
                dataops_formatted = dataops
            osd_information["stats"][datatype] = dataops_formatted
        # Set the OSD ID length
        _osd_id_length = len(osd_information["id"]) + 1
        if _osd_id_length > osd_id_length:
            osd_id_length = _osd_id_length
        # Set the OSD node length
        _osd_node_length = len(osd_information["stats"]["node"]) + 1
        if _osd_node_length > osd_node_length:
            osd_node_length = _osd_node_length
        # Set the OSD device length
        _osd_device_length = len(osd_information["device"]) + 1
        if _osd_device_length > osd_device_length:
            osd_device_length = _osd_device_length
        # Set the OSD db_device length
        _osd_db_device_length = len(osd_information["db_device"]) + 1
        if _osd_db_device_length > osd_db_device_length:
            osd_db_device_length = _osd_db_device_length
        # Set the size and length
        _osd_size_length = len(str(osd_information["stats"]["size"])) + 1
        if _osd_size_length > osd_size_length:
            osd_size_length = _osd_size_length
        # Set the weight and length
        _osd_weight_length = len(str(osd_information["stats"]["weight"])) + 1
        if _osd_weight_length > osd_weight_length:
            osd_weight_length = _osd_weight_length
        # Set the reweight and length
        _osd_reweight_length = len(str(osd_information["stats"]["reweight"])) + 1
        if _osd_reweight_length > osd_reweight_length:
            osd_reweight_length = _osd_reweight_length
        # Set the pgs and length
        _osd_pgs_length = len(str(osd_information["stats"]["pgs"])) + 1
        if _osd_pgs_length > osd_pgs_length:
            osd_pgs_length = _osd_pgs_length
        # Set the used/available/utilization%/variance and lengths
        _osd_used_length = len(osd_information["stats"]["used"]) + 1
        if _osd_used_length > osd_used_length:
            osd_used_length = _osd_used_length
        _osd_free_length = len(osd_information["stats"]["avail"]) + 1
        if _osd_free_length > osd_free_length:
            osd_free_length = _osd_free_length
        osd_util = round(osd_information["stats"]["utilization"], 2)
        _osd_util_length = len(str(osd_util)) + 1
        if _osd_util_length > osd_util_length:
            osd_util_length = _osd_util_length
        osd_var = round(osd_information["stats"]["var"], 2)
        _osd_var_length = len(str(osd_var)) + 1
        if _osd_var_length > osd_var_length:
            osd_var_length = _osd_var_length
        # Set the read/write IOPS/data and length
        _osd_wrops_length = len(osd_information["stats"]["wr_ops"]) + 1
        if _osd_wrops_length > osd_wrops_length:
            osd_wrops_length = _osd_wrops_length
        _osd_wrdata_length = len(osd_information["stats"]["wr_data"]) + 1
        if _osd_wrdata_length > osd_wrdata_length:
            osd_wrdata_length = _osd_wrdata_length
        _osd_rdops_length = len(osd_information["stats"]["rd_ops"]) + 1
        if _osd_rdops_length > osd_rdops_length:
            osd_rdops_length = _osd_rdops_length
        _osd_rddata_length = len(osd_information["stats"]["rd_data"]) + 1
        if _osd_rddata_length > osd_rddata_length:
            osd_rddata_length = _osd_rddata_length

    # Format the output header
    osd_list_output.append(
        "{bold}{osd_header: <{osd_header_length}} {state_header: <{state_header_length}} {details_header: <{details_header_length}} {read_header: <{read_header_length}} {write_header: <{write_header_length}}{end_bold}".format(
            bold=ansiprint.bold(),
            end_bold=ansiprint.end(),
            osd_header_length=osd_id_length
            + osd_node_length
            + osd_device_length
            + osd_db_device_length
            + 3,
            state_header_length=osd_up_length + osd_in_length + 1,
            details_header_length=osd_size_length
            + osd_pgs_length
            + osd_weight_length
            + osd_reweight_length
            + osd_used_length
            + osd_free_length
            + osd_util_length
            + osd_var_length
            + 7,
            read_header_length=osd_rdops_length + osd_rddata_length + 1,
            write_header_length=osd_wrops_length + osd_wrdata_length + 1,
            osd_header="OSDs "
            + "".join(
                [
                    "-"
                    for _ in range(
                        5,
                        osd_id_length
                        + osd_node_length
                        + osd_device_length
                        + osd_db_device_length
                        + 2,
                    )
                ]
            ),
            state_header="State "
            + "".join(["-" for _ in range(6, osd_up_length + osd_in_length)]),
            details_header="Details "
            + "".join(
                [
                    "-"
                    for _ in range(
                        8,
                        osd_size_length
                        + osd_pgs_length
                        + osd_weight_length
                        + osd_reweight_length
                        + osd_used_length
                        + osd_free_length
                        + osd_util_length
                        + osd_var_length
                        + 6,
                    )
                ]
            ),
            read_header="Read "
            + "".join(["-" for _ in range(5, osd_rdops_length + osd_rddata_length)]),
            write_header="Write "
            + "".join(["-" for _ in range(6, osd_wrops_length + osd_wrdata_length)]),
        )
    )

    osd_list_output.append(
        "{bold}\
{osd_id: <{osd_id_length}} \
{osd_node: <{osd_node_length}} \
{osd_device: <{osd_device_length}} \
{osd_db_device: <{osd_db_device_length}} \
{osd_up: <{osd_up_length}} \
{osd_in: <{osd_in_length}} \
{osd_size: <{osd_size_length}} \
{osd_pgs: <{osd_pgs_length}} \
{osd_weight: <{osd_weight_length}} \
{osd_reweight: <{osd_reweight_length}} \
{osd_used: <{osd_used_length}} \
{osd_free: <{osd_free_length}} \
{osd_util: <{osd_util_length}} \
{osd_var: <{osd_var_length}} \
{osd_rdops: <{osd_rdops_length}} \
{osd_rddata: <{osd_rddata_length}} \
{osd_wrops: <{osd_wrops_length}} \
{osd_wrdata: <{osd_wrdata_length}} \
{end_bold}".format(
            bold=ansiprint.bold(),
            end_bold=ansiprint.end(),
            osd_id_length=osd_id_length,
            osd_node_length=osd_node_length,
            osd_device_length=osd_device_length,
            osd_db_device_length=osd_db_device_length,
            osd_up_length=osd_up_length,
            osd_in_length=osd_in_length,
            osd_size_length=osd_size_length,
            osd_pgs_length=osd_pgs_length,
            osd_weight_length=osd_weight_length,
            osd_reweight_length=osd_reweight_length,
            osd_used_length=osd_used_length,
            osd_free_length=osd_free_length,
            osd_util_length=osd_util_length,
            osd_var_length=osd_var_length,
            osd_wrops_length=osd_wrops_length,
            osd_wrdata_length=osd_wrdata_length,
            osd_rdops_length=osd_rdops_length,
            osd_rddata_length=osd_rddata_length,
            osd_id="ID",
            osd_node="Node",
            osd_device="Block",
            osd_db_device="DB Block",
            osd_up="Up",
            osd_in="In",
            osd_size="Size",
            osd_pgs="PGs",
            osd_weight="Wt",
            osd_reweight="ReWt",
            osd_used="Used",
            osd_free="Free",
            osd_util="Util%",
            osd_var="Var",
            osd_wrops="OPS",
            osd_wrdata="Data",
            osd_rdops="OPS",
            osd_rddata="Data",
        )
    )

    for osd_information in sorted(osd_list, key=lambda x: int(x["id"])):
        try:
            # If this happens, the node hasn't checked in fully yet, so just ignore it
            if osd_information["stats"]["node"] == "|":
                continue
        except KeyError:
            continue

        osd_up_flag, osd_up_colour, osd_in_flag, osd_in_colour = getOutputColoursOSD(
            osd_information
        )
        osd_util = round(osd_information["stats"]["utilization"], 2)
        osd_var = round(osd_information["stats"]["var"], 2)

        osd_db_device = osd_information["db_device"]
        if not osd_db_device:
            osd_db_device = "N/A"

        # Format the output row
        osd_list_output.append(
            "{bold}\
{osd_id: <{osd_id_length}} \
{osd_node: <{osd_node_length}} \
{osd_device: <{osd_device_length}} \
{osd_db_device: <{osd_db_device_length}} \
{osd_up_colour}{osd_up: <{osd_up_length}}{end_colour} \
{osd_in_colour}{osd_in: <{osd_in_length}}{end_colour} \
{osd_size: <{osd_size_length}} \
{osd_pgs: <{osd_pgs_length}} \
{osd_weight: <{osd_weight_length}} \
{osd_reweight: <{osd_reweight_length}} \
{osd_used: <{osd_used_length}} \
{osd_free: <{osd_free_length}} \
{osd_util: <{osd_util_length}} \
{osd_var: <{osd_var_length}} \
{osd_rdops: <{osd_rdops_length}} \
{osd_rddata: <{osd_rddata_length}} \
{osd_wrops: <{osd_wrops_length}} \
{osd_wrdata: <{osd_wrdata_length}} \
{end_bold}".format(
                bold="",
                end_bold="",
                end_colour=ansiprint.end(),
                osd_id_length=osd_id_length,
                osd_node_length=osd_node_length,
                osd_device_length=osd_device_length,
                osd_db_device_length=osd_db_device_length,
                osd_up_length=osd_up_length,
                osd_in_length=osd_in_length,
                osd_size_length=osd_size_length,
                osd_pgs_length=osd_pgs_length,
                osd_weight_length=osd_weight_length,
                osd_reweight_length=osd_reweight_length,
                osd_used_length=osd_used_length,
                osd_free_length=osd_free_length,
                osd_util_length=osd_util_length,
                osd_var_length=osd_var_length,
                osd_wrops_length=osd_wrops_length,
                osd_wrdata_length=osd_wrdata_length,
                osd_rdops_length=osd_rdops_length,
                osd_rddata_length=osd_rddata_length,
                osd_id=osd_information["id"],
                osd_node=osd_information["stats"]["node"],
                osd_device=osd_information["device"],
                osd_db_device=osd_db_device,
                osd_up_colour=osd_up_colour,
                osd_up=osd_up_flag,
                osd_in_colour=osd_in_colour,
                osd_in=osd_in_flag,
                osd_size=osd_information["stats"]["size"],
                osd_pgs=osd_information["stats"]["pgs"],
                osd_weight=osd_information["stats"]["weight"],
                osd_reweight=osd_information["stats"]["reweight"],
                osd_used=osd_information["stats"]["used"],
                osd_free=osd_information["stats"]["avail"],
                osd_util=osd_util,
                osd_var=osd_var,
                osd_wrops=osd_information["stats"]["wr_ops"],
                osd_wrdata=osd_information["stats"]["wr_data"],
                osd_rdops=osd_information["stats"]["rd_ops"],
                osd_rddata=osd_information["stats"]["rd_data"],
            )
        )

    return "\n".join(osd_list_output)
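
# Usage sketch (typical CLI flow for the OSD list view):
#   retstatus, osd_list = ceph_osd_list(config, limit=None)
#   if retstatus:
#       print(format_list_osd(osd_list))

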
#
# Pool functions
#
def ceph_pool_info(config, pool):
    """
    Get information about Ceph pool
    API endpoint: GET /api/v1/storage/ceph/pool/{pool}
    API arguments:
    API schema: {json_data_object}
    """
    response = call_api(config, "get", "/storage/ceph/pool/{pool}".format(pool=pool))

    if response.status_code == 200:
        if isinstance(response.json(), list) and len(response.json()) != 1:
            # No exact match; return not found
            return False, "Pool not found."
        else:
            # Return a single instance if the response is a list
            if isinstance(response.json(), list):
                return True, response.json()[0]
            # This shouldn't happen, but is here just in case
            else:
                return True, response.json()
    else:
        return False, response.json().get("message", "")


def ceph_pool_list(config, limit):
    """
    Get list information about Ceph pools (limited by {limit})
    API endpoint: GET /api/v1/storage/ceph/pool
    API arguments: limit={limit}
    API schema: [{json_data_object},{json_data_object},etc.]
    """
    params = dict()
    if limit:
        params["limit"] = limit

    response = call_api(config, "get", "/storage/ceph/pool", params=params)

    if response.status_code == 200:
        return True, response.json()
    else:
        return False, response.json().get("message", "")


def ceph_pool_add(config, pool, pgs, replcfg, tier):
    """
    Add new Ceph pool
    API endpoint: POST /api/v1/storage/ceph/pool
    API arguments: pool={pool}, pgs={pgs}, replcfg={replcfg}, tier={tier}
    API schema: {"message":"{data}"}
    """
    params = {"pool": pool, "pgs": pgs, "replcfg": replcfg, "tier": tier}
    response = call_api(config, "post", "/storage/ceph/pool", params=params)

    if response.status_code == 200:
        retstatus = True
    else:
        retstatus = False

    return retstatus, response.json().get("message", "")
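
# Usage sketch (illustrative values; replcfg is a replication string such as
# "copies=3,mincopies=2" and tier names a device-class tier, e.g. "default"):
#   retstatus, message = ceph_pool_add(
#       config, "vms", pgs="256", replcfg="copies=3,mincopies=2", tier="default"
#   )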


def ceph_pool_remove(config, pool):
    """
    Remove Ceph pool
    API endpoint: DELETE /api/v1/storage/ceph/pool/{pool}
    API arguments:
    API schema: {"message":"{data}"}
    """
    params = {"yes-i-really-mean-it": "yes"}
    response = call_api(
        config, "delete", "/storage/ceph/pool/{pool}".format(pool=pool), params=params
    )

    if response.status_code == 200:
        retstatus = True
    else:
        retstatus = False

    return retstatus, response.json().get("message", "")


def ceph_pool_set_pgs(config, pool, pgs):
    """
    Set the PGs of a Ceph pool
    API endpoint: PUT /api/v1/storage/ceph/pool/{pool}
    API arguments: {"pgs": "{pgs}"}
    API schema: {"message":"{data}"}
    """
    params = {"pgs": pgs}
    response = call_api(
        config, "put", "/storage/ceph/pool/{pool}".format(pool=pool), params=params
    )

    if response.status_code == 200:
        retstatus = True
    else:
        retstatus = False

    return retstatus, response.json().get("message", "")


def format_list_pool(pool_list):
    # Handle empty list
    if not pool_list:
        pool_list = list()

    pool_list_output = []
    pool_name_length = 5
    pool_id_length = 3
    pool_tier_length = 5
    pool_pgs_length = 4
    pool_used_length = 5
    pool_usedpct_length = 6
    pool_free_length = 5
    pool_num_objects_length = 6
    pool_num_clones_length = 7
    pool_num_copies_length = 7
    pool_num_degraded_length = 9
    pool_read_ops_length = 4
    pool_read_data_length = 5
    pool_write_ops_length = 4
    pool_write_data_length = 5
    for pool_information in pool_list:
        # Deal with the size to human readable
        for datatype in ["free_bytes", "used_bytes", "write_bytes", "read_bytes"]:
            databytes = pool_information["stats"][datatype]
            databytes_formatted = format_bytes_tohuman(int(databytes))
            pool_information["stats"][datatype] = databytes_formatted
        for datatype in ["write_ops", "read_ops"]:
            dataops = pool_information["stats"][datatype]
            dataops_formatted = format_ops_tohuman(int(dataops))
            pool_information["stats"][datatype] = dataops_formatted
        for datatype in ["used_percent"]:
            datapct = pool_information["stats"][datatype]
            datapct_formatted = format_pct_tohuman(float(datapct))
            pool_information["stats"][datatype] = datapct_formatted
        # Set the Pool name length
        _pool_name_length = len(pool_information["name"]) + 1
        if _pool_name_length > pool_name_length:
            pool_name_length = _pool_name_length
        # Set the id and length
        _pool_id_length = len(str(pool_information["stats"]["id"])) + 1
        if _pool_id_length > pool_id_length:
            pool_id_length = _pool_id_length
        # Set the tier and length
        _pool_tier_length = len(str(pool_information["tier"])) + 1
        if _pool_tier_length > pool_tier_length:
            pool_tier_length = _pool_tier_length
        # Set the pgs and length
        _pool_pgs_length = len(str(pool_information["pgs"])) + 1
        if _pool_pgs_length > pool_pgs_length:
            pool_pgs_length = _pool_pgs_length
        # Set the used and length
        _pool_used_length = len(str(pool_information["stats"]["used_bytes"])) + 1
        if _pool_used_length > pool_used_length:
            pool_used_length = _pool_used_length
        # Set the usedpct and length
        _pool_usedpct_length = len(str(pool_information["stats"]["used_percent"])) + 1
        if _pool_usedpct_length > pool_usedpct_length:
            pool_usedpct_length = _pool_usedpct_length
        # Set the free and length
        _pool_free_length = len(str(pool_information["stats"]["free_bytes"])) + 1
        if _pool_free_length > pool_free_length:
            pool_free_length = _pool_free_length
        # Set the num_objects and length
        _pool_num_objects_length = (
            len(str(pool_information["stats"]["num_objects"])) + 1
        )
        if _pool_num_objects_length > pool_num_objects_length:
            pool_num_objects_length = _pool_num_objects_length
        # Set the num_clones and length
        _pool_num_clones_length = (
            len(str(pool_information["stats"]["num_object_clones"])) + 1
        )
        if _pool_num_clones_length > pool_num_clones_length:
            pool_num_clones_length = _pool_num_clones_length
        # Set the num_copies and length
        _pool_num_copies_length = (
            len(str(pool_information["stats"]["num_object_copies"])) + 1
        )
        if _pool_num_copies_length > pool_num_copies_length:
            pool_num_copies_length = _pool_num_copies_length
        # Set the num_degraded and length
        _pool_num_degraded_length = (
            len(str(pool_information["stats"]["num_objects_degraded"])) + 1
        )
        if _pool_num_degraded_length > pool_num_degraded_length:
            pool_num_degraded_length = _pool_num_degraded_length
        # Set the read/write IOPS/data and length
        _pool_write_ops_length = len(str(pool_information["stats"]["write_ops"])) + 1
        if _pool_write_ops_length > pool_write_ops_length:
            pool_write_ops_length = _pool_write_ops_length
        _pool_write_data_length = len(pool_information["stats"]["write_bytes"]) + 1
        if _pool_write_data_length > pool_write_data_length:
            pool_write_data_length = _pool_write_data_length
        _pool_read_ops_length = len(str(pool_information["stats"]["read_ops"])) + 1
        if _pool_read_ops_length > pool_read_ops_length:
            pool_read_ops_length = _pool_read_ops_length
        _pool_read_data_length = len(pool_information["stats"]["read_bytes"]) + 1
        if _pool_read_data_length > pool_read_data_length:
            pool_read_data_length = _pool_read_data_length

    # Format the output header
    pool_list_output.append(
        "{bold}{pool_header: <{pool_header_length}} {objects_header: <{objects_header_length}} {read_header: <{read_header_length}} {write_header: <{write_header_length}}{end_bold}".format(
            bold=ansiprint.bold(),
            end_bold=ansiprint.end(),
            pool_header_length=pool_id_length
            + pool_name_length
            + pool_tier_length
            + pool_pgs_length
            + pool_used_length
            + pool_usedpct_length
            + pool_free_length
            + 6,
            objects_header_length=pool_num_objects_length
            + pool_num_clones_length
            + pool_num_copies_length
            + pool_num_degraded_length
            + 3,
            read_header_length=pool_read_ops_length + pool_read_data_length + 1,
            write_header_length=pool_write_ops_length + pool_write_data_length + 1,
            pool_header="Pools "
            + "".join(
                [
                    "-"
                    for _ in range(
                        6,
                        pool_id_length
                        + pool_name_length
                        + pool_tier_length
                        + pool_pgs_length
                        + pool_used_length
                        + pool_usedpct_length
                        + pool_free_length
                        + 5,
                    )
                ]
            ),
            objects_header="Objects "
            + "".join(
                [
                    "-"
                    for _ in range(
                        8,
                        pool_num_objects_length
                        + pool_num_clones_length
                        + pool_num_copies_length
                        + pool_num_degraded_length
                        + 2,
                    )
                ]
            ),
            read_header="Read "
            + "".join(
                ["-" for _ in range(5, pool_read_ops_length + pool_read_data_length)]
            ),
            write_header="Write "
            + "".join(
                ["-" for _ in range(6, pool_write_ops_length + pool_write_data_length)]
            ),
        )
    )

    pool_list_output.append(
        "{bold}\
{pool_id: <{pool_id_length}} \
{pool_name: <{pool_name_length}} \
{pool_tier: <{pool_tier_length}} \
{pool_pgs: <{pool_pgs_length}} \
{pool_used: <{pool_used_length}} \
{pool_usedpct: <{pool_usedpct_length}} \
{pool_free: <{pool_free_length}} \
{pool_objects: <{pool_objects_length}} \
{pool_clones: <{pool_clones_length}} \
{pool_copies: <{pool_copies_length}} \
{pool_degraded: <{pool_degraded_length}} \
{pool_read_ops: <{pool_read_ops_length}} \
{pool_read_data: <{pool_read_data_length}} \
{pool_write_ops: <{pool_write_ops_length}} \
{pool_write_data: <{pool_write_data_length}} \
{end_bold}".format(
            bold=ansiprint.bold(),
            end_bold=ansiprint.end(),
            pool_id_length=pool_id_length,
            pool_name_length=pool_name_length,
            pool_tier_length=pool_tier_length,
            pool_pgs_length=pool_pgs_length,
            pool_used_length=pool_used_length,
            pool_usedpct_length=pool_usedpct_length,
            pool_free_length=pool_free_length,
            pool_objects_length=pool_num_objects_length,
            pool_clones_length=pool_num_clones_length,
            pool_copies_length=pool_num_copies_length,
            pool_degraded_length=pool_num_degraded_length,
            pool_write_ops_length=pool_write_ops_length,
            pool_write_data_length=pool_write_data_length,
            pool_read_ops_length=pool_read_ops_length,
            pool_read_data_length=pool_read_data_length,
            pool_id="ID",
            pool_name="Name",
            pool_tier="Tier",
            pool_pgs="PGs",
            pool_used="Used",
            pool_usedpct="Used%",
            pool_free="Free",
            pool_objects="Count",
            pool_clones="Clones",
            pool_copies="Copies",
            pool_degraded="Degraded",
            pool_write_ops="OPS",
            pool_write_data="Data",
            pool_read_ops="OPS",
            pool_read_data="Data",
        )
    )

    for pool_information in sorted(pool_list, key=lambda x: int(x["stats"]["id"])):
        # Format the output row
        pool_list_output.append(
            "{bold}\
{pool_id: <{pool_id_length}} \
{pool_name: <{pool_name_length}} \
{pool_tier: <{pool_tier_length}} \
{pool_pgs: <{pool_pgs_length}} \
{pool_used: <{pool_used_length}} \
{pool_usedpct: <{pool_usedpct_length}} \
{pool_free: <{pool_free_length}} \
{pool_objects: <{pool_objects_length}} \
{pool_clones: <{pool_clones_length}} \
{pool_copies: <{pool_copies_length}} \
{pool_degraded: <{pool_degraded_length}} \
{pool_read_ops: <{pool_read_ops_length}} \
{pool_read_data: <{pool_read_data_length}} \
{pool_write_ops: <{pool_write_ops_length}} \
{pool_write_data: <{pool_write_data_length}} \
{end_bold}".format(
                bold="",
                end_bold="",
                pool_id_length=pool_id_length,
                pool_name_length=pool_name_length,
                pool_tier_length=pool_tier_length,
                pool_pgs_length=pool_pgs_length,
                pool_used_length=pool_used_length,
                pool_usedpct_length=pool_usedpct_length,
                pool_free_length=pool_free_length,
                pool_objects_length=pool_num_objects_length,
                pool_clones_length=pool_num_clones_length,
                pool_copies_length=pool_num_copies_length,
                pool_degraded_length=pool_num_degraded_length,
                pool_write_ops_length=pool_write_ops_length,
                pool_write_data_length=pool_write_data_length,
                pool_read_ops_length=pool_read_ops_length,
                pool_read_data_length=pool_read_data_length,
                pool_id=pool_information["stats"]["id"],
                pool_name=pool_information["name"],
                pool_tier=pool_information["tier"],
                pool_pgs=pool_information["pgs"],
                pool_used=pool_information["stats"]["used_bytes"],
                pool_usedpct=pool_information["stats"]["used_percent"],
                pool_free=pool_information["stats"]["free_bytes"],
                pool_objects=pool_information["stats"]["num_objects"],
                pool_clones=pool_information["stats"]["num_object_clones"],
                pool_copies=pool_information["stats"]["num_object_copies"],
                pool_degraded=pool_information["stats"]["num_objects_degraded"],
                pool_write_ops=pool_information["stats"]["write_ops"],
                pool_write_data=pool_information["stats"]["write_bytes"],
                pool_read_ops=pool_information["stats"]["read_ops"],
                pool_read_data=pool_information["stats"]["read_bytes"],
            )
        )

    return "\n".join(pool_list_output)


#
# Volume functions
#
def ceph_volume_info(config, pool, volume):
    """
    Get information about Ceph volume
    API endpoint: GET /api/v1/storage/ceph/volume/{pool}/{volume}
    API arguments:
    API schema: {json_data_object}
    """
    response = call_api(
        config,
        "get",
        "/storage/ceph/volume/{pool}/{volume}".format(volume=volume, pool=pool),
    )

    if response.status_code == 200:
        if isinstance(response.json(), list) and len(response.json()) != 1:
            # No exact match; return not found
            return False, "Volume not found."
        else:
            # Return a single instance if the response is a list
            if isinstance(response.json(), list):
                return True, response.json()[0]
            # This shouldn't happen, but is here just in case
            else:
                return True, response.json()
    else:
        return False, response.json().get("message", "")


def ceph_volume_list(config, limit, pool):
    """
    Get list information about Ceph volumes (limited by {limit} and by {pool})
    API endpoint: GET /api/v1/storage/ceph/volume
    API arguments: limit={limit}, pool={pool}
    API schema: [{json_data_object},{json_data_object},etc.]
    """
    params = dict()
    if limit:
        params["limit"] = limit
    if pool:
        params["pool"] = pool

    response = call_api(config, "get", "/storage/ceph/volume", params=params)

    if response.status_code == 200:
        return True, response.json()
    else:
        return False, response.json().get("message", "")


def ceph_volume_add(config, pool, volume, size):
    """
    Add new Ceph volume
    API endpoint: POST /api/v1/storage/ceph/volume
    API arguments: volume={volume}, pool={pool}, size={size}
    API schema: {"message":"{data}"}
    """
    params = {"volume": volume, "pool": pool, "size": size}
    response = call_api(config, "post", "/storage/ceph/volume", params=params)

    if response.status_code == 200:
        retstatus = True
    else:
        retstatus = False

    return retstatus, response.json().get("message", "")
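
# Usage sketch (illustrative names; size is a human-readable string using the
# same unit suffixes as format_bytes_fromhuman, e.g. "50G"):
#   retstatus, message = ceph_volume_add(config, "vms", "test-disk0", "50G")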


def ceph_volume_upload(config, pool, volume, image_format, image_file):
    """
    Upload a disk image to a Ceph volume
    API endpoint: POST /api/v1/storage/ceph/volume/{pool}/{volume}/upload
    API arguments: image_format={image_format}
    API schema: {"message":"{data}"}
    """
    import click

    bar = UploadProgressBar(
        image_file, end_message="Parsing file on remote side...", end_nl=False
    )
    upload_data = MultipartEncoder(
        fields={
            "file": ("filename", open(image_file, "rb"), "application/octet-stream")
        }
    )
    upload_monitor = MultipartEncoderMonitor(upload_data, bar.update)

    headers = {"Content-Type": upload_monitor.content_type}
    params = {"image_format": image_format}

    response = call_api(
        config,
        "post",
        "/storage/ceph/volume/{}/{}/upload".format(pool, volume),
        headers=headers,
        params=params,
        data=upload_monitor,
    )

    click.echo("done.")
    click.echo()

    if response.status_code == 200:
        retstatus = True
    else:
        retstatus = False

    return retstatus, response.json().get("message", "")
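
# Usage sketch (illustrative paths; the image is streamed as multipart data
# via MultipartEncoder so large files are not read into memory all at once):
#   retstatus, message = ceph_volume_upload(
#       config, "vms", "test-disk0", "qcow2", "/tmp/image.qcow2"
#   )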


def ceph_volume_remove(config, pool, volume):
    """
    Remove Ceph volume
    API endpoint: DELETE /api/v1/storage/ceph/volume/{pool}/{volume}
    API arguments:
    API schema: {"message":"{data}"}
    """
    response = call_api(
        config,
        "delete",
        "/storage/ceph/volume/{pool}/{volume}".format(volume=volume, pool=pool),
    )

    if response.status_code == 200:
        retstatus = True
    else:
        retstatus = False

    return retstatus, response.json().get("message", "")


def ceph_volume_modify(config, pool, volume, new_name=None, new_size=None):
    """
    Modify Ceph volume
    API endpoint: PUT /api/v1/storage/ceph/volume/{pool}/{volume}
    API arguments:
    API schema: {"message":"{data}"}
    """
    params = dict()
    if new_name:
        params["new_name"] = new_name
    if new_size:
        params["new_size"] = new_size

    response = call_api(
        config,
        "put",
        "/storage/ceph/volume/{pool}/{volume}".format(volume=volume, pool=pool),
        params=params,
    )

    if response.status_code == 200:
        retstatus = True
    else:
        retstatus = False

    return retstatus, response.json().get("message", "")


def ceph_volume_clone(config, pool, volume, new_volume):
    """
    Clone Ceph volume
    API endpoint: POST /api/v1/storage/ceph/volume/{pool}/{volume}/clone
    API arguments: new_volume={new_volume}
    API schema: {"message":"{data}"}
    """
    params = {"new_volume": new_volume}
    response = call_api(
        config,
        "post",
        "/storage/ceph/volume/{pool}/{volume}/clone".format(volume=volume, pool=pool),
        params=params,
    )

    if response.status_code == 200:
        retstatus = True
    else:
        retstatus = False

    return retstatus, response.json().get("message", "")


def format_list_volume(volume_list):
    # Handle empty list
    if not volume_list:
        volume_list = list()

    volume_list_output = []
    volume_name_length = 5
    volume_pool_length = 5
    volume_size_length = 5
    volume_objects_length = 8
    volume_order_length = 6
    volume_format_length = 7
    volume_features_length = 10
    for volume_information in volume_list:
        # Set the Volume name length
        _volume_name_length = len(volume_information["name"]) + 1
        if _volume_name_length > volume_name_length:
            volume_name_length = _volume_name_length
        # Set the Volume pool length
        _volume_pool_length = len(volume_information["pool"]) + 1
        if _volume_pool_length > volume_pool_length:
            volume_pool_length = _volume_pool_length
        # Set the size and length
        _volume_size_length = len(str(volume_information["stats"]["size"])) + 1
        if _volume_size_length > volume_size_length:
            volume_size_length = _volume_size_length
        # Set the num_objects and length
        _volume_objects_length = len(str(volume_information["stats"]["objects"])) + 1
        if _volume_objects_length > volume_objects_length:
            volume_objects_length = _volume_objects_length
        # Set the order and length
        _volume_order_length = len(str(volume_information["stats"]["order"])) + 1
        if _volume_order_length > volume_order_length:
            volume_order_length = _volume_order_length
        # Set the format and length
        _volume_format_length = len(str(volume_information["stats"]["format"])) + 1
        if _volume_format_length > volume_format_length:
            volume_format_length = _volume_format_length
        # Set the features and length
        _volume_features_length = (
            len(str(",".join(volume_information["stats"]["features"]))) + 1
        )
        if _volume_features_length > volume_features_length:
            volume_features_length = _volume_features_length

    # Format the output header
    volume_list_output.append(
        "{bold}{volume_header: <{volume_header_length}} {details_header: <{details_header_length}}{end_bold}".format(
            bold=ansiprint.bold(),
            end_bold=ansiprint.end(),
            volume_header_length=volume_name_length + volume_pool_length + 1,
            details_header_length=volume_size_length
            + volume_objects_length
            + volume_order_length
            + volume_format_length
            + volume_features_length
            + 4,
            volume_header="Volumes "
            + "".join(["-" for _ in range(8, volume_name_length + volume_pool_length)]),
            details_header="Details "
            + "".join(
                [
                    "-"
                    for _ in range(
                        8,
                        volume_size_length
                        + volume_objects_length
                        + volume_order_length
                        + volume_format_length
                        + volume_features_length
                        + 3,
                    )
                ]
            ),
        )
    )

    volume_list_output.append(
        "{bold}\
{volume_name: <{volume_name_length}} \
{volume_pool: <{volume_pool_length}} \
{volume_size: <{volume_size_length}} \
{volume_objects: <{volume_objects_length}} \
{volume_order: <{volume_order_length}} \
{volume_format: <{volume_format_length}} \
{volume_features: <{volume_features_length}} \
{end_bold}".format(
            bold=ansiprint.bold(),
            end_bold=ansiprint.end(),
            volume_name_length=volume_name_length,
            volume_pool_length=volume_pool_length,
            volume_size_length=volume_size_length,
            volume_objects_length=volume_objects_length,
            volume_order_length=volume_order_length,
            volume_format_length=volume_format_length,
            volume_features_length=volume_features_length,
            volume_name="Name",
            volume_pool="Pool",
            volume_size="Size",
            volume_objects="Objects",
            volume_order="Order",
            volume_format="Format",
            volume_features="Features",
        )
    )

    for volume_information in sorted(volume_list, key=lambda v: v["pool"] + v["name"]):
        volume_list_output.append(
            "{bold}\
{volume_name: <{volume_name_length}} \
{volume_pool: <{volume_pool_length}} \
{volume_size: <{volume_size_length}} \
{volume_objects: <{volume_objects_length}} \
{volume_order: <{volume_order_length}} \
{volume_format: <{volume_format_length}} \
{volume_features: <{volume_features_length}} \
{end_bold}".format(
                bold="",
                end_bold="",
                volume_name_length=volume_name_length,
                volume_pool_length=volume_pool_length,
                volume_size_length=volume_size_length,
                volume_objects_length=volume_objects_length,
                volume_order_length=volume_order_length,
                volume_format_length=volume_format_length,
                volume_features_length=volume_features_length,
                volume_name=volume_information["name"],
                volume_pool=volume_information["pool"],
                volume_size=volume_information["stats"]["size"],
                volume_objects=volume_information["stats"]["objects"],
                volume_order=volume_information["stats"]["order"],
                volume_format=volume_information["stats"]["format"],
                volume_features=",".join(volume_information["stats"]["features"]),
            )
        )

    return "\n".join(volume_list_output)


#
# Snapshot functions
#
def ceph_snapshot_info(config, pool, volume, snapshot):
    """
    Get information about Ceph snapshot
    API endpoint: GET /api/v1/storage/ceph/snapshot/{pool}/{volume}/{snapshot}
    API arguments:
    API schema: {json_data_object}
    """
    response = call_api(
        config,
        "get",
        "/storage/ceph/snapshot/{pool}/{volume}/{snapshot}".format(
            snapshot=snapshot, volume=volume, pool=pool
        ),
    )

    if response.status_code == 200:
        if isinstance(response.json(), list) and len(response.json()) != 1:
            # No exact match; return not found
            return False, "Snapshot not found."
        else:
            # Return a single instance if the response is a list
            if isinstance(response.json(), list):
                return True, response.json()[0]
            # This shouldn't happen, but is here just in case
            else:
                return True, response.json()
    else:
        return False, response.json().get("message", "")


def ceph_snapshot_list(config, limit, volume, pool):
    """
    Get list information about Ceph snapshots (limited by {limit}, by {pool}, or by {volume})
    API endpoint: GET /api/v1/storage/ceph/snapshot
    API arguments: limit={limit}, volume={volume}, pool={pool}
    API schema: [{json_data_object},{json_data_object},etc.]
    """
    params = dict()
    if limit:
        params["limit"] = limit
    if volume:
        params["volume"] = volume
    if pool:
        params["pool"] = pool

    response = call_api(config, "get", "/storage/ceph/snapshot", params=params)

    if response.status_code == 200:
        return True, response.json()
    else:
        return False, response.json().get("message", "")
def ceph_snapshot_add(config, pool, volume, snapshot):
    """
    Add new Ceph snapshot

    API endpoint: POST /api/v1/storage/ceph/snapshot
    API arguments: snapshot={snapshot}, volume={volume}, pool={pool}
    API schema: {"message":"{data}"}
    """
    params = {"snapshot": snapshot, "volume": volume, "pool": pool}
    response = call_api(config, "post", "/storage/ceph/snapshot", params=params)

    if response.status_code == 200:
        retstatus = True
    else:
        retstatus = False

    return retstatus, response.json().get("message", "")

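# Hedged sketch of a create-then-verify round trip built only from the wrappers
# in this module; names are illustrative and the snippet is comment-only:
#
#   ok, msg = ceph_snapshot_add(config, "vms", "disk0", "snap1")
#   print(msg)  # server-provided status message
#   ok, snaps = ceph_snapshot_list(config, None, "disk0", "vms")
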
def ceph_snapshot_remove(config, pool, volume, snapshot):
    """
    Remove Ceph snapshot

    API endpoint: DELETE /api/v1/storage/ceph/snapshot/{pool}/{volume}/{snapshot}
    API arguments:
    API schema: {"message":"{data}"}
    """
    response = call_api(
        config,
        "delete",
        "/storage/ceph/snapshot/{pool}/{volume}/{snapshot}".format(
            snapshot=snapshot, volume=volume, pool=pool
        ),
    )

    if response.status_code == 200:
        retstatus = True
    else:
        retstatus = False

    return retstatus, response.json().get("message", "")

def ceph_snapshot_modify(config, pool, volume, snapshot, new_name=None):
    """
    Modify Ceph snapshot

    API endpoint: PUT /api/v1/storage/ceph/snapshot/{pool}/{volume}/{snapshot}
    API arguments:
    API schema: {"message":"{data}"}
    """
    params = dict()
    if new_name:
        params["new_name"] = new_name

    response = call_api(
        config,
        "put",
        "/storage/ceph/snapshot/{pool}/{volume}/{snapshot}".format(
            snapshot=snapshot, volume=volume, pool=pool
        ),
        params=params,
    )

    if response.status_code == 200:
        retstatus = True
    else:
        retstatus = False

    return retstatus, response.json().get("message", "")

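# The only modification this wrapper currently forwards is a rename; a
# comment-only example with hypothetical names:
#
#   ok, msg = ceph_snapshot_modify(config, "vms", "disk0", "snap1", new_name="snap1-old")
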
def format_list_snapshot(snapshot_list):
    # Handle empty list
    if not snapshot_list:
        snapshot_list = list()

    snapshot_list_output = []

    snapshot_name_length = 5
    snapshot_volume_length = 7
    snapshot_pool_length = 5

    for snapshot_information in snapshot_list:
        snapshot_name = snapshot_information["snapshot"]
        snapshot_volume = snapshot_information["volume"]
        snapshot_pool = snapshot_information["pool"]

        # Set the Snapshot name length
        _snapshot_name_length = len(snapshot_name) + 1
        if _snapshot_name_length > snapshot_name_length:
            snapshot_name_length = _snapshot_name_length

        # Set the Snapshot volume length
        _snapshot_volume_length = len(snapshot_volume) + 1
        if _snapshot_volume_length > snapshot_volume_length:
            snapshot_volume_length = _snapshot_volume_length

        # Set the Snapshot pool length
        _snapshot_pool_length = len(snapshot_pool) + 1
        if _snapshot_pool_length > snapshot_pool_length:
            snapshot_pool_length = _snapshot_pool_length

    # Format the output header
    snapshot_list_output.append(
        "{bold}{snapshot_header: <{snapshot_header_length}}{end_bold}".format(
            bold=ansiprint.bold(),
            end_bold=ansiprint.end(),
            snapshot_header_length=snapshot_name_length
            + snapshot_volume_length
            + snapshot_pool_length
            + 2,
            snapshot_header="Snapshots "
            + "".join(
                [
                    "-"
                    for _ in range(
                        10,
                        snapshot_name_length
                        + snapshot_volume_length
                        + snapshot_pool_length
                        + 1,
                    )
                ]
            ),
        )
    )

    snapshot_list_output.append(
        "{bold}\
{snapshot_name: <{snapshot_name_length}} \
{snapshot_volume: <{snapshot_volume_length}} \
{snapshot_pool: <{snapshot_pool_length}} \
{end_bold}".format(
            bold=ansiprint.bold(),
            end_bold=ansiprint.end(),
            snapshot_name_length=snapshot_name_length,
            snapshot_volume_length=snapshot_volume_length,
            snapshot_pool_length=snapshot_pool_length,
            snapshot_name="Name",
            snapshot_volume="Volume",
            snapshot_pool="Pool",
        )
    )

    for snapshot_information in sorted(
        snapshot_list, key=lambda s: s["pool"] + s["volume"] + s["snapshot"]
    ):
        snapshot_name = snapshot_information["snapshot"]
        snapshot_volume = snapshot_information["volume"]
        snapshot_pool = snapshot_information["pool"]
        snapshot_list_output.append(
            "{bold}\
{snapshot_name: <{snapshot_name_length}} \
{snapshot_volume: <{snapshot_volume_length}} \
{snapshot_pool: <{snapshot_pool_length}} \
{end_bold}".format(
                bold="",
                end_bold="",
                snapshot_name_length=snapshot_name_length,
                snapshot_volume_length=snapshot_volume_length,
                snapshot_pool_length=snapshot_pool_length,
                snapshot_name=snapshot_name,
                snapshot_volume=snapshot_volume,
                snapshot_pool=snapshot_pool,
            )
        )

    return "\n".join(snapshot_list_output)

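# Illustrative (hypothetical) rendering of the table built above, with the
# ANSI bold codes omitted; column widths grow to fit the longest value:
#
#   Snapshots ------------
#   Name   Volume  Pool
#   snap1  disk0   vms
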
#
# Benchmark functions
#
def ceph_benchmark_run ( config , pool ) :
"""
Run a storage benchmark against { pool }
API endpoint : POST / api / v1 / storage / ceph / benchmark
API arguments : pool = { pool }
API schema : { message }
"""
2021-11-06 03:02:43 -04:00
params = { " pool " : pool }
response = call_api ( config , " post " , " /storage/ceph/benchmark " , params = params )
2020-08-24 14:57:52 -04:00
if response . status_code == 202 :
retvalue = True
2021-11-06 03:02:43 -04:00
retdata = " Task ID: {} " . format ( response . json ( ) [ " task_id " ] )
2020-08-24 14:57:52 -04:00
else :
retvalue = False
2021-11-06 03:02:43 -04:00
retdata = response . json ( ) . get ( " message " , " " )
2020-11-06 19:05:48 -05:00
2020-08-24 14:57:52 -04:00
return retvalue , retdata
2020-11-07 14:45:24 -05:00
2020-08-24 14:57:52 -04:00
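# The benchmark endpoint is asynchronous: a 202 response only acknowledges that
# the task was queued. A comment-only sketch of the submit-then-poll flow,
# assuming the finished job later appears in ceph_benchmark_list() output:
#
#   ok, msg = ceph_benchmark_run(config, "vms")
#   print(msg)  # e.g. "Task ID: <task-uuid>"
#   ok, results = ceph_benchmark_list(config, None)
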
def ceph_benchmark_list(config, job):
    """
    View results of one or more previous benchmark runs

    API endpoint: GET /api/v1/storage/ceph/benchmark
    API arguments: job={job}
    API schema: {results}
    """
    if job is not None:
        params = {"job": job}
    else:
        params = {}

    response = call_api(config, "get", "/storage/ceph/benchmark", params=params)

    if response.status_code == 200:
        retvalue = True
        retdata = response.json()
    else:
        retvalue = False
        retdata = response.json().get("message", "")

    return retvalue, retdata

def get_benchmark_list_results_legacy(benchmark_data):
    # Legacy (format 0) results may be stored as a JSON string rather than a dict
    if isinstance(benchmark_data, str):
        benchmark_data = loads(benchmark_data)

    benchmark_bandwidth = dict()
    benchmark_iops = dict()
    for test in ["seq_read", "seq_write", "rand_read_4K", "rand_write_4K"]:
        benchmark_bandwidth[test] = format_bytes_tohuman(
            int(benchmark_data[test]["overall"]["bandwidth"]) * 1024
        )
        benchmark_iops[test] = format_ops_tohuman(
            int(benchmark_data[test]["overall"]["iops"])
        )

    return benchmark_bandwidth, benchmark_iops

def get_benchmark_list_results_json(benchmark_data):
    benchmark_bandwidth = dict()
    benchmark_iops = dict()
    for test in ["seq_read", "seq_write", "rand_read_4K", "rand_write_4K"]:
        benchmark_test_data = benchmark_data[test]

        # Determine whether the read or the write side of the fio job saw the I/O
        active_class = None
        for io_class in ["read", "write"]:
            if benchmark_test_data["jobs"][0][io_class]["io_bytes"] > 0:
                active_class = io_class
        if active_class is not None:
            benchmark_bandwidth[test] = format_bytes_tohuman(
                int(benchmark_test_data["jobs"][0][active_class]["bw_bytes"])
            )
            benchmark_iops[test] = format_ops_tohuman(
                int(benchmark_test_data["jobs"][0][active_class]["iops"])
            )

    return benchmark_bandwidth, benchmark_iops

def get_benchmark_list_results(benchmark_format, benchmark_data):
    if benchmark_format == 0:
        benchmark_bandwidth, benchmark_iops = get_benchmark_list_results_legacy(
            benchmark_data
        )
    elif benchmark_format == 1:
        benchmark_bandwidth, benchmark_iops = get_benchmark_list_results_json(
            benchmark_data
        )

    seq_benchmark_bandwidth = "{} / {}".format(
        benchmark_bandwidth["seq_read"], benchmark_bandwidth["seq_write"]
    )
    seq_benchmark_iops = "{} / {}".format(
        benchmark_iops["seq_read"], benchmark_iops["seq_write"]
    )
    rand_benchmark_bandwidth = "{} / {}".format(
        benchmark_bandwidth["rand_read_4K"], benchmark_bandwidth["rand_write_4K"]
    )
    rand_benchmark_iops = "{} / {}".format(
        benchmark_iops["rand_read_4K"], benchmark_iops["rand_write_4K"]
    )

    return (
        seq_benchmark_bandwidth,
        seq_benchmark_iops,
        rand_benchmark_bandwidth,
        rand_benchmark_iops,
    )

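# Comment-only illustration of the dispatch above: format 0 (legacy) records
# store overall bandwidth in KB/s and IOPS as plain integers, format 1 records
# are raw fio JSON; either way the caller receives four "read / write" display
# pairs for the sequential and random columns:
#
#   (seq_bw, seq_iops, rand_bw, rand_iops) = get_benchmark_list_results(
#       benchmark_format, benchmark_data
#   )
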
def format_list_benchmark(config, benchmark_information):
    benchmark_list_output = []

    benchmark_job_length = 20
    benchmark_format_length = 6
    benchmark_bandwidth_length = dict()
    benchmark_iops_length = dict()

    # For this output, we're only showing the Sequential (seq_read and seq_write)
    # and 4k Random (rand_read_4K and rand_write_4K) results since we're showing
    # them for each test result.
    for test in ["seq_read", "seq_write", "rand_read_4K", "rand_write_4K"]:
        benchmark_bandwidth_length[test] = 7
        benchmark_iops_length[test] = 6

    benchmark_seq_bw_length = 15
    benchmark_seq_iops_length = 10
    benchmark_rand_bw_length = 15
    benchmark_rand_iops_length = 10

    for benchmark in benchmark_information:
        benchmark_job = benchmark["job"]
        benchmark_format = benchmark.get("test_format", 0)  # noqa: F841

        _benchmark_job_length = len(benchmark_job)
        if _benchmark_job_length > benchmark_job_length:
            benchmark_job_length = _benchmark_job_length

        if benchmark["benchmark_result"] == "Running":
            continue

        benchmark_data = benchmark["benchmark_result"]
        (
            seq_benchmark_bandwidth,
            seq_benchmark_iops,
            rand_benchmark_bandwidth,
            rand_benchmark_iops,
        ) = get_benchmark_list_results(benchmark_format, benchmark_data)

        _benchmark_seq_bw_length = len(seq_benchmark_bandwidth) + 1
        if _benchmark_seq_bw_length > benchmark_seq_bw_length:
            benchmark_seq_bw_length = _benchmark_seq_bw_length

        _benchmark_seq_iops_length = len(seq_benchmark_iops) + 1
        if _benchmark_seq_iops_length > benchmark_seq_iops_length:
            benchmark_seq_iops_length = _benchmark_seq_iops_length

        _benchmark_rand_bw_length = len(rand_benchmark_bandwidth) + 1
        if _benchmark_rand_bw_length > benchmark_rand_bw_length:
            benchmark_rand_bw_length = _benchmark_rand_bw_length

        _benchmark_rand_iops_length = len(rand_benchmark_iops) + 1
        if _benchmark_rand_iops_length > benchmark_rand_iops_length:
            benchmark_rand_iops_length = _benchmark_rand_iops_length

    # Format the output header line 1
    benchmark_list_output.append(
        "{bold}\
{benchmark_job: <{benchmark_job_length}} \
{seq_header: <{seq_header_length}} \
{rand_header: <{rand_header_length}} \
{end_bold}".format(
            bold=ansiprint.bold(),
            end_bold=ansiprint.end(),
            benchmark_job_length=benchmark_job_length + benchmark_format_length + 1,
            seq_header_length=benchmark_seq_bw_length + benchmark_seq_iops_length + 1,
            rand_header_length=benchmark_rand_bw_length
            + benchmark_rand_iops_length
            + 1,
            benchmark_job="Benchmarks "
            + "".join(
                [
                    "-"
                    for _ in range(
                        11, benchmark_job_length + benchmark_format_length + 2
                    )
                ]
            ),
            seq_header="Sequential (4M blocks) "
            + "".join(
                [
                    "-"
                    for _ in range(
                        23, benchmark_seq_bw_length + benchmark_seq_iops_length
                    )
                ]
            ),
            rand_header="Random (4K blocks) "
            + "".join(
                [
                    "-"
                    for _ in range(
                        19, benchmark_rand_bw_length + benchmark_rand_iops_length
                    )
                ]
            ),
        )
    )

    # Format the output header line 2
    benchmark_list_output.append(
        "{bold}\
{benchmark_job: <{benchmark_job_length}} \
{benchmark_format: <{benchmark_format_length}} \
{seq_benchmark_bandwidth: <{seq_benchmark_bandwidth_length}} \
{seq_benchmark_iops: <{seq_benchmark_iops_length}} \
{rand_benchmark_bandwidth: <{rand_benchmark_bandwidth_length}} \
{rand_benchmark_iops: <{rand_benchmark_iops_length}} \
{end_bold}".format(
            bold=ansiprint.bold(),
            end_bold=ansiprint.end(),
            benchmark_job_length=benchmark_job_length,
            benchmark_format_length=benchmark_format_length,
            seq_benchmark_bandwidth_length=benchmark_seq_bw_length,
            seq_benchmark_iops_length=benchmark_seq_iops_length,
            rand_benchmark_bandwidth_length=benchmark_rand_bw_length,
            rand_benchmark_iops_length=benchmark_rand_iops_length,
            benchmark_job="Job",
            benchmark_format="Format",
            seq_benchmark_bandwidth="R/W Bandwidth/s",
            seq_benchmark_iops="R/W IOPS",
            rand_benchmark_bandwidth="R/W Bandwidth/s",
            rand_benchmark_iops="R/W IOPS",
        )
    )

    for benchmark in benchmark_information:
        benchmark_job = benchmark["job"]
        benchmark_format = benchmark.get("test_format", 0)  # noqa: F841

        if benchmark["benchmark_result"] == "Running":
            seq_benchmark_bandwidth = "Running"
            seq_benchmark_iops = "Running"
            rand_benchmark_bandwidth = "Running"
            rand_benchmark_iops = "Running"
        else:
            benchmark_data = benchmark["benchmark_result"]
            (
                seq_benchmark_bandwidth,
                seq_benchmark_iops,
                rand_benchmark_bandwidth,
                rand_benchmark_iops,
            ) = get_benchmark_list_results(benchmark_format, benchmark_data)

        benchmark_list_output.append(
            "{bold}\
{benchmark_job: <{benchmark_job_length}} \
{benchmark_format: <{benchmark_format_length}} \
{seq_benchmark_bandwidth: <{seq_benchmark_bandwidth_length}} \
{seq_benchmark_iops: <{seq_benchmark_iops_length}} \
{rand_benchmark_bandwidth: <{rand_benchmark_bandwidth_length}} \
{rand_benchmark_iops: <{rand_benchmark_iops_length}} \
{end_bold}".format(
                bold="",
                end_bold="",
                benchmark_job_length=benchmark_job_length,
                benchmark_format_length=benchmark_format_length,
                seq_benchmark_bandwidth_length=benchmark_seq_bw_length,
                seq_benchmark_iops_length=benchmark_seq_iops_length,
                rand_benchmark_bandwidth_length=benchmark_rand_bw_length,
                rand_benchmark_iops_length=benchmark_rand_iops_length,
                benchmark_job=benchmark_job,
                benchmark_format=benchmark_format,
                seq_benchmark_bandwidth=seq_benchmark_bandwidth,
                seq_benchmark_iops=seq_benchmark_iops,
                rand_benchmark_bandwidth=rand_benchmark_bandwidth,
                rand_benchmark_iops=rand_benchmark_iops,
            )
        )

    return "\n".join(benchmark_list_output)

def format_info_benchmark(config, oformat, benchmark_information):
    # This matrix is a list of the possible format functions for a benchmark result
    # It is extensible in the future should newer formats be required.
    benchmark_matrix = {
        0: format_info_benchmark_legacy,
        1: format_info_benchmark_json,
    }

    benchmark_version = benchmark_information[0]["test_format"]

    if oformat == "json-pretty":
        return dumps(benchmark_information, indent=4)
    elif oformat == "json":
        return dumps(benchmark_information)
    else:
        return benchmark_matrix[benchmark_version](config, benchmark_information[0])

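# Comment-only sketch of the version dispatch above; the record is hypothetical
# and any oformat other than "json"/"json-pretty" falls through to the matrix:
#
#   record = {"job": "benchmark-x", "test_format": 1, "benchmark_result": {...}}
#   format_info_benchmark(config, "plain", [record])
#   # -> rendered by format_info_benchmark_json(); test_format 0 would route
#   #    to format_info_benchmark_legacy() instead
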
def format_info_benchmark_legacy(config, benchmark_information):
    if benchmark_information["benchmark_result"] == "Running":
        return "Benchmark test is still running."

    benchmark_details = benchmark_information["benchmark_result"]

    # Format a nice output; do this line-by-line then concat the elements at the end
    ainformation = []
    ainformation.append(
        "{}Storage Benchmark details:{}".format(ansiprint.bold(), ansiprint.end())
    )

    nice_test_name_map = {
        "seq_read": "Sequential Read (4M blocks)",
        "seq_write": "Sequential Write (4M blocks)",
        "rand_read_4M": "Random Read (4M blocks)",
        "rand_write_4M": "Random Write (4M blocks)",
        "rand_read_4K": "Random Read (4K blocks)",
        "rand_write_4K": "Random Write (4K blocks)",
        "rand_read_4K_lowdepth": "Random Read (4K blocks, single-queue)",
        "rand_write_4K_lowdepth": "Random Write (4K blocks, single-queue)",
    }

    test_name_length = 30
    overall_label_length = 12
    overall_column_length = 8
    bandwidth_label_length = 9
    bandwidth_column_length = 10
    iops_column_length = 6
    latency_column_length = 8
    cpuutil_label_length = 11
    cpuutil_column_length = 9

    # Work around old results that did not have these tests
    if "rand_read_4K_lowdepth" not in benchmark_details:
        del nice_test_name_map["rand_read_4K_lowdepth"]
        del nice_test_name_map["rand_write_4K_lowdepth"]

    for test in benchmark_details:
        # Work around old results that had these obsolete tests
        if test == "rand_read_256K" or test == "rand_write_256K":
            continue

        _test_name_length = len(nice_test_name_map[test])
        if _test_name_length > test_name_length:
            test_name_length = _test_name_length

        for element in benchmark_details[test]["overall"]:
            _element_length = len(benchmark_details[test]["overall"][element])
            if _element_length > overall_column_length:
                overall_column_length = _element_length

        for element in benchmark_details[test]["bandwidth"]:
            try:
                _element_length = len(
                    format_bytes_tohuman(
                        int(float(benchmark_details[test]["bandwidth"][element]))
                    )
                )
            except Exception:
                _element_length = len(benchmark_details[test]["bandwidth"][element])
            if _element_length > bandwidth_column_length:
                bandwidth_column_length = _element_length

        for element in benchmark_details[test]["iops"]:
            try:
                _element_length = len(
                    format_ops_tohuman(
                        int(float(benchmark_details[test]["iops"][element]))
                    )
                )
            except Exception:
                _element_length = len(benchmark_details[test]["iops"][element])
            if _element_length > iops_column_length:
                iops_column_length = _element_length

        for element in benchmark_details[test]["latency"]:
            _element_length = len(benchmark_details[test]["latency"][element])
            if _element_length > latency_column_length:
                latency_column_length = _element_length

        for element in benchmark_details[test]["cpu"]:
            _element_length = len(benchmark_details[test]["cpu"][element])
            if _element_length > cpuutil_column_length:
                cpuutil_column_length = _element_length

    for test in benchmark_details:
        # Work around old results that had these obsolete tests
        if test == "rand_read_256K" or test == "rand_write_256K":
            continue

        ainformation.append("")

        test_details = benchmark_details[test]

        # Top row (Headers)
        ainformation.append(
            "{bold}\
{test_name: <{test_name_length}} \
{overall_label: <{overall_label_length}} \
{overall: <{overall_length}} \
{bandwidth_label: <{bandwidth_label_length}} \
{bandwidth: <{bandwidth_length}} \
{iops: <{iops_length}} \
{latency: <{latency_length}} \
{cpuutil_label: <{cpuutil_label_length}} \
{cpuutil: <{cpuutil_length}} \
{end_bold}".format(
                bold=ansiprint.bold(),
                end_bold=ansiprint.end(),
                test_name="Test:",
                test_name_length=test_name_length,
                overall_label="",
                overall_label_length=overall_label_length,
                overall="General",
                overall_length=overall_column_length,
                bandwidth_label="",
                bandwidth_label_length=bandwidth_label_length,
                bandwidth="Bandwidth",
                bandwidth_length=bandwidth_column_length,
                iops="IOPS",
                iops_length=iops_column_length,
                latency="Latency (μs)",
                latency_length=latency_column_length,
                cpuutil_label="",
                cpuutil_label_length=cpuutil_label_length,
                cpuutil="CPU Util",
                cpuutil_length=cpuutil_column_length,
            )
        )

        # Second row (Test, Size, Min, User)
        ainformation.append(
            "{bold}\
{test_name: <{test_name_length}} \
{overall_label: >{overall_label_length}} \
{overall: <{overall_length}} \
{bandwidth_label: >{bandwidth_label_length}} \
{bandwidth: <{bandwidth_length}} \
{iops: <{iops_length}} \
{latency: <{latency_length}} \
{cpuutil_label: >{cpuutil_label_length}} \
{cpuutil: <{cpuutil_length}} \
{end_bold}".format(
                bold="",
                end_bold="",
                test_name=nice_test_name_map[test],
                test_name_length=test_name_length,
                overall_label="Test Size:",
                overall_label_length=overall_label_length,
                overall=format_bytes_tohuman(
                    int(test_details["overall"]["iosize"]) * 1024
                ),
                overall_length=overall_column_length,
                bandwidth_label="Min:",
                bandwidth_label_length=bandwidth_label_length,
                bandwidth=format_bytes_tohuman(
                    int(test_details["bandwidth"]["min"]) * 1024
                ),
                bandwidth_length=bandwidth_column_length,
                iops=format_ops_tohuman(int(test_details["iops"]["min"])),
                iops_length=iops_column_length,
                latency=test_details["latency"]["min"],
                latency_length=latency_column_length,
                cpuutil_label="User:",
                cpuutil_label_length=cpuutil_label_length,
                cpuutil=test_details["cpu"]["user"],
                cpuutil_length=cpuutil_column_length,
            )
        )

        # Third row (blank, BW/s, Max, System)
        ainformation.append(
            "{bold}\
{test_name: <{test_name_length}} \
{overall_label: >{overall_label_length}} \
{overall: <{overall_length}} \
{bandwidth_label: >{bandwidth_label_length}} \
{bandwidth: <{bandwidth_length}} \
{iops: <{iops_length}} \
{latency: <{latency_length}} \
{cpuutil_label: >{cpuutil_label_length}} \
{cpuutil: <{cpuutil_length}} \
{end_bold}".format(
                bold="",
                end_bold="",
                test_name="",
                test_name_length=test_name_length,
                overall_label="Bandwidth/s:",
                overall_label_length=overall_label_length,
                overall=format_bytes_tohuman(
                    int(test_details["overall"]["bandwidth"]) * 1024
                ),
                overall_length=overall_column_length,
                bandwidth_label="Max:",
                bandwidth_label_length=bandwidth_label_length,
                bandwidth=format_bytes_tohuman(
                    int(test_details["bandwidth"]["max"]) * 1024
                ),
                bandwidth_length=bandwidth_column_length,
                iops=format_ops_tohuman(int(test_details["iops"]["max"])),
                iops_length=iops_column_length,
                latency=test_details["latency"]["max"],
                latency_length=latency_column_length,
                cpuutil_label="System:",
                cpuutil_label_length=cpuutil_label_length,
                cpuutil=test_details["cpu"]["system"],
                cpuutil_length=cpuutil_column_length,
            )
        )

        # Fourth row (blank, IOPS, Mean, CtxSw)
        ainformation.append(
            "{bold}\
{test_name: <{test_name_length}} \
{overall_label: >{overall_label_length}} \
{overall: <{overall_length}} \
{bandwidth_label: >{bandwidth_label_length}} \
{bandwidth: <{bandwidth_length}} \
{iops: <{iops_length}} \
{latency: <{latency_length}} \
{cpuutil_label: >{cpuutil_label_length}} \
{cpuutil: <{cpuutil_length}} \
{end_bold}".format(
                bold="",
                end_bold="",
                test_name="",
                test_name_length=test_name_length,
                overall_label="IOPS:",
                overall_label_length=overall_label_length,
                overall=format_ops_tohuman(int(test_details["overall"]["iops"])),
                overall_length=overall_column_length,
                bandwidth_label="Mean:",
                bandwidth_label_length=bandwidth_label_length,
                bandwidth=format_bytes_tohuman(
                    int(float(test_details["bandwidth"]["mean"])) * 1024
                ),
                bandwidth_length=bandwidth_column_length,
                iops=format_ops_tohuman(int(float(test_details["iops"]["mean"]))),
                iops_length=iops_column_length,
                latency=test_details["latency"]["mean"],
                latency_length=latency_column_length,
                cpuutil_label="CtxSw:",
                cpuutil_label_length=cpuutil_label_length,
                cpuutil=test_details["cpu"]["ctxsw"],
                cpuutil_length=cpuutil_column_length,
            )
        )

        # Fifth row (blank, Runtime, StdDev, MajFault)
        ainformation.append(
            "{bold}\
{test_name: <{test_name_length}} \
{overall_label: >{overall_label_length}} \
{overall: <{overall_length}} \
{bandwidth_label: >{bandwidth_label_length}} \
{bandwidth: <{bandwidth_length}} \
{iops: <{iops_length}} \
{latency: <{latency_length}} \
{cpuutil_label: >{cpuutil_label_length}} \
{cpuutil: <{cpuutil_length}} \
{end_bold}".format(
                bold="",
                end_bold="",
                test_name="",
                test_name_length=test_name_length,
                overall_label="Runtime (s):",
                overall_label_length=overall_label_length,
                overall=int(test_details["overall"]["runtime"]) / 1000.0,
                overall_length=overall_column_length,
                bandwidth_label="StdDev:",
                bandwidth_label_length=bandwidth_label_length,
                bandwidth=format_bytes_tohuman(
                    int(float(test_details["bandwidth"]["stdev"])) * 1024
                ),
                bandwidth_length=bandwidth_column_length,
                iops=format_ops_tohuman(int(float(test_details["iops"]["stdev"]))),
                iops_length=iops_column_length,
                latency=test_details["latency"]["stdev"],
                latency_length=latency_column_length,
                cpuutil_label="MajFault:",
                cpuutil_label_length=cpuutil_label_length,
                cpuutil=test_details["cpu"]["majfault"],
                cpuutil_length=cpuutil_column_length,
            )
        )

        # Sixth row (blank, blank, Samples, MinFault)
        ainformation.append(
            "{bold}\
{test_name: <{test_name_length}} \
{overall_label: >{overall_label_length}} \
{overall: <{overall_length}} \
{bandwidth_label: >{bandwidth_label_length}} \
{bandwidth: <{bandwidth_length}} \
{iops: <{iops_length}} \
{latency: <{latency_length}} \
{cpuutil_label: >{cpuutil_label_length}} \
{cpuutil: <{cpuutil_length}} \
{end_bold}".format(
                bold="",
                end_bold="",
                test_name="",
                test_name_length=test_name_length,
                overall_label="",
                overall_label_length=overall_label_length,
                overall="",
                overall_length=overall_column_length,
                bandwidth_label="Samples:",
                bandwidth_label_length=bandwidth_label_length,
                bandwidth=test_details["bandwidth"]["numsamples"],
                bandwidth_length=bandwidth_column_length,
                iops=test_details["iops"]["numsamples"],
                iops_length=iops_column_length,
                latency="",
                latency_length=latency_column_length,
                cpuutil_label="MinFault:",
                cpuutil_label_length=cpuutil_label_length,
                cpuutil=test_details["cpu"]["minfault"],
                cpuutil_length=cpuutil_column_length,
            )
        )

    ainformation.append("")

    return "\n".join(ainformation)

def format_info_benchmark_json(config, benchmark_information):
    if benchmark_information["benchmark_result"] == "Running":
        return "Benchmark test is still running."

    benchmark_details = benchmark_information["benchmark_result"]

    # Format a nice output; do this line-by-line then concat the elements at the end
    ainformation = []
    ainformation.append(
        "{}Storage Benchmark details:{}".format(ansiprint.bold(), ansiprint.end())
    )

    nice_test_name_map = {
        "seq_read": "Sequential Read (4M blocks, queue depth 64)",
        "seq_write": "Sequential Write (4M blocks, queue depth 64)",
        "rand_read_4M": "Random Read (4M blocks, queue depth 64)",
        "rand_write_4M": "Random Write (4M blocks, queue depth 64)",
        "rand_read_4K": "Random Read (4K blocks, queue depth 64)",
        "rand_write_4K": "Random Write (4K blocks, queue depth 64)",
        "rand_read_4K_lowdepth": "Random Read (4K blocks, queue depth 1)",
        "rand_write_4K_lowdepth": "Random Write (4K blocks, queue depth 1)",
    }

    for test in benchmark_details:
        ainformation.append("")

        io_class = None
        for _io_class in ["read", "write"]:
            if benchmark_details[test]["jobs"][0][_io_class]["io_bytes"] > 0:
                io_class = _io_class
        if io_class is None:
            continue

        job_details = benchmark_details[test]["jobs"][0]

        # Calculate the unified latency categories (in us)
        latency_tree = list()
        for field in job_details["latency_ns"]:
            bucket = str(int(field) / 1000)
            latency_tree.append((bucket, job_details["latency_ns"][field]))
        for field in job_details["latency_us"]:
            bucket = field
            latency_tree.append((bucket, job_details["latency_us"][field]))
        for field in job_details["latency_ms"]:
            # That one annoying one
            if field == ">=2000":
                bucket = ">=2000000"
            else:
                bucket = str(int(field) * 1000)
            latency_tree.append((bucket, job_details["latency_ms"][field]))

        # Find the minimum entry without a zero
        useful_latency_tree = list()
        for element in latency_tree:
            if element[1] != 0:
                useful_latency_tree.append(element)

        max_rows = 9
        if len(useful_latency_tree) > 9:
            max_rows = len(useful_latency_tree)
        elif len(useful_latency_tree) < 9:
            while len(useful_latency_tree) < 9:
                useful_latency_tree.append(("", ""))

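        # Worked example (hypothetical fio bucket names): a "latency_ns" bucket
        # "500" becomes ("0.5", pct), a "latency_us" bucket "250" stays
        # ("250", pct), and a "latency_ms" bucket "4" becomes ("4000", pct), so
        # every bucket label is expressed in microseconds before zero-valued
        # entries are dropped and the list is padded to at least 9 rows.
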
        # Format the static data
        overall_label = [
            "Overall BW/s:",
            "Overall IOPS:",
            "Total I/O:",
            "Runtime (s):",
            "User CPU %:",
            "System CPU %:",
            "Ctx Switches:",
            "Major Faults:",
            "Minor Faults:",
        ]
        while len(overall_label) < max_rows:
            overall_label.append("")

        overall_data = [
            format_bytes_tohuman(int(job_details[io_class]["bw_bytes"])),
            format_ops_tohuman(int(job_details[io_class]["iops"])),
            format_bytes_tohuman(int(job_details[io_class]["io_bytes"])),
            job_details["job_runtime"] / 1000,
            job_details["usr_cpu"],
            job_details["sys_cpu"],
            job_details["ctx"],
            job_details["majf"],
            job_details["minf"],
        ]
        while len(overall_data) < max_rows:
            overall_data.append("")

        bandwidth_label = [
            "Min:",
            "Max:",
            "Mean:",
            "StdDev:",
            "Samples:",
            "",
            "",
            "",
            "",
        ]
        while len(bandwidth_label) < max_rows:
            bandwidth_label.append("")

        bandwidth_data = [
            format_bytes_tohuman(int(job_details[io_class]["bw_min"]) * 1024),
            format_bytes_tohuman(int(job_details[io_class]["bw_max"]) * 1024),
            format_bytes_tohuman(int(job_details[io_class]["bw_mean"]) * 1024),
            format_bytes_tohuman(int(job_details[io_class]["bw_dev"]) * 1024),
            job_details[io_class]["bw_samples"],
            "",
            "",
            "",
            "",
        ]
        while len(bandwidth_data) < max_rows:
            bandwidth_data.append("")

        iops_data = [
            format_ops_tohuman(int(job_details[io_class]["iops_min"])),
            format_ops_tohuman(int(job_details[io_class]["iops_max"])),
            format_ops_tohuman(int(job_details[io_class]["iops_mean"])),
            format_ops_tohuman(int(job_details[io_class]["iops_stddev"])),
            job_details[io_class]["iops_samples"],
            "",
            "",
            "",
            "",
        ]
        while len(iops_data) < max_rows:
            iops_data.append("")

        lat_data = [
            int(job_details[io_class]["lat_ns"]["min"]) / 1000,
            int(job_details[io_class]["lat_ns"]["max"]) / 1000,
            int(job_details[io_class]["lat_ns"]["mean"]) / 1000,
            int(job_details[io_class]["lat_ns"]["stddev"]) / 1000,
            "",
            "",
            "",
            "",
            "",
        ]
        while len(lat_data) < max_rows:
            lat_data.append("")

        # Format the dynamic buckets
        lat_bucket_label = list()
        lat_bucket_data = list()
        for element in useful_latency_tree:
            lat_bucket_label.append(element[0])
            lat_bucket_data.append(element[1])

        # Column default widths
        overall_label_length = 0
        overall_column_length = 0
        bandwidth_label_length = 0
        bandwidth_column_length = 11
        iops_column_length = 4
        latency_column_length = 12
        latency_bucket_label_length = 0

        # Column layout:
        #    General    Bandwidth   IOPS     Latency  Percentiles
        #    ---------  ----------  -------- -------- ---------------
        #    Size       Min         Min      Min      A
        #    BW         Max         Max      Max      B
        #    IOPS       Mean        Mean     Mean     ...
        #    Runtime    StdDev      StdDev   StdDev   Z
        #    UsrCPU     Samples     Samples
        #    SysCPU
        #    CtxSw
        #    MajFault
        #    MinFault

        # Set column widths
        for item in overall_label:
            _item_length = len(str(item))
            if _item_length > overall_label_length:
                overall_label_length = _item_length

        for item in overall_data:
            _item_length = len(str(item))
            if _item_length > overall_column_length:
                overall_column_length = _item_length

        test_name_length = len(nice_test_name_map[test])
        if test_name_length > overall_label_length + overall_column_length:
            _diff = test_name_length - (overall_label_length + overall_column_length)
            overall_column_length += _diff

        for item in bandwidth_label:
            _item_length = len(str(item))
            if _item_length > bandwidth_label_length:
                bandwidth_label_length = _item_length

        for item in bandwidth_data:
            _item_length = len(str(item))
            if _item_length > bandwidth_column_length:
                bandwidth_column_length = _item_length

        for item in iops_data:
            _item_length = len(str(item))
            if _item_length > iops_column_length:
                iops_column_length = _item_length

        for item in lat_data:
            _item_length = len(str(item))
            if _item_length > latency_column_length:
                latency_column_length = _item_length

        for item in lat_bucket_label:
            _item_length = len(str(item))
            if _item_length > latency_bucket_label_length:
                latency_bucket_label_length = _item_length

        # Top row (Headers)
        ainformation.append(
            "{bold}\
{overall_label: <{overall_label_length}} \
{bandwidth_label: <{bandwidth_label_length}} \
{bandwidth: <{bandwidth_length}} \
{iops: <{iops_length}} \
{latency: <{latency_length}} \
{latency_bucket_label: <{latency_bucket_label_length}} \
{latency_bucket} \
{end_bold}".format(
                bold=ansiprint.bold(),
                end_bold=ansiprint.end(),
                overall_label=nice_test_name_map[test],
                overall_label_length=overall_label_length,
                bandwidth_label="",
                bandwidth_label_length=bandwidth_label_length,
                bandwidth="Bandwidth/s",
                bandwidth_length=bandwidth_column_length,
                iops="IOPS",
                iops_length=iops_column_length,
                latency="Latency (μs)",
                latency_length=latency_column_length,
                latency_bucket_label="Latency Buckets (μs/%)",
                latency_bucket_label_length=latency_bucket_label_length,
                latency_bucket="",
            )
        )

        for idx in range(0, max_rows):
            # Data rows (one per unified row index)
            ainformation.append(
                "{bold}\
{overall_label: >{overall_label_length}} \
{overall: <{overall_length}} \
{bandwidth_label: >{bandwidth_label_length}} \
{bandwidth: <{bandwidth_length}} \
{iops: <{iops_length}} \
{latency: <{latency_length}} \
{latency_bucket_label: >{latency_bucket_label_length}} \
{latency_bucket} \
{end_bold}".format(
                    bold="",
                    end_bold="",
                    overall_label=overall_label[idx],
                    overall_label_length=overall_label_length,
                    overall=overall_data[idx],
                    overall_length=overall_column_length,
                    bandwidth_label=bandwidth_label[idx],
                    bandwidth_label_length=bandwidth_label_length,
                    bandwidth=bandwidth_data[idx],
                    bandwidth_length=bandwidth_column_length,
                    iops=iops_data[idx],
                    iops_length=iops_column_length,
                    latency=lat_data[idx],
                    latency_length=latency_column_length,
                    latency_bucket_label=lat_bucket_label[idx],
                    latency_bucket_label_length=latency_bucket_label_length,
                    latency_bucket=lat_bucket_data[idx],
                )
            )

    return "\n".join(ainformation)