Compare commits

8 commits:

* f164d898c1
* 195f31501c
* a8899a1d66
* 817dffcf30
* eda2a57a73
* 135d28e60b
* e7d7378bae
* 799c3e8d5d
CHANGELOG.md (+14)

@@ -1,5 +1,19 @@
 ## PVC Changelog

+###### [v0.9.45](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.45)
+
+* [Node Daemon] Fixes an ordering issue with pvcnoded.service
+* [CLI Client] Fixes bad calls to echo() without argument
+
+###### [v0.9.44](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.44)
+
+* [Node Daemon] Adds a Munin plugin for Ceph utilization
+* [CLI] Fixes timeouts for long-running API commands
+
+###### [v0.9.44](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.44)
+
+* [CLI] Fixes timeout issues with long-running API commands
+
 ###### [v0.9.43](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.43)

 * [Packaging] Fixes a bad test in postinst
@@ -25,7 +25,7 @@ import yaml
 from distutils.util import strtobool as dustrtobool

 # Daemon version
-version = "0.9.43"
+version = "0.9.45"

 # API version
 API_VERSION = 1.0
@@ -15,7 +15,7 @@ cp -a debian/changelog client-cli/setup.py ${tmpdir}/
 cp -a node-daemon/pvcnoded/Daemon.py ${tmpdir}/node-Daemon.py
 cp -a api-daemon/pvcapid/Daemon.py ${tmpdir}/api-Daemon.py
 # Replace the "base" version with the git revision version
-sed -i "s/version = '${base_ver}'/version = '${new_ver}'/" node-daemon/pvcnoded/Daemon.py api-daemon/pvcapid/Daemon.py client-cli/setup.py
+sed -i "s/version = \"${base_ver}\"/version = \"${new_ver}\"/" node-daemon/pvcnoded/Daemon.py api-daemon/pvcapid/Daemon.py client-cli/setup.py
 sed -i "s/${base_ver}-0/${new_ver}/" debian/changelog
 cat <<EOF > debian/changelog
 pvc (${new_ver}) unstable; urgency=medium
@@ -123,8 +123,10 @@ def call_api(
     params=None,
     data=None,
     files=None,
-    timeout=3,
 ):
+    # Set the connect timeout to 3 seconds but extremely long (48 hour) data timeout
+    timeout = (3.05, 172800)
+
     # Craft the URI
     uri = "{}://{}{}{}".format(
         config["api_scheme"], config["api_host"], config["api_prefix"], request_uri
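The (3.05, 172800) pair is the (connect, read) tuple form of a timeout, which suggests the HTTP layer here is the requests library; the connect timeout stays short while the read timeout allows up to 48 hours of data transfer. A minimal sketch of that behaviour, assuming requests and using a hypothetical fetch() helper rather than the project's actual call_api():

    # Minimal sketch, assuming `requests`; fetch() is a hypothetical stand-in.
    import requests

    CONNECT_TIMEOUT = 3.05  # fail fast if the API host is unreachable
    READ_TIMEOUT = 172800   # allow long-running commands up to 48 hours

    def fetch(uri, params=None):
        # requests accepts a single float or a (connect, read) tuple; the tuple
        # keeps connection failures quick without cutting off slow responses.
        return requests.get(uri, params=params, timeout=(CONNECT_TIMEOUT, READ_TIMEOUT))

With the long read timeout set once here, the per-call timeout selection in vm_state() in the next hunk becomes unnecessary and is removed.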
@@ -377,19 +377,12 @@ def vm_state(config, vm, target_state, force=False, wait=False):
     API arguments: state={state}, wait={wait}
     API schema: {"message":"{data}"}
     """
-    if wait or target_state == "disable":
-        timeout = 300
-    else:
-        timeout = 3
-
     params = {
         "state": target_state,
         "force": str(force).lower(),
         "wait": str(wait).lower(),
     }
-    response = call_api(
-        config, "post", "/vm/{vm}/state".format(vm=vm), params=params, timeout=timeout
-    )
+    response = call_api(config, "post", "/vm/{vm}/state".format(vm=vm), params=params)

     if response.status_code == 200:
         retstatus = True
@@ -364,7 +364,7 @@ def cluster_list(raw):
     if not raw:
         # Display the data nicely
         echo("Available clusters:")
-        echo()
+        echo("")
         echo(
             "{bold}{name: <{name_length}} {description: <{description_length}} {address: <{address_length}} {port: <{port_length}} {scheme: <{scheme_length}} {api_key: <{api_key_length}}{end_bold}".format(
                 bold=ansiprint.bold(),
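The same echo() to echo("") substitution appears in the node and provisioner hunks that follow, matching the changelog entry about bad calls to echo() without an argument. Plain click.echo() accepts no arguments and simply prints a newline, so the sketch below illustrates only one hypothetical way such calls can break: if echo is rebound to a wrapper whose message parameter has no default, bare echo() raises TypeError while echo("") still prints the intended blank line.

    # Hypothetical wrapper for illustration; not the project's actual code.
    import click

    def echo(message, nl=True):
        # message is required here, unlike click.echo(message=None, ...)
        click.echo(message, nl=nl)

    echo("")   # prints an empty line
    # echo()  # TypeError: echo() missing 1 required positional argument: 'message'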
@@ -481,7 +481,7 @@ def node_secondary(node, wait):
             " These jobs will continue executing, but status will not be visible until the current"
         )
         echo(" node returns to primary state.")
-        echo()
+        echo("")

     retcode, retmsg = pvc_node.node_coordinator_state(config, node, "secondary")
     if not retcode:
@@ -534,7 +534,7 @@ def node_primary(node, wait):
             " These jobs will continue executing, but status will not be visible until the current"
         )
         echo(" node returns to primary state.")
-        echo()
+        echo("")

     retcode, retmsg = pvc_node.node_coordinator_state(config, node, "primary")
     if not retcode:
@@ -5329,7 +5329,7 @@ def provisioner_create(name, profile, wait_flag, define_flag, start_flag, script
     task_id = retdata

     echo("Task ID: {}".format(task_id))
-    echo()
+    echo("")

     # Wait for the task to start
     echo("Waiting for task to start...", nl=False)
@@ -5340,7 +5340,7 @@ def provisioner_create(name, profile, wait_flag, define_flag, start_flag, script
             break
         echo(".", nl=False)
     echo(" done.")
-    echo()
+    echo("")

     # Start following the task state, updating progress as we go
     total_task = task_status.get("total")
@@ -5371,7 +5371,7 @@ def provisioner_create(name, profile, wait_flag, define_flag, start_flag, script
             if task_status.get("state") == "SUCCESS":
                 bar.update(total_task - last_task)

-    echo()
+    echo("")
     retdata = task_status.get("state") + ": " + task_status.get("status")

     cleanup(retcode, retdata)
@@ -2,7 +2,7 @@ from setuptools import setup

 setup(
     name="pvc",
-    version="0.9.43",
+    version="0.9.45",
     packages=["pvc", "pvc.cli_lib"],
     install_requires=[
         "Click",
debian/changelog (+20)

@@ -1,3 +1,23 @@
+pvc (0.9.45-0) unstable; urgency=high
+
+  * [Node Daemon] Fixes an ordering issue with pvcnoded.service
+  * [CLI Client] Fixes bad calls to echo() without argument
+
+ -- Joshua M. Boniface <joshua@boniface.me>  Thu, 25 Nov 2021 09:34:20 -0500
+
+pvc (0.9.44-0) unstable; urgency=high
+
+  * [Node Daemon] Adds a Munin plugin for Ceph utilization
+  * [CLI] Fixes timeouts for long-running API commands
+
+ -- Joshua M. Boniface <joshua@boniface.me>  Thu, 11 Nov 2021 16:20:38 -0500
+
+pvc (0.9.44-0) unstable; urgency=high
+
+  * [CLI] Fixes timeout issues with long-running API commands
+
+ -- Joshua M. Boniface <joshua@boniface.me>  Thu, 11 Nov 2021 16:19:32 -0500
+
 pvc (0.9.43-0) unstable; urgency=high

   * [Packaging] Fixes a bad test in postinst
node-daemon/monitoring/munin/ceph_utilization (new executable file, +325)

@@ -0,0 +1,325 @@
#!/bin/bash
# -*- sh -*-

: << =cut

=head1 NAME

ceph_utilization - Plugin to monitor a Ceph cluster's utilization

=head1 CONFIGURATION

Defaults (no config required) for the total utilization thresholds:

[ceph_utilization]
env.warning 80
env.critical 90

=head1 AUTHOR

Joshua Boniface <joshua@boniface.me>

=head1 LICENSE

GPLv3

=head1 BUGS

=back

=head1 MAGIC MARKERS

#%# family=auto
#%# capabilities=autoconf

=cut

. "$MUNIN_LIBDIR/plugins/plugin.sh"

is_multigraph

warning=80
critical=90

RADOSDF_CMD="/usr/bin/sudo /usr/bin/rados df --format json"
OSDDF_CMD="/usr/bin/sudo /usr/bin/ceph osd df --format json"
JQ_CMD="/usr/bin/jq"

output_usage() {
    echo "This plugin outputs information about a Ceph cluster"
    exit 0
}

output_autoconf() {
    $RADOSDF_CMD &>/dev/null
    radosdf_ret=$?
    $OSDDF_CMD &>/dev/null
    osddf_ret=$?
    $JQ_CMD --version &>/dev/null
    jq_ret=$?

    if [[ ${radosdf_ret} -eq 0 && ${osddf_ret} -eq 0 && ${jq_ret} -eq 0 ]]; then
        echo "yes"
    elif [[ ${radosdf_ret} -ne 0 || ${osddf_ret} -ne 0 ]]; then
        echo "no (no 'rados' or 'ceph' command found)"
    elif [[ ${jq_ret} -ne 0 ]]; then
        echo "no (no 'jq' command found)"
    else
        echo "no (general failure)"
    fi
}

output_config() {
    # Graph set 1 - Ceph cluster utilization
    echo 'multigraph cluster_utilization'
    echo 'graph_title Cluster Utilization'
    echo 'graph_args --base 1000'
    echo 'graph_vlabel % Utilization'
    echo 'graph_category ceph'
    echo 'graph_info This graph shows the cluster utilization.'

    echo 'cluster_utilization.label Cluster Utilization'
    echo 'cluster_utilization.type GAUGE'
    echo 'cluster_utilization.max 100'
    echo 'cluster_utilization.info Percentage utilization of the cluster.'
    print_warning cluster_utilization
    print_critical cluster_utilization

    # Graph set 2 - Ceph cluster objects
    echo 'multigraph cluster_objects'
    echo 'graph_title Cluster Objects'
    echo 'graph_args --base 1000'
    echo 'graph_vlabel Objects'
    echo 'graph_category ceph'
    echo 'graph_info This graph shows the cluster object count.'

    echo 'cluster_objects.label Cluster Objects'
    echo 'cluster_objects.type GAUGE'
    echo 'cluster_objects.min 0'
    echo 'cluster_objects.info Total objects in the cluster.'

    POOL_LIST="$( $RADOSDF_CMD | jq -r '.pools[].name' )"

    # Graph set 3 - Cluster I/O Bytes Lifetime
    echo 'multigraph pool_rdbytes'
    echo "graph_title IO Bytes (Lifetime)"
    echo "graph_args --base 1000"
    echo "graph_vlabel bytes read (-) / write (+)"
    echo "graph_category ceph"
    echo "graph_info This graph shows the lifetime cluster bytes."
    for pool in ${POOL_LIST}; do
        # Graph set 3 - Cluster I/O Bytes Lifetime
        echo "pool_rdbytes_${pool}.label Pool ${pool} IO (Bytes)"
        echo "pool_rdbytes_${pool}.type GAUGE"
        echo "pool_rdbytes_${pool}.min 0"
        echo "pool_rdbytes_${pool}.draw LINE1"
        echo "pool_rdbytes_${pool}.graph no"
        echo "pool_wrbytes_${pool}.label Pool ${pool} IO (Bytes)"
        echo "pool_wrbytes_${pool}.type GAUGE"
        echo "pool_wrbytes_${pool}.min 0"
        echo "pool_wrbytes_${pool}.draw LINE1"
        echo "pool_wrbytes_${pool}.negative pool_rdbytes_${pool}"
    done

    # Graph set 4 - Cluster I/O Operations Lifetime
    echo 'multigraph pool_rdops'
    echo "graph_title IO Operations (Lifetime)"
    echo "graph_args --base 1000"
    echo "graph_vlabel IOs read (-) / write (+)"
    echo "graph_category ceph"
    echo "graph_info This graph shows the lifetime cluster IOs."
    for pool in ${POOL_LIST}; do
        # Graph set 4 - Cluster I/O Operations Lifetime
        echo "pool_rdops_${pool}.label Pool ${pool} IO (Ops)"
        echo "pool_rdops_${pool}.type GAUGE"
        echo "pool_rdops_${pool}.min 0"
        echo "pool_rdops_${pool}.draw LINE1"
        echo "pool_rdops_${pool}.graph no"
        echo "pool_wrops_${pool}.label Pool ${pool} IO (Ops)"
        echo "pool_wrops_${pool}.type GAUGE"
        echo "pool_wrops_${pool}.min 0"
        echo "pool_wrops_${pool}.draw LINE1"
        echo "pool_wrops_${pool}.negative pool_rdops_${pool}"
    done

    # Graph set 5 - Ceph pool objects
    echo 'multigraph pool_objects_total'
    echo "graph_title Objects"
    echo "graph_args --base 1000"
    echo "graph_vlabel Objects"
    echo "graph_category ceph"
    echo "graph_info This graph shows the cluster object count."
    for pool in ${POOL_LIST}; do
        # Graph set 5 - Ceph pool objects
        echo "pool_objects_total_${pool}.label Pool ${pool} Objects"
        echo "pool_objects_total_${pool}.type GAUGE"
        echo "pool_objects_total_${pool}.min 0"
        echo "pool_objects_total_${pool}.info Total objects in the pool."
    done

    # Graph set 6 - Ceph pool objects copies
    echo 'multigraph pool_objects_copies'
    echo "graph_title Objects Copies"
    echo "graph_args --base 1000"
    echo "graph_vlabel Objects"
    echo "graph_category ceph"
    echo "graph_info This graph shows the cluster object copy count."
    for pool in ${POOL_LIST}; do
        # Graph set 6 - Ceph pool objects copies
        echo "pool_objects_copies_${pool}.label Pool ${pool} Objects Copies"
        echo "pool_objects_copies_${pool}.type GAUGE"
        echo "pool_objects_copies_${pool}.min 0"
        echo "pool_objects_copies_${pool}.info Total object copies in the pool."
    done

    # Graph set 7 - Ceph pool objects degraded
    echo 'multigraph pool_objects_degraded'
    echo "graph_title Objects Degraded"
    echo "graph_args --base 1000"
    echo "graph_vlabel Objects"
    echo "graph_category ceph"
    echo "graph_info This graph shows the cluster object degraded count."
    for pool in ${POOL_LIST}; do
        # Graph set 7 - Ceph pool objects degraded
        echo "pool_objects_degraded_${pool}.label Pool ${pool} Objects Degraded"
        echo "pool_objects_degraded_${pool}.type GAUGE"
        echo "pool_objects_degraded_${pool}.min 0"
        echo "pool_objects_degraded_${pool}.info Total degraded objects in the pool."
    done

    OSD_LIST="$( $OSDDF_CMD | jq -r '.nodes[].id' | sort -n )"

    # Graph set 8 - Ceph OSD status
    echo 'multigraph osd_status'
    echo "graph_title OSD Status"
    echo "graph_args --base 1000"
    echo "graph_vlabel Status Up (1) / Down (0)"
    echo "graph_category ceph"
    echo "graph_info This graph shows the OSD status."
    for osd in ${OSD_LIST}; do
        # Graph set 8 - Ceph OSD status
        echo "osd_status_${osd}.label osd.${osd} Status"
        echo "osd_status_${osd}.type GAUGE"
        echo "osd_status_${osd}.min 0"
        echo "osd_status_${osd}.max 1"
        echo "osd_status_${osd}.info Status of the OSD."
    done

    # Graph set 9 - Ceph OSD utilization
    echo 'multigraph osd_utilization'
    echo "graph_title OSD Utilization"
    echo "graph_args --base 1000"
    echo "graph_vlabel % Utilization"
    echo "graph_category ceph"
    echo "graph_info This graph shows the OSD utilization."
    for osd in ${OSD_LIST}; do
        # Graph set 9 - Ceph OSD utilization
        echo "osd_utilization_${osd}.label osd.${osd} Utilization"
        echo "osd_utilization_${osd}.type GAUGE"
        echo "osd_utilization_${osd}.max 100"
        echo "osd_utilization_${osd}.info Utilization of the OSD."
    done

    exit 0
}

output_values() {
    RADOS_JSON_OUTPUT="$( $RADOSDF_CMD )"
    OSD_JSON_OUTPUT="$( $OSDDF_CMD )"

    cluster_utilization="$( $JQ_CMD -r '.total_used' <<<"${RADOS_JSON_OUTPUT}" )"
    cluster_size="$( $JQ_CMD -r '.total_space' <<<"${RADOS_JSON_OUTPUT}" )"
    pct_utilization="$( echo "scale=4; ${cluster_utilization} / ${cluster_size} * 100" | bc -l )"
    cluster_objects="$( $JQ_CMD -r '.total_objects' <<<"${RADOS_JSON_OUTPUT}" )"

    echo "multigraph cluster_utilization"
    echo "cluster_utilization.value ${pct_utilization}"
    echo "multigraph cluster_objects"
    echo "cluster_objects.value ${cluster_objects}"

    cluster_pool_count="$( $JQ_CMD -r '.pools[].name' <<<"${RADOS_JSON_OUTPUT}" | wc -l )"
    echo "multigraph pool_rdbytes"
    for id in $( seq 0 $(( ${cluster_pool_count} - 1 )) ); do
        pool="$( $JQ_CMD -r ".pools[$id].name" <<<"${RADOS_JSON_OUTPUT}" )"
        pool_rdbytes="$( $JQ_CMD -r ".pools[$id].read_bytes" <<<"${RADOS_JSON_OUTPUT}" )"
        pool_wrbytes="$( $JQ_CMD -r ".pools[$id].write_bytes" <<<"${RADOS_JSON_OUTPUT}" )"
        echo "pool_rdbytes_${pool}.value ${pool_rdbytes}"
        echo "pool_wrbytes_${pool}.value ${pool_wrbytes}"
    done

    echo "multigraph pool_rdops"
    for id in $( seq 0 $(( ${cluster_pool_count} - 1 )) ); do
        pool="$( $JQ_CMD -r ".pools[$id].name" <<<"${RADOS_JSON_OUTPUT}" )"
        pool_rdops="$( $JQ_CMD -r ".pools[$id].read_ops" <<<"${RADOS_JSON_OUTPUT}" )"
        pool_wrops="$( $JQ_CMD -r ".pools[$id].write_ops" <<<"${RADOS_JSON_OUTPUT}" )"
        echo "pool_rdops_${pool}.value ${pool_rdops}"
        echo "pool_wrops_${pool}.value ${pool_wrops}"
    done

    echo "multigraph pool_objects_total"
    for id in $( seq 0 $(( ${cluster_pool_count} - 1 )) ); do
        pool="$( $JQ_CMD -r ".pools[$id].name" <<<"${RADOS_JSON_OUTPUT}" )"
        pool_objects="$( $JQ_CMD -r ".pools[$id].num_objects" <<<"${RADOS_JSON_OUTPUT}" )"
        echo "pool_objects_total_${pool}.value ${pool_objects}"
    done

    echo "multigraph pool_objects_copies"
    for id in $( seq 0 $(( ${cluster_pool_count} - 1 )) ); do
        pool="$( $JQ_CMD -r ".pools[$id].name" <<<"${RADOS_JSON_OUTPUT}" )"
        pool_copies="$( $JQ_CMD -r ".pools[$id].num_object_copies" <<<"${RADOS_JSON_OUTPUT}" )"
        echo "pool_objects_copies_${pool}.value ${pool_copies}"
    done

    echo "multigraph pool_objects_degraded"
    for id in $( seq 0 $(( ${cluster_pool_count} - 1 )) ); do
        pool="$( $JQ_CMD -r ".pools[$id].name" <<<"${RADOS_JSON_OUTPUT}" )"
        pool_degraded="$( $JQ_CMD -r ".pools[$id].num_objects_degraded" <<<"${RADOS_JSON_OUTPUT}" )"
        echo "pool_objects_degraded_${pool}.value ${pool_degraded}"
    done

    cluster_osd_count="$( $JQ_CMD -r '.nodes[].id' <<<"${OSD_JSON_OUTPUT}" | wc -l)"
    echo "multigraph osd_status"
    for id in $( seq 0 $(( ${cluster_osd_count} - 1 )) ); do
        osd="$( $JQ_CMD -r ".nodes[$id].id" <<<"${OSD_JSON_OUTPUT}" )"
        osd_status="$( $JQ_CMD -r ".nodes[$id].status" <<<"${OSD_JSON_OUTPUT}" )"
        case ${osd_status} in
            up)
                osd_status="1"
                ;;
            *)
                osd_status="0"
                ;;
        esac
        echo "osd_status_${osd}.value ${osd_status}"
    done

    echo "multigraph osd_utilization"
    for id in $( seq 0 $(( ${cluster_osd_count} - 1 )) ); do
        osd="$( $JQ_CMD -r ".nodes[$id].id" <<<"${OSD_JSON_OUTPUT}" )"
        osd_utilization="$( $JQ_CMD -r ".nodes[$id].utilization" <<<"${OSD_JSON_OUTPUT}" )"
        echo "osd_utilization_${osd}.value ${osd_utilization}"
    done
}

case $# in
    0)
        output_values
        ;;
    1)
        case $1 in
            autoconf)
                output_autoconf
                ;;
            config)
                output_config
                ;;
            *)
                output_usage
                exit 1
                ;;
        esac
        ;;
    *)
        output_usage
        exit 1
esac
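For reference, the headline figure the plugin derives with jq and bc is simply total_used / total_space expressed as a percentage. A minimal Python sketch of the same calculation, assuming only the total_used and total_space fields the plugin itself reads from rados df --format json:

    # Minimal sketch of the plugin's cluster-utilization math; assumes the
    # `rados` CLI is available and its JSON output exposes total_used/total_space.
    import json
    import subprocess

    def cluster_utilization_percent():
        raw = subprocess.run(
            ["rados", "df", "--format", "json"],
            capture_output=True, check=True, text=True,
        ).stdout
        data = json.loads(raw)
        # Equivalent to: echo "scale=4; used / space * 100" | bc -l
        return data["total_used"] / data["total_space"] * 100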
@@ -2,7 +2,8 @@

 [Unit]
 Description = Parallel Virtual Cluster node daemon
-After = network-online.target
+After = network.target
+Wants = network-online.target
 PartOf = pvc.target

 [Service]
@@ -48,7 +48,7 @@ import re
 import json

 # Daemon version
-version = "0.9.43"
+version = "0.9.45"


 ##########################################################