Compare commits
196 commits: v0.9.50 ... d2757004db
CHANGELOG.md (13 changes)
@@ -1,18 +1,5 @@
 ## PVC Changelog
 
-###### [v0.9.50](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.50)
-
-  * [Node Daemon/API/CLI] Adds free memory node selector
-  * [Node Daemon] Fixes bug sending space-containing detect disk strings
-
-###### [v0.9.49](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.49)
-
-  * [Node Daemon] Fixes bugs with OSD stat population on creation
-  * [Node Daemon/API] Adds additional information to Zookeeper about OSDs
-  * [Node Daemon] Refactors OSD removal for improved safety
-  * [Node Daemon/API/CLI] Adds explicit support for replacing and refreshing (reimporting) OSDs
-  * [API/CLI] Fixes a language inconsistency about "router mode"
-
 ###### [v0.9.48](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.48)
 
   * [CLI] Fixes situation where only a single cluster is available
@@ -25,7 +25,7 @@ import yaml
 from distutils.util import strtobool as dustrtobool
 
 # Daemon version
-version = "0.9.50"
+version = "0.9.48"
 
 # API version
 API_VERSION = 1.0
@@ -1002,7 +1002,7 @@ class API_VM_Root(Resource):
                 type: string
               node_selector:
                 type: string
-                description: The selector used to determine candidate nodes during migration; see 'target_selector' in the node daemon configuration reference
+                description: The selector used to determine candidate nodes during migration
               node_autostart:
                 type: boolean
                 description: Whether to autostart the VM when its node returns to ready domain state
@@ -1252,7 +1252,7 @@ class API_VM_Root(Resource):
             {"name": "node"},
             {
                 "name": "selector",
-                "choices": ("mem", "memfree", "vcpus", "load", "vms", "none"),
+                "choices": ("mem", "vcpus", "load", "vms", "none"),
                 "helptext": "A valid selector must be specified",
             },
             {"name": "autostart"},
@@ -1297,15 +1297,13 @@ class API_VM_Root(Resource):
            name: selector
            type: string
            required: false
-           description: The selector used to determine candidate nodes during migration; see 'target_selector' in the node daemon configuration reference
-           default: none
+           description: The selector used to determine candidate nodes during migration
+           default: mem
            enum:
              - mem
-             - memfree
              - vcpus
              - load
              - vms
-             - none (cluster default)
          - in: query
            name: autostart
            type: boolean
@@ -1399,7 +1397,7 @@ class API_VM_Element(Resource):
             {"name": "node"},
             {
                 "name": "selector",
-                "choices": ("mem", "memfree", "vcpus", "load", "vms", "none"),
+                "choices": ("mem", "vcpus", "load", "vms", "none"),
                 "helptext": "A valid selector must be specified",
             },
             {"name": "autostart"},
@@ -1446,11 +1444,10 @@ class API_VM_Element(Resource):
            name: selector
            type: string
            required: false
-           description: The selector used to determine candidate nodes during migration; see 'target_selector' in the node daemon configuration reference
-           default: none
+           description: The selector used to determine candidate nodes during migration
+           default: mem
            enum:
              - mem
-             - memfree
              - vcpus
              - load
              - vms
@@ -1629,7 +1626,7 @@ class API_VM_Metadata(Resource):
                 type: string
               node_selector:
                 type: string
-                description: The selector used to determine candidate nodes during migration; see 'target_selector' in the node daemon configuration reference
+                description: The selector used to determine candidate nodes during migration
               node_autostart:
                 type: string
                 description: Whether to autostart the VM when its node returns to ready domain state
@@ -1649,7 +1646,7 @@ class API_VM_Metadata(Resource):
             {"name": "limit"},
             {
                 "name": "selector",
-                "choices": ("mem", "memfree", "vcpus", "load", "vms", "none"),
+                "choices": ("mem", "vcpus", "load", "vms", "none"),
                 "helptext": "A valid selector must be specified",
             },
             {"name": "autostart"},
@@ -1678,14 +1675,12 @@ class API_VM_Metadata(Resource):
            name: selector
            type: string
            required: false
-           description: The selector used to determine candidate nodes during migration; see 'target_selector' in the node daemon configuration reference
+           description: The selector used to determine candidate nodes during migration
            enum:
              - mem
-             - memfree
              - vcpus
              - load
              - vms
-             - none (cluster default)
          - in: query
            name: autostart
            type: boolean
@@ -4102,102 +4097,6 @@ class API_Storage_Ceph_OSD_Element(Resource):
         """
         return api_helper.ceph_osd_list(osdid)
 
-    @RequestParser(
-        [
-            {
-                "name": "device",
-                "required": True,
-                "helptext": "A valid device or detect string must be specified.",
-            },
-            {
-                "name": "weight",
-                "required": True,
-                "helptext": "An OSD weight must be specified.",
-            },
-            {
-                "name": "yes-i-really-mean-it",
-                "required": True,
-                "helptext": "Please confirm that 'yes-i-really-mean-it'.",
-            },
-        ]
-    )
-    @Authenticator
-    def post(self, osdid, reqargs):
-        """
-        Replace a Ceph OSD in the cluster
-        Note: This task may take up to 30s to complete and return
-        ---
-        tags:
-          - storage / ceph
-        parameters:
-          - in: query
-            name: device
-            type: string
-            required: true
-            description: The block device (e.g. "/dev/sdb", "/dev/disk/by-path/...", etc.) or detect string ("detect:NAME:SIZE:ID") to replace the OSD onto
-          - in: query
-            name: weight
-            type: number
-            required: true
-            description: The Ceph CRUSH weight for the replaced OSD
-        responses:
-          200:
-            description: OK
-            schema:
-              type: object
-              id: Message
-          400:
-            description: Bad request
-            schema:
-              type: object
-              id: Message
-        """
-        return api_helper.ceph_osd_replace(
-            osdid,
-            reqargs.get("device", None),
-            reqargs.get("weight", None),
-        )
-
-    @RequestParser(
-        [
-            {
-                "name": "device",
-                "required": True,
-                "helptext": "A valid device or detect string must be specified.",
-            },
-        ]
-    )
-    @Authenticator
-    def put(self, osdid, reqargs):
-        """
-        Refresh (reimport) a Ceph OSD in the cluster
-        Note: This task may take up to 30s to complete and return
-        ---
-        tags:
-          - storage / ceph
-        parameters:
-          - in: query
-            name: device
-            type: string
-            required: true
-            description: The block device (e.g. "/dev/sdb", "/dev/disk/by-path/...", etc.) or detect string ("detect:NAME:SIZE:ID") that the OSD should be using
-        responses:
-          200:
-            description: OK
-            schema:
-              type: object
-              id: Message
-          400:
-            description: Bad request
-            schema:
-              type: object
-              id: Message
-        """
-        return api_helper.ceph_osd_refresh(
-            osdid,
-            reqargs.get("device", None),
-        )
-
     @RequestParser(
         [
             {
@@ -224,7 +224,7 @@ def node_domain_state(zkhandler, node):
 @ZKConnection(config)
 def node_secondary(zkhandler, node):
     """
-    Take NODE out of primary coordinator mode.
+    Take NODE out of primary router mode.
     """
     retflag, retdata = pvc_node.secondary_node(zkhandler, node)
 
@@ -240,7 +240,7 @@ def node_secondary(zkhandler, node):
 @ZKConnection(config)
 def node_primary(zkhandler, node):
     """
-    Set NODE to primary coordinator mode.
+    Set NODE to primary router mode.
     """
     retflag, retdata = pvc_node.primary_node(zkhandler, node)
 
@@ -1301,38 +1301,6 @@ def ceph_osd_add(zkhandler, node, device, weight, ext_db_flag=False, ext_db_ratio=0.05):
     return output, retcode
 
 
-@ZKConnection(config)
-def ceph_osd_replace(zkhandler, osd_id, device, weight):
-    """
-    Replace a Ceph OSD in the PVC Ceph storage cluster.
-    """
-    retflag, retdata = pvc_ceph.replace_osd(zkhandler, osd_id, device, weight)
-
-    if retflag:
-        retcode = 200
-    else:
-        retcode = 400
-
-    output = {"message": retdata.replace('"', "'")}
-    return output, retcode
-
-
-@ZKConnection(config)
-def ceph_osd_refresh(zkhandler, osd_id, device):
-    """
-    Refresh (reimport) a Ceph OSD in the PVC Ceph storage cluster.
-    """
-    retflag, retdata = pvc_ceph.refresh_osd(zkhandler, osd_id, device)
-
-    if retflag:
-        retcode = 200
-    else:
-        retcode = 400
-
-    output = {"message": retdata.replace('"', "'")}
-    return output, retcode
-
-
 @ZKConnection(config)
 def ceph_osd_remove(zkhandler, osd_id, force_flag):
     """
@@ -255,46 +255,6 @@ def ceph_osd_add(config, node, device, weight, ext_db_flag, ext_db_ratio):
     return retstatus, response.json().get("message", "")
 
 
-def ceph_osd_replace(config, osdid, device, weight):
-    """
-    Replace an existing Ceph OSD with a new device
-
-    API endpoint: POST /api/v1/storage/ceph/osd/{osdid}
-    API arguments: device={device}, weight={weight}
-    API schema: {"message":"{data}"}
-    """
-    params = {"device": device, "weight": weight, "yes-i-really-mean-it": "yes"}
-    response = call_api(config, "post", f"/storage/ceph/osd/{osdid}", params=params)
-
-    if response.status_code == 200:
-        retstatus = True
-    else:
-        retstatus = False
-
-    return retstatus, response.json().get("message", "")
-
-
-def ceph_osd_refresh(config, osdid, device):
-    """
-    Refresh (reimport) an existing Ceph OSD with device {device}
-
-    API endpoint: PUT /api/v1/storage/ceph/osd/{osdid}
-    API arguments: device={device}
-    API schema: {"message":"{data}"}
-    """
-    params = {
-        "device": device,
-    }
-    response = call_api(config, "put", f"/storage/ceph/osd/{osdid}", params=params)
-
-    if response.status_code == 200:
-        retstatus = True
-    else:
-        retstatus = False
-
-    return retstatus, response.json().get("message", "")
-
-
 def ceph_osd_remove(config, osdid, force_flag):
     """
     Remove Ceph OSD
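
The two removed helpers above are thin wrappers around plain HTTP calls against the OSD element endpoint. As a minimal illustration only — the cluster address, port, and absence of authentication here are assumptions, not part of this diff; the paths and query arguments come from the docstrings above — the same endpoints can be exercised directly with the requests library:

    # Sketch: drive the replace (POST) and refresh (PUT) endpoints directly.
    # "pvc.local:7370" is a hypothetical cluster address.
    import requests

    base = "http://pvc.local:7370/api/v1"

    # Replace OSD 5 onto a new block device
    r = requests.post(
        f"{base}/storage/ceph/osd/5",
        params={"device": "/dev/sdb", "weight": 1.0, "yes-i-really-mean-it": "yes"},
    )
    print(r.json().get("message", ""))

    # Refresh (reimport) OSD 5 from its existing device
    r = requests.put(f"{base}/storage/ceph/osd/5", params={"device": "/dev/sdb"})
    print(r.json().get("message", ""))
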
@@ -410,22 +370,13 @@ def format_list_osd(osd_list):
             # If this happens, the node hasn't checked in fully yet, so use some dummy data
             if osd_information["stats"]["node"] == "|":
                 for key in osd_information["stats"].keys():
-                    if (
-                        osd_information["stats"][key] == "|"
-                        or osd_information["stats"][key] is None
-                    ):
+                    if osd_information["stats"][key] == "|":
                         osd_information["stats"][key] = "N/A"
+                    elif osd_information["stats"][key] is None:
+                        osd_information["stats"][key] = "N/A"
                 for key in osd_information.keys():
                     if osd_information[key] is None:
                         osd_information[key] = "N/A"
-            else:
-                for key in osd_information["stats"].keys():
-                    if key in ["utilization", "var"] and isinstance(
-                        osd_information["stats"][key], float
-                    ):
-                        osd_information["stats"][key] = round(
-                            osd_information["stats"][key], 2
-                        )
         except KeyError:
             print(
                 f"Details for OSD {osd_information['id']} missing required keys, skipping."
@@ -498,11 +449,13 @@ def format_list_osd(osd_list):
         if _osd_free_length > osd_free_length:
             osd_free_length = _osd_free_length
 
-        _osd_util_length = len(str(osd_information["stats"]["utilization"])) + 1
+        osd_util = round(osd_information["stats"]["utilization"], 2)
+        _osd_util_length = len(str(osd_util)) + 1
         if _osd_util_length > osd_util_length:
             osd_util_length = _osd_util_length
 
-        _osd_var_length = len(str(osd_information["stats"]["var"])) + 1
+        osd_var = round(osd_information["stats"]["var"], 2)
+        _osd_var_length = len(str(osd_var)) + 1
         if _osd_var_length > osd_var_length:
             osd_var_length = _osd_var_length
 
@@ -652,6 +605,8 @@ def format_list_osd(osd_list):
         osd_up_flag, osd_up_colour, osd_in_flag, osd_in_colour = getOutputColoursOSD(
             osd_information
         )
+        osd_util = round(osd_information["stats"]["utilization"], 2)
+        osd_var = round(osd_information["stats"]["var"], 2)
 
         osd_db_device = osd_information["db_device"]
         if not osd_db_device:
@@ -714,8 +669,8 @@ def format_list_osd(osd_list):
             osd_reweight=osd_information["stats"]["reweight"],
             osd_used=osd_information["stats"]["used"],
             osd_free=osd_information["stats"]["avail"],
-            osd_util=osd_information["stats"]["utilization"],
-            osd_var=osd_information["stats"]["var"],
+            osd_util=osd_util,
+            osd_var=osd_var,
             osd_wrops=osd_information["stats"]["wr_ops"],
             osd_wrdata=osd_information["stats"]["wr_data"],
             osd_rdops=osd_information["stats"]["rd_ops"],
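
The hunks above move the rounding of utilization/var values so that column widths are computed from the rendered (rounded) strings. The width logic itself just takes the longest rendered value plus one space of padding; a standalone sketch of that pattern, with made-up values:

    # Sketch of the dynamic column-width pattern used by format_list_osd:
    # round first, then size the column to the widest rendered value plus padding.
    osd_utils = [0.8312, 12.5, 3.14159]  # hypothetical utilization values

    osd_util_length = len("Util") + 1  # start from the header width
    for util in osd_utils:
        rendered = str(round(util, 2))
        if len(rendered) + 1 > osd_util_length:
            osd_util_length = len(rendered) + 1

    print(osd_util_length)  # width used when formatting each row
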
@@ -486,7 +486,7 @@ def cli_node():
 @cluster_req
 def node_secondary(node, wait):
     """
-    Take NODE out of primary coordinator mode.
+    Take NODE out of primary router mode.
     """
 
     task_retcode, task_retdata = pvc_provisioner.task_status(config, None)
@@ -539,7 +539,7 @@ def node_secondary(node, wait):
 @cluster_req
 def node_primary(node, wait):
     """
-    Put NODE into primary coordinator mode.
+    Put NODE into primary router mode.
     """
 
     task_retcode, task_retdata = pvc_provisioner.task_status(config, None)
@@ -803,11 +803,11 @@ def cli_vm():
 )
 @click.option(
     "-s",
-    "--node-selector",
+    "--selector",
     "node_selector",
-    default="none",
+    default="mem",
     show_default=True,
-    type=click.Choice(["mem", "memfree", "load", "vcpus", "vms", "none"]),
+    type=click.Choice(["mem", "load", "vcpus", "vms", "none"]),
     help='Method to determine optimal target node during autoselect; "none" will use the default for the cluster.',
 )
 @click.option(
@@ -857,18 +857,6 @@ def vm_define(
 ):
     """
     Define a new virtual machine from Libvirt XML configuration file VMCONFIG.
-
-    The target node selector ("--node-selector"/"-s") can be "none" to use the cluster default, or one of the following values:
-      * "mem": choose the node with the least provisioned VM memory
-      * "memfree": choose the node with the most (real) free memory
-      * "vcpus": choose the node with the least allocated VM vCPUs
-      * "load": choose the node with the lowest current load average
-      * "vms": choose the node with the least number of provisioned VMs
-
-    For most clusters, "mem" should be sufficient, but others may be used based on the cluster workload and available resources. The following caveats should be considered:
-      * "mem" looks at the provisioned memory, not the allocated memory; thus, stopped or disabled VMs are counted towards a node's memory for this selector, even though their memory is not actively in use.
-      * "memfree" looks at the free memory of the node in general, ignoring the amount provisioned to VMs; if any VM's internal memory usage changes, this value would be affected. This might be preferable to "mem" on clusters with very high memory utilization versus total capacity or if many VMs are stopped/disabled.
-      * "load" looks at the system load of the node in general, ignoring load in any particular VMs; if any VM's CPU usage changes, this value would be affected. This might be preferable on clusters with some very CPU intensive VMs.
     """
 
     # Open the XML file
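
The removed help text above defines what each selector value ranks on. An illustrative sketch (not PVC code) of how each value would pick between two candidate nodes, using made-up per-node stats and the definitions from that help text:

    # Hypothetical per-node stats; keys mirror the selector definitions above.
    nodes = {
        "hv1": {"memprov": 48000, "memfree": 20000, "load": 0.5, "vcpus": 24, "vms": 10},
        "hv2": {"memprov": 32000, "memfree": 64000, "load": 2.1, "vcpus": 12, "vms": 6},
    }

    # "mem" wants the least provisioned memory; "memfree" the most free memory;
    # "load"/"vcpus"/"vms" want the smallest respective value.
    selectors = {
        "mem": lambda n: min(n, key=lambda k: n[k]["memprov"]),
        "memfree": lambda n: max(n, key=lambda k: n[k]["memfree"]),
        "load": lambda n: min(n, key=lambda k: n[k]["load"]),
        "vcpus": lambda n: min(n, key=lambda k: n[k]["vcpus"]),
        "vms": lambda n: min(n, key=lambda k: n[k]["vms"]),
    }

    for name, pick in selectors.items():
        print(name, "->", pick(nodes))
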
@@ -910,11 +898,11 @@ def vm_define(
 )
 @click.option(
     "-s",
-    "--node-selector",
+    "--selector",
     "node_selector",
     default=None,
     show_default=False,
-    type=click.Choice(["mem", "memfree", "load", "vcpus", "vms", "none"]),
+    type=click.Choice(["mem", "load", "vcpus", "vms", "none"]),
     help='Method to determine optimal target node during autoselect; "none" will use the default for the cluster.',
 )
 @click.option(
@@ -954,8 +942,6 @@ def vm_meta(
 ):
     """
     Modify the PVC metadata of existing virtual machine DOMAIN. At least one option to update must be specified. DOMAIN may be a UUID or name.
-
-    For details on the "--node-selector"/"-s" values, please see help for the command "pvc vm define".
     """
 
     if (
@@ -3385,74 +3371,6 @@ def ceph_osd_add(node, device, weight, ext_db_flag, ext_db_ratio, confirm_flag):
     cleanup(retcode, retmsg)
 
 
-###############################################################################
-# pvc storage osd replace
-###############################################################################
-@click.command(name="replace", short_help="Replace OSD block device.")
-@click.argument("osdid")
-@click.argument("device")
-@click.option(
-    "-w",
-    "--weight",
-    "weight",
-    default=1.0,
-    show_default=True,
-    help="New weight of the OSD within the CRUSH map.",
-)
-@click.option(
-    "-y",
-    "--yes",
-    "confirm_flag",
-    is_flag=True,
-    default=False,
-    help="Confirm the removal",
-)
-@cluster_req
-def ceph_osd_replace(osdid, device, weight, confirm_flag):
-    """
-    Replace the block device of an existing OSD with ID OSDID with DEVICE. Use this command to replace a failed or smaller OSD block device with a new one.
-
-    DEVICE must be a valid raw block device (e.g. '/dev/sda', '/dev/nvme0n1', '/dev/disk/by-path/...', '/dev/disk/by-id/...') or a "detect" string. Using partitions is not supported. A "detect" string is a string in the form "detect:<NAME>:<HUMAN-SIZE>:<ID>". For details, see 'pvc storage osd add --help'.
-
-    The weight of an OSD should reflect the ratio of the OSD to other OSDs in the storage cluster. For details, see 'pvc storage osd add --help'. Note that the current weight must be explicitly specified if it differs from the default.
-
-    Existing IDs, external DB devices, etc. of the OSD will be preserved; data will be lost and rebuilt from the remaining healthy OSDs.
-    """
-    if not confirm_flag and not config["unsafe"]:
-        try:
-            click.confirm(
-                "Replace OSD {} with block device {}".format(osdid, device),
-                prompt_suffix="? ",
-                abort=True,
-            )
-        except Exception:
-            exit(0)
-
-    retcode, retmsg = pvc_ceph.ceph_osd_replace(config, osdid, device, weight)
-    cleanup(retcode, retmsg)
-
-
-###############################################################################
-# pvc storage osd refresh
-###############################################################################
-@click.command(name="refresh", short_help="Refresh (reimport) OSD device.")
-@click.argument("osdid")
-@click.argument("device")
-@cluster_req
-def ceph_osd_refresh(osdid, device):
-    """
-    Refresh (reimport) the block DEVICE of an existing OSD with ID OSDID. Use this command to reimport a working OSD into a rebuilt/replaced node.
-
-    DEVICE must be a valid raw block device (e.g. '/dev/sda', '/dev/nvme0n1', '/dev/disk/by-path/...', '/dev/disk/by-id/...') or a "detect" string. Using partitions is not supported. A "detect" string is a string in the form "detect:<NAME>:<HUMAN-SIZE>:<ID>". For details, see 'pvc storage osd add --help'.
-
-    Existing data, IDs, weights, etc. of the OSD will be preserved.
-
-    NOTE: If a device had an external DB device, this is not automatically handled at this time. It is best to remove and re-add the OSD instead.
-    """
-    retcode, retmsg = pvc_ceph.ceph_osd_refresh(config, osdid, device)
-    cleanup(retcode, retmsg)
-
-
 ###############################################################################
 # pvc storage osd remove
 ###############################################################################
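
The help text above repeatedly references "detect" strings of the form "detect:<NAME>:<HUMAN-SIZE>:<ID>". As a hedged illustration of the format only — PVC's real resolver (get_detect_device, seen later in this diff) maps the string to an actual block device, which this hypothetical helper does not attempt — the string splits cleanly on colons:

    # Hypothetical parser for the "detect:<NAME>:<HUMAN-SIZE>:<ID>" format
    # described in the command help above; illustrative only.
    def parse_detect_string(detect):
        fields = detect.split(":")
        if len(fields) != 4 or fields[0] != "detect":
            raise ValueError(f"not a detect string: {detect!r}")
        _, name, human_size, osd_id = fields
        return {"name": name, "size": human_size, "id": osd_id}

    print(parse_detect_string("detect:INTEL:800GB:1"))
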
@@ -4116,9 +4034,7 @@ def provisioner_template_system_list(limit):
 @click.option(
     "--node-selector",
     "node_selector",
-    type=click.Choice(
-        ["mem", "memfree", "vcpus", "vms", "load", "none"], case_sensitive=False
-    ),
+    type=click.Choice(["mem", "vcpus", "vms", "load", "none"], case_sensitive=False),
     default="none",
     help='Method to determine optimal target node during autoselect; "none" will use the default for the cluster.',
 )
@@ -4151,8 +4067,6 @@ def provisioner_template_system_add(
 ):
     """
     Add a new system template NAME to the PVC cluster provisioner.
-
-    For details on the possible "--node-selector" values, please see help for the command "pvc vm define".
     """
     params = dict()
     params["name"] = name
@@ -4212,9 +4126,7 @@
 @click.option(
     "--node-selector",
     "node_selector",
-    type=click.Choice(
-        ["mem", "memfree", "vcpus", "vms", "load", "none"], case_sensitive=False
-    ),
+    type=click.Choice(["mem", "vcpus", "vms", "load", "none"], case_sensitive=False),
     help='Method to determine optimal target node during autoselect; "none" will use the default for the cluster.',
 )
 @click.option(
@@ -4246,8 +4158,6 @@ def provisioner_template_system_modify(
 ):
     """
     Add a new system template NAME to the PVC cluster provisioner.
-
-    For details on the possible "--node-selector" values, please see help for the command "pvc vm define".
     """
     params = dict()
     params["vcpus"] = vcpus
@@ -5962,8 +5872,6 @@ ceph_benchmark.add_command(ceph_benchmark_list)
 
 ceph_osd.add_command(ceph_osd_create_db_vg)
 ceph_osd.add_command(ceph_osd_add)
-ceph_osd.add_command(ceph_osd_replace)
-ceph_osd.add_command(ceph_osd_refresh)
 ceph_osd.add_command(ceph_osd_remove)
 ceph_osd.add_command(ceph_osd_in)
 ceph_osd.add_command(ceph_osd_out)
@@ -2,7 +2,7 @@ from setuptools import setup
 
 setup(
     name="pvc",
-    version="0.9.50",
+    version="0.9.48",
     packages=["pvc", "pvc.cli_lib"],
     install_requires=[
         "Click",
@@ -236,7 +236,7 @@ def add_osd_db_vg(zkhandler, node, device):
     return success, message
 
 
-# OSD actions use the /cmd/ceph pipe
+# OSD addition and removal uses the /cmd/ceph pipe
 # These actions must occur on the specific node they reference
 def add_osd(zkhandler, node, device, weight, ext_db_flag=False, ext_db_ratio=0.05):
     # Verify the target node exists
@@ -288,103 +288,6 @@ def add_osd(zkhandler, node, device, weight, ext_db_flag=False, ext_db_ratio=0.05):
     return success, message
 
 
-def replace_osd(zkhandler, osd_id, new_device, weight):
-    # Get current OSD information
-    osd_information = getOSDInformation(zkhandler, osd_id)
-    node = osd_information["node"]
-    old_device = osd_information["device"]
-    ext_db_flag = True if osd_information["db_device"] else False
-
-    # Verify target block device isn't in use
-    block_osd = verifyOSDBlock(zkhandler, node, new_device)
-    if block_osd and block_osd != osd_id:
-        return (
-            False,
-            'ERROR: Block device "{}" on node "{}" is used by OSD "{}"'.format(
-                new_device, node, block_osd
-            ),
-        )
-
-    # Tell the cluster to create a new OSD for the host
-    replace_osd_string = "osd_replace {},{},{},{},{},{}".format(
-        node, osd_id, old_device, new_device, weight, ext_db_flag
-    )
-    zkhandler.write([("base.cmd.ceph", replace_osd_string)])
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    with zkhandler.readlock("base.cmd.ceph"):
-        try:
-            result = zkhandler.read("base.cmd.ceph").split()[0]
-            if result == "success-osd_replace":
-                message = 'Replaced OSD {} with block device "{}" on node "{}".'.format(
-                    osd_id, new_device, node
-                )
-                success = True
-            else:
-                message = "ERROR: Failed to replace OSD; check node logs for details."
-                success = False
-        except Exception:
-            message = "ERROR: Command ignored by node."
-            success = False
-
-    # Acquire a write lock to ensure things go smoothly
-    with zkhandler.writelock("base.cmd.ceph"):
-        time.sleep(0.5)
-        zkhandler.write([("base.cmd.ceph", "")])
-
-    return success, message
-
-
-def refresh_osd(zkhandler, osd_id, device):
-    # Get current OSD information
-    osd_information = getOSDInformation(zkhandler, osd_id)
-    node = osd_information["node"]
-    ext_db_flag = True if osd_information["db_device"] else False
-
-    # Verify target block device isn't in use
-    block_osd = verifyOSDBlock(zkhandler, node, device)
-    if not block_osd or block_osd != osd_id:
-        return (
-            False,
-            'ERROR: Block device "{}" on node "{}" is not used by OSD "{}"; use replace instead'.format(
-                device, node, osd_id
-            ),
-        )
-
-    # Tell the cluster to create a new OSD for the host
-    refresh_osd_string = "osd_refresh {},{},{},{}".format(
-        node, osd_id, device, ext_db_flag
-    )
-    zkhandler.write([("base.cmd.ceph", refresh_osd_string)])
-    # Wait 1/2 second for the cluster to get the message and start working
-    time.sleep(0.5)
-    # Acquire a read lock, so we get the return exclusively
-    with zkhandler.readlock("base.cmd.ceph"):
-        try:
-            result = zkhandler.read("base.cmd.ceph").split()[0]
-            if result == "success-osd_refresh":
-                message = (
-                    'Refreshed OSD {} with block device "{}" on node "{}".'.format(
-                        osd_id, device, node
-                    )
-                )
-                success = True
-            else:
-                message = "ERROR: Failed to refresh OSD; check node logs for details."
-                success = False
-        except Exception:
-            message = "ERROR: Command ignored by node."
-            success = False
-
-    # Acquire a write lock to ensure things go smoothly
-    with zkhandler.writelock("base.cmd.ceph"):
-        time.sleep(0.5)
-        zkhandler.write([("base.cmd.ceph", "")])
-
-    return success, message
-
-
 def remove_osd(zkhandler, osd_id, force_flag):
     if not verifyOSD(zkhandler, osd_id):
         return False, 'ERROR: No OSD with ID "{}" is present in the cluster.'.format(
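
Both removed functions above follow the same Zookeeper command-pipe round trip: write "verb args" to the base.cmd.ceph key, give the target node a moment to pick it up, read back "success-verb" or "failure-verb" under a read lock, then clear the key under a write lock. A reduced sketch of that shape, assuming a zkhandler with the same read/write/readlock/writelock API as above:

    # Sketch of the /cmd/ceph command-pipe pattern used by replace_osd/refresh_osd.
    import time

    def run_ceph_command(zkhandler, verb, args):
        zkhandler.write([("base.cmd.ceph", f"{verb} {args}")])
        time.sleep(0.5)  # let the target node daemon pick the command up
        with zkhandler.readlock("base.cmd.ceph"):
            try:
                result = zkhandler.read("base.cmd.ceph").split()[0]
                success = result == f"success-{verb}"
            except Exception:
                success = False
        # Clear the pipe so the next command starts clean
        with zkhandler.writelock("base.cmd.ceph"):
            time.sleep(0.5)
            zkhandler.write([("base.cmd.ceph", "")])
        return success
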
@@ -639,8 +639,6 @@ def findTargetNode(zkhandler, dom_uuid):
     # Execute the search
     if search_field == "mem":
         return findTargetNodeMem(zkhandler, node_limit, dom_uuid)
-    if search_field == "memfree":
-        return findTargetNodeMemFree(zkhandler, node_limit, dom_uuid)
     if search_field == "load":
         return findTargetNodeLoad(zkhandler, node_limit, dom_uuid)
     if search_field == "vcpus":
@@ -679,7 +677,7 @@ def getNodes(zkhandler, node_limit, dom_uuid):
 
 
 #
-# via provisioned memory
+# via free memory (relative to allocated memory)
 #
 def findTargetNodeMem(zkhandler, node_limit, dom_uuid):
     most_provfree = 0
@@ -700,24 +698,6 @@ def findTargetNodeMem(zkhandler, node_limit, dom_uuid):
     return target_node
 
 
-#
-# via free memory
-#
-def findTargetNodeMemFree(zkhandler, node_limit, dom_uuid):
-    most_memfree = 0
-    target_node = None
-
-    node_list = getNodes(zkhandler, node_limit, dom_uuid)
-    for node in node_list:
-        memfree = int(zkhandler.read(("node.memory.free", node)))
-
-        if memfree > most_memfree:
-            most_memfree = memfree
-            target_node = node
-
-    return target_node
-
-
 #
 # via load average
 #
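
The findTargetNode* helpers in this file all share one scan shape: walk the candidate node list from getNodes and keep the best node seen so far. A generic sketch of that shape, as an illustration only (the callable metric reader is an assumption standing in for the per-selector Zookeeper reads shown above):

    # Generic form of the per-selector scans (findTargetNodeMemFree, etc.).
    def find_target_node_by(read_metric, node_list, prefer_max=True):
        best_value = None
        target_node = None
        for node in node_list:
            value = read_metric(node)  # e.g. int(zkhandler.read(("node.memory.free", node)))
            if best_value is None or (value > best_value if prefer_max else value < best_value):
                best_value = value
                target_node = node
        return target_node
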
@@ -91,7 +91,7 @@ def secondary_node(zkhandler, node):
     if daemon_mode == "hypervisor":
         return (
             False,
-            'ERROR: Cannot change coordinator mode on non-coordinator node "{}"'.format(
+            'ERROR: Cannot change router mode on non-coordinator node "{}"'.format(
                 node
             ),
         )
@@ -104,9 +104,9 @@ def secondary_node(zkhandler, node):
     # Get current state
     current_state = zkhandler.read(("node.state.router", node))
     if current_state == "secondary":
-        return True, 'Node "{}" is already in secondary coordinator mode.'.format(node)
+        return True, 'Node "{}" is already in secondary router mode.'.format(node)
 
-    retmsg = "Setting node {} in secondary coordinator mode.".format(node)
+    retmsg = "Setting node {} in secondary router mode.".format(node)
     zkhandler.write([("base.config.primary_node", "none")])
 
     return True, retmsg
@@ -124,7 +124,7 @@ def primary_node(zkhandler, node):
     if daemon_mode == "hypervisor":
         return (
             False,
-            'ERROR: Cannot change coordinator mode on non-coordinator node "{}"'.format(
+            'ERROR: Cannot change router mode on non-coordinator node "{}"'.format(
                 node
             ),
         )
@@ -137,9 +137,9 @@ def primary_node(zkhandler, node):
     # Get current state
     current_state = zkhandler.read(("node.state.router", node))
     if current_state == "primary":
-        return True, 'Node "{}" is already in primary coordinator mode.'.format(node)
+        return True, 'Node "{}" is already in primary router mode.'.format(node)
 
-    retmsg = "Setting node {} in primary coordinator mode.".format(node)
+    retmsg = "Setting node {} in primary router mode.".format(node)
     zkhandler.write([("base.config.primary_node", node)])
 
     return True, retmsg
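
Aside from the wording change, both functions above show the actual failover mechanism: primary/secondary is flipped cluster-wide by writing a node name (or "none") to the base.config.primary_node key, which the node daemons watch. A minimal sketch of that trigger, with a hypothetical node name:

    # Sketch: the single Zookeeper write that drives a coordinator transition,
    # per the function bodies above.
    def set_primary(zkhandler, node_or_none):
        zkhandler.write([("base.config.primary_node", node_or_none)])

    # set_primary(zkhandler, "hv1")   # promote hv1 (hypothetical node name)
    # set_primary(zkhandler, "none")  # demote the current primary
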
debian/changelog (17 changes, vendored)

@@ -1,20 +1,3 @@
-pvc (0.9.50-0) unstable; urgency=high
-
-  * [Node Daemon/API/CLI] Adds free memory node selector
-  * [Node Daemon] Fixes bug sending space-containing detect disk strings
-
- -- Joshua M. Boniface <joshua@boniface.me>  Wed, 06 Jul 2022 16:01:14 -0400
-
-pvc (0.9.49-0) unstable; urgency=high
-
-  * [Node Daemon] Fixes bugs with OSD stat population on creation
-  * [Node Daemon/API] Adds additional information to Zookeeper about OSDs
-  * [Node Daemon] Refactors OSD removal for improved safety
-  * [Node Daemon/API/CLI] Adds explicit support for replacing and refreshing (reimporting) OSDs
-  * [API/CLI] Fixes a language inconsistency about "router mode"
-
- -- Joshua M. Boniface <joshua@boniface.me>  Fri, 06 May 2022 15:49:39 -0400
-
 pvc (0.9.48-0) unstable; urgency=high
 
   * [CLI] Fixes situation where only a single cluster is available
@@ -353,19 +353,7 @@ The password for the PVC node daemon to log in to the IPMI interface.
 
 * *required*
 
-The default selector algorithm to use when migrating VMs away from a node; individual VMs can override this default.
-
-Valid `target_selector` values are:
-  * `mem`: choose the node with the least provisioned VM memory
-  * `memfree`: choose the node with the most (real) free memory
-  * `vcpus`: choose the node with the least allocated VM vCPUs
-  * `load`: choose the node with the lowest current load average
-  * `vms`: choose the node with the least number of provisioned VMs
-
-For most clusters, `mem` should be sufficient, but others may be used based on the cluster workload and available resources. The following caveats should be considered:
-  * `mem` looks at the provisioned memory, not the allocated memory; thus, stopped or disabled VMs are counted towards a node's memory for this selector, even though their memory is not actively in use.
-  * `memfree` looks at the free memory of the node in general, ignoring the amount provisioned to VMs; if any VM's internal memory usage changes, this value would be affected. This might be preferable to `mem` on clusters with very high memory utilization versus total capacity or if many VMs are stopped/disabled.
-  * `load` looks at the system load of the node in general, ignoring load in any particular VMs; if any VM's CPU usage changes, this value would be affected. This might be preferable on clusters with some very CPU intensive VMs.
+The selector algorithm to use when migrating hosts away from the node. Valid `selector` values are: `mem`: the node with the least allocated VM memory; `vcpus`: the node with the least allocated VM vCPUs; `load`: the node with the least current load average; `vms`: the node with the least number of provisioned VMs.
 
 #### `system` → `configuration` → `directories` → `dynamic_directory`
 
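
The removed documentation above also implies the fallback rule used throughout this diff: a VM whose own node_selector is "none" (or unset) uses the cluster-wide target_selector from the node daemon configuration. A sketch of that fallback, assuming the cluster default shown in the sample configuration later in this diff:

    # Sketch of the "none means cluster default" fallback described above.
    def effective_selector(vm_selector, cluster_target_selector="mem"):
        if vm_selector in (None, "none"):
            return cluster_target_selector
        return vm_selector

    print(effective_selector("none"))     # -> "mem" (cluster default)
    print(effective_selector("memfree"))  # -> "memfree"
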
@@ -192,7 +192,7 @@
                     "type": "array"
                 },
                 "node_selector": {
-                    "description": "The selector used to determine candidate nodes during migration; see 'target_selector' in the node daemon configuration reference",
+                    "description": "The selector used to determine candidate nodes during migration",
                     "type": "string"
                 }
             },
@@ -1414,7 +1414,7 @@
                     "type": "array"
                 },
                 "node_selector": {
-                    "description": "The selector used to determine candidate nodes during migration; see 'target_selector' in the node daemon configuration reference",
+                    "description": "The selector used to determine candidate nodes during migration",
                    "type": "string"
                 },
                 "profile": {
@@ -5094,13 +5094,6 @@
         "delete": {
             "description": "Note: This task may take up to 30s to complete and return<br/>Warning: This operation may have unintended consequences for the storage cluster; ensure the cluster can support removing the OSD before proceeding",
             "parameters": [
-                {
-                    "description": "Force removal even if some step(s) fail",
-                    "in": "query",
-                    "name": "force",
-                    "required": "flase",
-                    "type": "boolean"
-                },
                 {
                     "description": "A confirmation string to ensure that the API consumer really means it",
                     "in": "query",
@@ -5148,73 +5141,6 @@
                 "tags": [
                     "storage / ceph"
                 ]
-            },
-            "post": {
-                "description": "Note: This task may take up to 30s to complete and return",
-                "parameters": [
-                    {
-                        "description": "The block device (e.g. \"/dev/sdb\", \"/dev/disk/by-path/...\", etc.) or detect string (\"detect:NAME:SIZE:ID\") to replace the OSD onto",
-                        "in": "query",
-                        "name": "device",
-                        "required": true,
-                        "type": "string"
-                    },
-                    {
-                        "description": "The Ceph CRUSH weight for the replaced OSD",
-                        "in": "query",
-                        "name": "weight",
-                        "required": true,
-                        "type": "number"
-                    }
-                ],
-                "responses": {
-                    "200": {
-                        "description": "OK",
-                        "schema": {
-                            "$ref": "#/definitions/Message"
-                        }
-                    },
-                    "400": {
-                        "description": "Bad request",
-                        "schema": {
-                            "$ref": "#/definitions/Message"
-                        }
-                    }
-                },
-                "summary": "Replace a Ceph OSD in the cluster",
-                "tags": [
-                    "storage / ceph"
-                ]
-            },
-            "put": {
-                "description": "Note: This task may take up to 30s to complete and return",
-                "parameters": [
-                    {
-                        "description": "The block device (e.g. \"/dev/sdb\", \"/dev/disk/by-path/...\", etc.) or detect string (\"detect:NAME:SIZE:ID\") that the OSD should be using",
-                        "in": "query",
-                        "name": "device",
-                        "required": true,
-                        "type": "string"
-                    }
-                ],
-                "responses": {
-                    "200": {
-                        "description": "OK",
-                        "schema": {
-                            "$ref": "#/definitions/Message"
-                        }
-                    },
-                    "400": {
-                        "description": "Bad request",
-                        "schema": {
-                            "$ref": "#/definitions/Message"
-                        }
-                    }
-                },
-                "summary": "Refresh (reimport) a Ceph OSD in the cluster",
-                "tags": [
-                    "storage / ceph"
-                ]
             }
         },
         "/api/v1/storage/ceph/osd/{osdid}/state": {
@@ -6173,15 +6099,13 @@
                         "type": "string"
                     },
                     {
-                        "default": "none",
-                        "description": "The selector used to determine candidate nodes during migration; see 'target_selector' in the node daemon configuration reference",
+                        "default": "mem",
+                        "description": "The selector used to determine candidate nodes during migration",
                         "enum": [
                             "mem",
-                            "memfree",
                             "vcpus",
                             "load",
-                            "vms",
-                            "none (cluster default)"
+                            "vms"
                         ],
                         "in": "query",
                         "name": "selector",
@@ -6332,11 +6256,10 @@
                         "type": "string"
                     },
                     {
-                        "default": "none",
-                        "description": "The selector used to determine candidate nodes during migration; see 'target_selector' in the node daemon configuration reference",
+                        "default": "mem",
+                        "description": "The selector used to determine candidate nodes during migration",
                         "enum": [
                             "mem",
-                            "memfree",
                             "vcpus",
                             "load",
                             "vms",
@@ -6594,14 +6517,12 @@
                         "type": "string"
                     },
                     {
-                        "description": "The selector used to determine candidate nodes during migration; see 'target_selector' in the node daemon configuration reference",
+                        "description": "The selector used to determine candidate nodes during migration",
                         "enum": [
                             "mem",
-                            "memfree",
                             "vcpus",
                             "load",
-                            "vms",
-                            "none (cluster default)"
+                            "vms"
                         ],
                         "in": "query",
                         "name": "selector",
@@ -122,7 +122,7 @@ pvc:
         pass: Passw0rd
     # migration: Migration option configuration
     migration:
-      # target_selector: Criteria to select the ideal migration target, options: mem, memfree, load, vcpus, vms
+      # target_selector: Criteria to select the ideal migration target, options: mem, load, vcpus, vms
       target_selector: mem
   # configuration: Local system configurations
   configuration:
@@ -48,7 +48,7 @@ import re
 import json
 
 # Daemon version
-version = "0.9.50"
+version = "0.9.48"
 
 
 ##########################################################
@@ -21,6 +21,7 @@
 
 import time
 import json
+import psutil
 
 import daemon_lib.common as common
 
@@ -216,10 +217,6 @@ class CephOSDInstance(object):
         retcode, stdout, stderr = common.run_os_command(
             f"ceph-volume lvm list {find_device}"
         )
-        osd_blockdev = None
-        osd_fsid = None
-        osd_clusterfsid = None
-        osd_device = None
         for line in stdout.split("\n"):
             if "block device" in line:
                 osd_blockdev = line.split()[-1]
@@ -230,7 +227,7 @@ class CephOSDInstance(object):
             if "devices" in line:
                 osd_device = line.split()[-1]
 
-        if not osd_blockdev or not osd_fsid or not osd_clusterfsid or not osd_device:
+        if not osd_fsid:
             self.logger.out(
                 f"Failed to find updated OSD information via ceph-volume for {find_device}",
                 state="e",
@@ -407,7 +404,6 @@ class CephOSDInstance(object):
                 print(stdout)
                 print(stderr)
                 raise Exception
 
-            time.sleep(0.5)
 
             # 6. Verify it started
@@ -438,7 +434,7 @@ class CephOSDInstance(object):
                     (("osd.lv", osd_id), osd_lv),
                     (
                         ("osd.stats", osd_id),
-                        '{"uuid": "|", "up": 0, "in": 0, "primary_affinity": "|", "utilization": "|", "var": "|", "pgs": "|", "kb": "|", "weight": "|", "reweight": "|", "node": "|", "used": "|", "avail": "|", "wr_ops": "|", "wr_data": "|", "rd_ops": "|", "rd_data": "|", "state": "|"}',
+                        f'{"uuid": "|", "up": 0, "in": 0, "primary_affinity": "|", "utilization": "|", "var": "|", "pgs": "|", "kb": "|", "weight": "|", "reweight": "|", "node": "{node}", "used": "|", "avail": "|", "wr_ops": "|", "wr_data": "|", "rd_ops": "|", "rd_data": "|", state="|" }',
                     ),
                 ]
             )
@@ -451,381 +447,6 @@ class CephOSDInstance(object):
             logger.out("Failed to create new OSD disk: {}".format(e), state="e")
             return False
 
-    @staticmethod
-    def replace_osd(
-        zkhandler,
-        logger,
-        node,
-        osd_id,
-        old_device,
-        new_device,
-        weight,
-        ext_db_flag=False,
-    ):
-        # Handle a detect device if that is passed
-        if match(r"detect:", new_device):
-            ddevice = get_detect_device(new_device)
-            if ddevice is None:
-                logger.out(
-                    f"Failed to determine block device from detect string {new_device}",
-                    state="e",
-                )
-                return False
-            else:
-                logger.out(
-                    f"Determined block device {ddevice} from detect string {new_device}",
-                    state="i",
-                )
-                new_device = ddevice
-
-        # We are ready to create a new OSD on this node
-        logger.out(
-            "Replacing OSD {} disk with block device {}".format(osd_id, new_device),
-            state="i",
-        )
-        try:
-            # Verify the OSD is present
-            retcode, stdout, stderr = common.run_os_command("ceph osd ls")
-            osd_list = stdout.split("\n")
-            if osd_id not in osd_list:
-                logger.out(
-                    "Could not find OSD {} in the cluster".format(osd_id), state="e"
-                )
-                return True
-
-            # 1. Set the OSD down and out so it will flush
-            logger.out("Setting down OSD disk with ID {}".format(osd_id), state="i")
-            retcode, stdout, stderr = common.run_os_command(
-                "ceph osd down {}".format(osd_id)
-            )
-            if retcode:
-                print("ceph osd down")
-                print(stdout)
-                print(stderr)
-                raise Exception
-
-            logger.out("Setting out OSD disk with ID {}".format(osd_id), state="i")
-            retcode, stdout, stderr = common.run_os_command(
-                "ceph osd out {}".format(osd_id)
-            )
-            if retcode:
-                print("ceph osd out")
-                print(stdout)
-                print(stderr)
-                raise Exception
-
-            # 2. Wait for the OSD to be safe to remove (but don't wait for rebalancing to complete)
-            logger.out("Waiting for OSD {osd_id} to be safe to remove", state="i")
-            while True:
-                retcode, stdout, stderr = common.run_os_command(
-                    f"ceph osd safe-to-destroy osd.{osd_id}"
-                )
-                if retcode in [0, 11]:
-                    # Code 0 = success
-                    # Code 11 = "Error EAGAIN: OSD(s) 5 have no reported stats, and not all PGs are active+clean; we cannot draw any conclusions." which means all PGs have been remappped but backfill is still occurring
-                    break
-                else:
-                    time.sleep(5)
-
-            # 3. Stop the OSD process
-            logger.out("Stopping OSD disk with ID {}".format(osd_id), state="i")
-            retcode, stdout, stderr = common.run_os_command(
-                "systemctl stop ceph-osd@{}".format(osd_id)
-            )
-            if retcode:
-                print("systemctl stop")
-                print(stdout)
-                print(stderr)
-                raise Exception
-            time.sleep(2)
-
-            # 4. Destroy the OSD
-            logger.out("Destroying OSD with ID {osd_id}", state="i")
-            retcode, stdout, stderr = common.run_os_command(
-                f"ceph osd destroy {osd_id} --yes-i-really-mean-it"
-            )
-            if retcode:
-                print("ceph osd destroy")
-                print(stdout)
-                print(stderr)
-                raise Exception
-
-            # 5. Adjust the weight
-            logger.out(
-                "Adjusting weight of OSD disk with ID {} in CRUSH map".format(osd_id),
-                state="i",
-            )
-            retcode, stdout, stderr = common.run_os_command(
-                "ceph osd crush reweight osd.{osdid} {weight}".format(
-                    osdid=osd_id, weight=weight
-                )
-            )
-            if retcode:
-                print("ceph osd crush reweight")
-                print(stdout)
-                print(stderr)
-                raise Exception
-
-            # 6a. Zap the new disk to ensure it is ready to go
-            logger.out("Zapping disk {}".format(new_device), state="i")
-            retcode, stdout, stderr = common.run_os_command(
-                "ceph-volume lvm zap --destroy {}".format(new_device)
-            )
-            if retcode:
-                print("ceph-volume lvm zap")
-                print(stdout)
-                print(stderr)
-                raise Exception
-
-            dev_flags = "--data {}".format(new_device)
-
-            # 6b. Prepare the logical volume if ext_db_flag
-            if ext_db_flag:
-                db_device = "osd-db/osd-{}".format(osd_id)
-                dev_flags += " --block.db {}".format(db_device)
-            else:
-                db_device = ""
-
-            # 6c. Replace the OSD
-            logger.out(
-                "Preparing LVM for replaced OSD {} disk on {}".format(
-                    osd_id, new_device
-                ),
-                state="i",
-            )
-            retcode, stdout, stderr = common.run_os_command(
-                "ceph-volume lvm prepare --osd-id {osdid} --bluestore {devices}".format(
-                    osdid=osd_id, devices=dev_flags
-                )
-            )
-            if retcode:
-                print("ceph-volume lvm prepare")
-                print(stdout)
-                print(stderr)
-                raise Exception
-
-            # 7a. Get OSD information
-            logger.out(
-                "Getting OSD information for ID {} on {}".format(osd_id, new_device),
-                state="i",
-            )
-            retcode, stdout, stderr = common.run_os_command(
-                "ceph-volume lvm list {device}".format(device=new_device)
-            )
-            for line in stdout.split("\n"):
-                if "block device" in line:
-                    osd_blockdev = line.split()[-1]
-                if "osd fsid" in line:
-                    osd_fsid = line.split()[-1]
-                if "cluster fsid" in line:
-                    osd_clusterfsid = line.split()[-1]
-                if "devices" in line:
-                    osd_device = line.split()[-1]
-
-            if not osd_fsid:
-                print("ceph-volume lvm list")
-                print("Could not find OSD information in data:")
-                print(stdout)
-                print(stderr)
-                raise Exception
-
-            # Split OSD blockdev into VG and LV components
-            # osd_blockdev = /dev/ceph-<uuid>/osd-block-<uuid>
-            _, _, osd_vg, osd_lv = osd_blockdev.split("/")
-
-            # Reset whatever we were given to Ceph's /dev/xdX naming
-            if new_device != osd_device:
-                new_device = osd_device
-
-            # 7b. Activate the OSD
-            logger.out("Activating new OSD disk with ID {}".format(osd_id), state="i")
-            retcode, stdout, stderr = common.run_os_command(
-                "ceph-volume lvm activate --bluestore {osdid} {osdfsid}".format(
-                    osdid=osd_id, osdfsid=osd_fsid
-                )
-            )
-            if retcode:
-                print("ceph-volume lvm activate")
-                print(stdout)
-                print(stderr)
-                raise Exception
-
-            time.sleep(0.5)
-
-            # 8. Verify it started
-            retcode, stdout, stderr = common.run_os_command(
-                "systemctl status ceph-osd@{osdid}".format(osdid=osd_id)
-            )
-            if retcode:
-                print("systemctl status")
-                print(stdout)
-                print(stderr)
-                raise Exception
-
-            # 9. Update Zookeeper information
-            logger.out(
-                "Adding new OSD disk with ID {} to Zookeeper".format(osd_id), state="i"
-            )
-            zkhandler.write(
-                [
-                    (("osd", osd_id), ""),
-                    (("osd.node", osd_id), node),
-                    (("osd.device", osd_id), new_device),
-                    (("osd.db_device", osd_id), db_device),
-                    (("osd.fsid", osd_id), ""),
-                    (("osd.ofsid", osd_id), osd_fsid),
-                    (("osd.cfsid", osd_id), osd_clusterfsid),
-                    (("osd.lvm", osd_id), ""),
-                    (("osd.vg", osd_id), osd_vg),
-                    (("osd.lv", osd_id), osd_lv),
-                    (
-                        ("osd.stats", osd_id),
-                        '{"uuid": "|", "up": 0, "in": 0, "primary_affinity": "|", "utilization": "|", "var": "|", "pgs": "|", "kb": "|", "weight": "|", "reweight": "|", "node": "|", "used": "|", "avail": "|", "wr_ops": "|", "wr_data": "|", "rd_ops": "|", "rd_data": "|", "state": "|"}',
-                    ),
-                ]
-            )
-
-            # Log it
-            logger.out(
-                "Replaced OSD {} disk with device {}".format(osd_id, new_device),
-                state="o",
-            )
-            return True
-        except Exception as e:
-            # Log it
-            logger.out("Failed to replace OSD {} disk: {}".format(osd_id, e), state="e")
-            return False
-
-    @staticmethod
-    def refresh_osd(zkhandler, logger, node, osd_id, device, ext_db_flag):
-        # Handle a detect device if that is passed
-        if match(r"detect:", device):
-            ddevice = get_detect_device(device)
-            if ddevice is None:
-                logger.out(
-                    f"Failed to determine block device from detect string {device}",
-                    state="e",
-                )
-                return False
-            else:
-                logger.out(
-                    f"Determined block device {ddevice} from detect string {device}",
-                    state="i",
-                )
-                device = ddevice
-
-        # We are ready to create a new OSD on this node
-        logger.out(
-            "Refreshing OSD {} disk on block device {}".format(osd_id, device),
-            state="i",
-        )
-        try:
-            # 1. Verify the OSD is present
-            retcode, stdout, stderr = common.run_os_command("ceph osd ls")
-            osd_list = stdout.split("\n")
-            if osd_id not in osd_list:
-                logger.out(
-                    "Could not find OSD {} in the cluster".format(osd_id), state="e"
-                )
-                return True
-
-            dev_flags = "--data {}".format(device)
-
-            if ext_db_flag:
-                db_device = "osd-db/osd-{}".format(osd_id)
-                dev_flags += " --block.db {}".format(db_device)
-            else:
-                db_device = ""
-
-            # 2. Get OSD information
-            logger.out(
-                "Getting OSD information for ID {} on {}".format(osd_id, device),
-                state="i",
-            )
-            retcode, stdout, stderr = common.run_os_command(
-                "ceph-volume lvm list {device}".format(device=device)
-            )
-            for line in stdout.split("\n"):
-                if "block device" in line:
-                    osd_blockdev = line.split()[-1]
-                if "osd fsid" in line:
-                    osd_fsid = line.split()[-1]
-                if "cluster fsid" in line:
-                    osd_clusterfsid = line.split()[-1]
-                if "devices" in line:
-                    osd_device = line.split()[-1]
-
-            if not osd_fsid:
-                print("ceph-volume lvm list")
-                print("Could not find OSD information in data:")
-                print(stdout)
-                print(stderr)
-                raise Exception
-
-            # Split OSD blockdev into VG and LV components
-            # osd_blockdev = /dev/ceph-<uuid>/osd-block-<uuid>
-            _, _, osd_vg, osd_lv = osd_blockdev.split("/")
-
-            # Reset whatever we were given to Ceph's /dev/xdX naming
-            if device != osd_device:
-                device = osd_device
-
-            # 3. Activate the OSD
-            logger.out("Activating new OSD disk with ID {}".format(osd_id), state="i")
-            retcode, stdout, stderr = common.run_os_command(
-                "ceph-volume lvm activate --bluestore {osdid} {osdfsid}".format(
-                    osdid=osd_id, osdfsid=osd_fsid
-                )
-            )
-            if retcode:
-                print("ceph-volume lvm activate")
-                print(stdout)
-                print(stderr)
-                raise Exception
-
-            time.sleep(0.5)
-
-            # 4. Verify it started
-            retcode, stdout, stderr = common.run_os_command(
-                "systemctl status ceph-osd@{osdid}".format(osdid=osd_id)
-            )
-            if retcode:
-                print("systemctl status")
-                print(stdout)
-                print(stderr)
-                raise Exception
-
-            # 5. Update Zookeeper information
-            logger.out(
-                "Adding new OSD disk with ID {} to Zookeeper".format(osd_id), state="i"
-            )
-            zkhandler.write(
-                [
-                    (("osd", osd_id), ""),
-                    (("osd.node", osd_id), node),
-                    (("osd.device", osd_id), device),
-                    (("osd.db_device", osd_id), db_device),
-                    (("osd.fsid", osd_id), ""),
-                    (("osd.ofsid", osd_id), osd_fsid),
-                    (("osd.cfsid", osd_id), osd_clusterfsid),
-                    (("osd.lvm", osd_id), ""),
-                    (("osd.vg", osd_id), osd_vg),
-                    (("osd.lv", osd_id), osd_lv),
-                    (
-                        ("osd.stats", osd_id),
-                        '{"uuid": "|", "up": 0, "in": 0, "primary_affinity": "|", "utilization": "|", "var": "|", "pgs": "|", "kb": "|", "weight": "|", "reweight": "|", "node": "|", "used": "|", "avail": "|", "wr_ops": "|", "wr_data": "|", "rd_ops": "|", "rd_data": "|", "state": "|"}',
-                    ),
-                ]
-            )
-
-            # Log it
-            logger.out("Refreshed OSD {} disk on {}".format(osd_id, device), state="o")
-            return True
-        except Exception as e:
-            # Log it
-            logger.out("Failed to refresh OSD {} disk: {}".format(osd_id, e), state="e")
-            return False
-
     @staticmethod
     def remove_osd(zkhandler, logger, osd_id, osd_obj, force_flag):
         logger.out("Removing OSD disk {}".format(osd_id), state="i")
@@ -869,16 +490,29 @@ class CephOSDInstance(object):
                else:
                    raise Exception

            # 2. Wait for the OSD to be safe to remove (but don't wait for rebalancing to complete)
            logger.out(f"Waiting for OSD {osd_id} to be safe to remove", state="i")
            osd_string = str()
            while True:
                retcode, stdout, stderr = common.run_os_command(
                    f"ceph osd safe-to-destroy osd.{osd_id}"
                )
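                # Exit-status semantics (assumed from `ceph osd safe-to-destroy`
                # behavior): 0 means the OSD can be destroyed without data
                # loss, and 11 (EAGAIN) is also accepted here; both proceed to
                # the PG-count check, anything else retries after a sleep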
                if int(retcode) in [0, 11]:
                    try:
                        retcode, stdout, stderr = common.run_os_command(
                            "ceph pg dump osds --format json"
                        )
                        dump_string = json.loads(stdout)
                        for osd in dump_string:
                            if str(osd["osd"]) == osd_id:
                                osd_string = osd
                        num_pgs = osd_string["num_pgs"]
                        if num_pgs > 0:
                            time.sleep(5)
                        else:
                            if force_flag:
                                logger.out("Ignoring error due to force flag", state="i")
                            # Exit the wait loop via the except handler below
                            raise Exception

                    except Exception:
                        break
                else:
                    time.sleep(5)

            # 3. Stop the OSD process and wait for it to be terminated
            logger.out("Stopping OSD disk with ID {}".format(osd_id), state="i")
@@ -893,51 +527,62 @@ class CephOSDInstance(object):
                    logger.out("Ignoring error due to force flag", state="i")
                else:
                    raise Exception
            time.sleep(2)

            # FIXME: There has to be a better way to do this /shrug
            while True:
                is_osd_up = False
                # Find if there is a process named ceph-osd with arg '--id {id}'
                for p in psutil.process_iter(attrs=["name", "cmdline"]):
                    if "ceph-osd" == p.info["name"] and "--id {}".format(
                        osd_id
                    ) in " ".join(p.info["cmdline"]):
                        is_osd_up = True
                # If there isn't, the process has terminated and we can continue
                if not is_osd_up:
                    break
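
            # Polling psutil like this is a stopgap (hence the FIXME above);
            # one alternative sketch would be checking the unit state instead,
            # e.g. `systemctl is-active --quiet ceph-osd@{osd_id}` in the same
            # retry loop (hypothetical here, not what this code does)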

            # 4. Determine the block devices
            osd_vg = zkhandler.read(("osd.vg", osd_id))
            osd_lv = zkhandler.read(("osd.lv", osd_id))
            osd_lvm = f"/dev/{osd_vg}/{osd_lv}"
            osd_device = None
            device_zk = zkhandler.read(("osd.device", osd_id))
            try:
                retcode, stdout, stderr = common.run_os_command(
                    "readlink /var/lib/ceph/osd/ceph-{}/block".format(osd_id)
                )
                vg_name = stdout.split("/")[-2]  # e.g. /dev/ceph-<uuid>/osd-block-<uuid>
                retcode, stdout, stderr = common.run_os_command(
                    "vgs --separator , --noheadings -o pv_name {}".format(vg_name)
                )
                pv_block = stdout.strip()
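                # Worked example of the chain above: readlink on the OSD's
                # block symlink yields something like
                # /dev/ceph-<uuid>/osd-block-<uuid>, split("/")[-2] picks out
                # the VG name "ceph-<uuid>", and `vgs -o pv_name` maps that VG
                # back to its backing physical device, e.g. /dev/sdb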
            except Exception as e:
                print(e)
                pv_block = device_zk

            # 5a. Verify that the blockdev actually has a ceph volume that matches the ID, otherwise don't zap it
            logger.out(
                f"Checking OSD disk {pv_block} for an OSD signature with ID osd.{osd_id}",
                state="i",
            )
            retcode, stdout, stderr = common.run_os_command(
                f"ceph-volume lvm list {pv_block}"
            )
            for line in stdout.split("\n"):
                if "devices" in line:
                    osd_device = line.split()[-1]

            if not osd_device:
                print("ceph-volume lvm list")
                print("Could not find OSD information in data:")
                print(stdout)
                print(stderr)
                if force_flag:
                    logger.out("Ignoring error due to force flag", state="i")
                else:
                    raise Exception
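
            # If the listing lacks the "====== osd.N =======" banner for this
            # ID, the device no longer carries this OSD's volume, and the zap
            # below is skipped rather than risking a wipe of an unrelated disk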
            if f"====== osd.{osd_id} =======" in stdout:
                # 5b. Zap the volumes
                logger.out(
                    "Zapping OSD disk with ID {} on {}".format(osd_id, pv_block),
                    state="i",
                )
                retcode, stdout, stderr = common.run_os_command(
                    "ceph-volume lvm zap --destroy {}".format(pv_block)
                )
                if retcode:
                    print("ceph-volume lvm zap")
                    print(stdout)
                    print(stderr)
                    if force_flag:
                        logger.out("Ignoring error due to force flag", state="i")
                    else:
                        raise Exception

            # 6. Purge the OSD from Ceph
            logger.out("Purging OSD disk with ID {}".format(osd_id), state="i")
@@ -1239,9 +884,8 @@ class CephSnapshotInstance(object):
# Primary command function
# This command pipe is only used for OSD adds and removes
def ceph_command(zkhandler, logger, this_node, data, d_osd):
    # Get the command and args; the * + join ensures arguments with spaces (e.g. detect strings) are recombined right
    command, *args = data.split()
    args = " ".join(args)

    # Adding a new OSD
    if command == "osd_add":
@@ -1267,59 +911,6 @@ def ceph_command(zkhandler, logger, this_node, data, d_osd):
            # Wait 1 second before we free the lock, to ensure the client hits the lock
            time.sleep(1)
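
    # The writelock on base.cmd.ceph plus the trailing sleep appears to act as
    # a simple handshake: the client that wrote the command string (e.g. a
    # hypothetical "osd_remove 2,False", format inferred from the parsers
    # below) polls the same key for the success-/failure- prefix before the
    # lock is freed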

    # Replacing an OSD
    if command == "osd_replace":
        node, osd_id, old_device, new_device, weight, ext_db_flag = args.split(",")
        ext_db_flag = bool(strtobool(ext_db_flag))
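        # strtobool (presumably distutils.util.strtobool, deprecated since
        # Python 3.10) maps strings like "true"/"False"/"1" to 1 or 0; bool()
        # then turns that into a real boolean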
        if node == this_node.name:
            # Lock the command queue
            zk_lock = zkhandler.writelock("base.cmd.ceph")
            with zk_lock:
                # Replace the OSD
                result = CephOSDInstance.replace_osd(
                    zkhandler,
                    logger,
                    node,
                    osd_id,
                    old_device,
                    new_device,
                    weight,
                    ext_db_flag,
                )
                # Command succeeded
                if result:
                    # Update the command queue
                    zkhandler.write([("base.cmd.ceph", "success-{}".format(data))])
                # Command failed
                else:
                    # Update the command queue
                    zkhandler.write([("base.cmd.ceph", "failure-{}".format(data))])
                # Wait 1 second before we free the lock, to ensure the client hits the lock
                time.sleep(1)

    # Refreshing an OSD
    if command == "osd_refresh":
        node, osd_id, device, ext_db_flag = args.split(",")
        ext_db_flag = bool(strtobool(ext_db_flag))
        if node == this_node.name:
            # Lock the command queue
            zk_lock = zkhandler.writelock("base.cmd.ceph")
            with zk_lock:
                # Refresh the OSD
                result = CephOSDInstance.refresh_osd(
                    zkhandler, logger, node, osd_id, device, ext_db_flag
                )
                # Command succeeded
                if result:
                    # Update the command queue
                    zkhandler.write([("base.cmd.ceph", "success-{}".format(data))])
                # Command failed
                else:
                    # Update the command queue
                    zkhandler.write([("base.cmd.ceph", "failure-{}".format(data))])
                # Wait 1 second before we free the lock, to ensure the client hits the lock
                time.sleep(1)

    # Removing an OSD
    elif command == "osd_remove":
        osd_id, force = args.split(",")