Compare commits

5 Commits

Author | SHA1 | Date
---|---|---
 | ea709f573f | 
 | 1142454934 | 
 | bbfad340a1 | 
 | c73939e1c5 | 
 | 25fe45dd28 | 
@@ -1,5 +1,12 @@
## PVC Changelog

###### [v0.9.47](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.47)

* [Node Daemon/API/CLI] Adds Ceph pool device class/tier support
* [API] Fixes a bug returning values if a Ceph pool has not yet reported stats
* [API/CLI] Adds PGs count to the pool list output
* [API/CLI] Adds Ceph pool PGs count adjustment support

###### [v0.9.46](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.46)

* [API] Fixes bugs with legacy benchmark display

@@ -25,7 +25,7 @@ import yaml

from distutils.util import strtobool as dustrtobool

# Daemon version
version = "0.9.46"
version = "0.9.47"

# API version
API_VERSION = 1.0

@@ -4226,6 +4226,12 @@ class API_Storage_Ceph_Pool_Root(Resource):
volume_count:
type: integer
description: The number of volumes in the pool
tier:
type: string
description: The device class/tier of the pool
pgs:
type: integer
description: The number of PGs (placement groups) for the pool
stats:
type: object
properties:

@@ -4307,6 +4313,12 @@ class API_Storage_Ceph_Pool_Root(Resource):
"required": True,
"helptext": "A valid replication configuration must be specified.",
},
{
"name": "tier",
"required": False,
"choices": ("hdd", "ssd", "nvme", "default"),
"helptext": "A valid tier must be specified",
},
]
)
@Authenticator

@@ -4332,6 +4344,10 @@ class API_Storage_Ceph_Pool_Root(Resource):
type: string
required: true
description: The replication configuration (e.g. "copies=3,mincopies=2") for the pool
- in: query
name: tier
required: false
description: The device tier for the pool (hdd, ssd, nvme, or default)
responses:
200:
description: OK

@@ -4348,6 +4364,7 @@ class API_Storage_Ceph_Pool_Root(Resource):
reqargs.get("pool", None),
reqargs.get("pgs", None),
reqargs.get("replcfg", None),
reqargs.get("tier", None),
)

@@ -4388,6 +4405,12 @@ class API_Storage_Ceph_Pool_Element(Resource):
"required": True,
"helptext": "A valid replication configuration must be specified.",
},
{
"name": "tier",
"required": False,
"choices": ("hdd", "ssd", "nvme", "default"),
"helptext": "A valid tier must be specified",
},
]
)
@Authenticator

@@ -4408,6 +4431,10 @@ class API_Storage_Ceph_Pool_Element(Resource):
type: string
required: true
description: The replication configuration (e.g. "copies=3,mincopies=2") for the pool
- in: query
name: tier
required: false
description: The device tier for the pool (hdd, ssd, nvme, or default)
responses:
200:
description: OK

@@ -4426,7 +4453,54 @@ class API_Storage_Ceph_Pool_Element(Resource):
id: Message
"""
return api_helper.ceph_pool_add(
pool, reqargs.get("pgs", None), reqargs.get("replcfg", None)
pool,
reqargs.get("pgs", None),
reqargs.get("replcfg", None),
reqargs.get("tier", None),
)

@RequestParser(
[
{
"name": "pgs",
"required": True,
"helptext": "A placement group count must be specified.",
},
]
)
@Authenticator
def put(self, pool, reqargs):
"""
Adjust Ceph pool {pool}'s placement group count
---
tags:
- storage / ceph
parameters:
- in: query
name: pgs
type: integer
required: true
description: The new number of placement groups (PGs) for the pool
responses:
200:
description: OK
schema:
type: object
id: Message
404:
description: Not found
schema:
type: object
id: Message
400:
description: Bad request
schema:
type: object
id: Message
"""
return api_helper.ceph_pool_set_pgs(
pool,
reqargs.get("pgs", 0),
)

@RequestParser(
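
The API changes above add an optional "tier" argument to pool creation and a new PUT method on /api/v1/storage/ceph/pool/{pool} for adjusting placement groups. A minimal client sketch using the requests library follows; the base URL, pool name, and PG counts are illustrative assumptions, not values taken from this changeset, and any authentication the deployment requires is omitted.

    # Sketch only: BASE and POOL are assumptions, not part of this diff.
    import requests

    BASE = "http://pvc.local:7370/api/v1"  # hypothetical API address
    POOL = "vms-ssd"                       # hypothetical pool name

    # Create a pool restricted to the "ssd" device tier (new optional "tier" argument)
    r = requests.post(
        f"{BASE}/storage/ceph/pool",
        params={"pool": POOL, "pgs": 128, "replcfg": "copies=3,mincopies=2", "tier": "ssd"},
    )
    print(r.status_code, r.json().get("message"))

    # Adjust the placement group count of an existing pool (new PUT method)
    r = requests.put(f"{BASE}/storage/ceph/pool/{POOL}", params={"pgs": 256})
    print(r.status_code, r.json().get("message"))
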
@@ -1403,11 +1403,11 @@ def ceph_pool_list(zkhandler, limit=None, is_fuzzy=True):

@ZKConnection(config)
def ceph_pool_add(zkhandler, name, pgs, replcfg):
def ceph_pool_add(zkhandler, name, pgs, replcfg, tier=None):
"""
Add a Ceph RBD pool to the PVC Ceph storage cluster.
"""
retflag, retdata = pvc_ceph.add_pool(zkhandler, name, pgs, replcfg)
retflag, retdata = pvc_ceph.add_pool(zkhandler, name, pgs, replcfg, tier)

if retflag:
retcode = 200

@@ -1434,6 +1434,22 @@ def ceph_pool_remove(zkhandler, name):
return output, retcode


@ZKConnection(config)
def ceph_pool_set_pgs(zkhandler, name, pgs):
"""
Set the PGs of a ceph RBD pool.
"""
retflag, retdata = pvc_ceph.set_pgs_pool(zkhandler, name, pgs)

if retflag:
retcode = 200
else:
retcode = 400

output = {"message": retdata.replace('"', "'")}
return output, retcode


@pvc_common.Profiler(config)
@ZKConnection(config)
def ceph_volume_list(zkhandler, pool=None, limit=None, is_fuzzy=True):

@@ -708,7 +708,7 @@ def ceph_pool_info(config, pool):

def ceph_pool_list(config, limit):
"""
Get list information about Ceph OSDs (limited by {limit})
Get list information about Ceph pools (limited by {limit})

API endpoint: GET /api/v1/storage/ceph/pool
API arguments: limit={limit}

@@ -726,15 +726,15 @@ def ceph_pool_list(config, limit):
return False, response.json().get("message", "")


def ceph_pool_add(config, pool, pgs, replcfg):
def ceph_pool_add(config, pool, pgs, replcfg, tier):
"""
Add new Ceph OSD
Add new Ceph pool

API endpoint: POST /api/v1/storage/ceph/pool
API arguments: pool={pool}, pgs={pgs}, replcfg={replcfg}
API arguments: pool={pool}, pgs={pgs}, replcfg={replcfg}, tier={tier}
API schema: {"message":"{data}"}
"""
params = {"pool": pool, "pgs": pgs, "replcfg": replcfg}
params = {"pool": pool, "pgs": pgs, "replcfg": replcfg, "tier": tier}
response = call_api(config, "post", "/storage/ceph/pool", params=params)

if response.status_code == 200:

@@ -747,7 +747,7 @@ def ceph_pool_add(config, pool, pgs, replcfg):

def ceph_pool_remove(config, pool):
"""
Remove Ceph OSD
Remove Ceph pool

API endpoint: DELETE /api/v1/storage/ceph/pool/{pool}
API arguments:

@@ -766,6 +766,27 @@ def ceph_pool_remove(config, pool):
return retstatus, response.json().get("message", "")


def ceph_pool_set_pgs(config, pool, pgs):
"""
Set the PGs of a Ceph pool

API endpoint: PUT /api/v1/storage/ceph/pool/{pool}
API arguments: {"pgs": "{pgs}"}
API schema: {"message":"{data}"}
"""
params = {"pgs": pgs}
response = call_api(
config, "put", "/storage/ceph/pool/{pool}".format(pool=pool), params=params
)

if response.status_code == 200:
retstatus = True
else:
retstatus = False

return retstatus, response.json().get("message", "")


def format_list_pool(pool_list):
# Handle empty list
if not pool_list:

@@ -775,6 +796,8 @@ def format_list_pool(pool_list):

pool_name_length = 5
pool_id_length = 3
pool_tier_length = 5
pool_pgs_length = 4
pool_used_length = 5
pool_usedpct_length = 6
pool_free_length = 5

@@ -812,6 +835,16 @@ def format_list_pool(pool_list):
if _pool_id_length > pool_id_length:
pool_id_length = _pool_id_length

# Set the tier and length
_pool_tier_length = len(str(pool_information["tier"])) + 1
if _pool_tier_length > pool_tier_length:
pool_tier_length = _pool_tier_length

# Set the pgs and length
_pool_pgs_length = len(str(pool_information["pgs"])) + 1
if _pool_pgs_length > pool_pgs_length:
pool_pgs_length = _pool_pgs_length

# Set the used and length
_pool_used_length = len(str(pool_information["stats"]["used_bytes"])) + 1
if _pool_used_length > pool_used_length:

@@ -879,10 +912,12 @@ def format_list_pool(pool_list):
end_bold=ansiprint.end(),
pool_header_length=pool_id_length
+ pool_name_length
+ pool_tier_length
+ pool_pgs_length
+ pool_used_length
+ pool_usedpct_length
+ pool_free_length
+ 4,
+ 6,
objects_header_length=pool_num_objects_length
+ pool_num_clones_length
+ pool_num_copies_length

@@ -898,10 +933,12 @@ def format_list_pool(pool_list):
6,
pool_id_length
+ pool_name_length
+ pool_tier_length
+ pool_pgs_length
+ pool_used_length
+ pool_usedpct_length
+ pool_free_length
+ 3,
+ 5,
)
]
),

@@ -934,6 +971,8 @@ def format_list_pool(pool_list):
"{bold}\
{pool_id: <{pool_id_length}} \
{pool_name: <{pool_name_length}} \
{pool_tier: <{pool_tier_length}} \
{pool_pgs: <{pool_pgs_length}} \
{pool_used: <{pool_used_length}} \
{pool_usedpct: <{pool_usedpct_length}} \
{pool_free: <{pool_free_length}} \

@@ -950,6 +989,8 @@ def format_list_pool(pool_list):
end_bold=ansiprint.end(),
pool_id_length=pool_id_length,
pool_name_length=pool_name_length,
pool_tier_length=pool_tier_length,
pool_pgs_length=pool_pgs_length,
pool_used_length=pool_used_length,
pool_usedpct_length=pool_usedpct_length,
pool_free_length=pool_free_length,

@@ -963,6 +1004,8 @@ def format_list_pool(pool_list):
pool_read_data_length=pool_read_data_length,
pool_id="ID",
pool_name="Name",
pool_tier="Tier",
pool_pgs="PGs",
pool_used="Used",
pool_usedpct="Used%",
pool_free="Free",

@@ -983,6 +1026,8 @@ def format_list_pool(pool_list):
"{bold}\
{pool_id: <{pool_id_length}} \
{pool_name: <{pool_name_length}} \
{pool_tier: <{pool_tier_length}} \
{pool_pgs: <{pool_pgs_length}} \
{pool_used: <{pool_used_length}} \
{pool_usedpct: <{pool_usedpct_length}} \
{pool_free: <{pool_free_length}} \

@@ -999,6 +1044,8 @@ def format_list_pool(pool_list):
end_bold="",
pool_id_length=pool_id_length,
pool_name_length=pool_name_length,
pool_tier_length=pool_tier_length,
pool_pgs_length=pool_pgs_length,
pool_used_length=pool_used_length,
pool_usedpct_length=pool_usedpct_length,
pool_free_length=pool_free_length,

@@ -1012,6 +1059,8 @@ def format_list_pool(pool_list):
pool_read_data_length=pool_read_data_length,
pool_id=pool_information["stats"]["id"],
pool_name=pool_information["name"],
pool_tier=pool_information["tier"],
pool_pgs=pool_information["pgs"],
pool_used=pool_information["stats"]["used_bytes"],
pool_usedpct=pool_information["stats"]["used_percent"],
pool_free=pool_information["stats"]["free_bytes"],

@@ -3507,6 +3507,17 @@ def ceph_pool():
@click.command(name="add", short_help="Add new RBD pool.")
@click.argument("name")
@click.argument("pgs")
@click.option(
"-t",
"--tier",
"tier",
default="default",
show_default=True,
type=click.Choice(["default", "hdd", "ssd", "nvme"]),
help="""
The device tier to limit the pool to. Default is all OSD tiers, and specific tiers can be specified instead. At least one full set of OSDs for a given tier must be present for the tier to be specified, or the pool creation will fail.
""",
)
@click.option(
"--replcfg",
"replcfg",

@@ -3514,20 +3525,18 @@ def ceph_pool():
show_default=True,
required=False,
help="""
The replication configuration, specifying both a "copies" and "mincopies" value, separated by a
comma, e.g. "copies=3,mincopies=2". The "copies" value specifies the total number of replicas
and should not exceed the total number of nodes; the "mincopies" value specifies the minimum
number of available copies to allow writes. For additional details please see the Cluster
Architecture documentation.
The replication configuration, specifying both a "copies" and "mincopies" value, separated by a comma, e.g. "copies=3,mincopies=2". The "copies" value specifies the total number of replicas and should not exceed the total number of nodes; the "mincopies" value specifies the minimum number of available copies to allow writes. For additional details please see the Cluster Architecture documentation.
""",
)
@cluster_req
def ceph_pool_add(name, pgs, replcfg):
def ceph_pool_add(name, pgs, tier, replcfg):
"""
Add a new Ceph RBD pool with name NAME and PGS placement groups.

The placement group count must be a non-zero power of 2.
"""

retcode, retmsg = pvc_ceph.ceph_pool_add(config, name, pgs, replcfg)
retcode, retmsg = pvc_ceph.ceph_pool_add(config, name, pgs, replcfg, tier)
cleanup(retcode, retmsg)


@@ -3563,6 +3572,26 @@ def ceph_pool_remove(name, confirm_flag):
cleanup(retcode, retmsg)


###############################################################################
# pvc storage pool set-pgs
###############################################################################
@click.command(name="set-pgs", short_help="Set PGs of an RBD pool.")
@click.argument("name")
@click.argument("pgs")
@cluster_req
def ceph_pool_set_pgs(name, pgs):
"""
Set the placement groups (PGs) count for the pool NAME to PGS.

The placement group count must be a non-zero power of 2.

Placement group counts may be increased or decreased as required though frequent alteration is not recommended.
"""

retcode, retmsg = pvc_ceph.ceph_pool_set_pgs(config, name, pgs)
cleanup(retcode, retmsg)


###############################################################################
# pvc storage pool list
###############################################################################

@@ -5837,6 +5866,7 @@ ceph_osd.add_command(ceph_osd_list)

ceph_pool.add_command(ceph_pool_add)
ceph_pool.add_command(ceph_pool_remove)
ceph_pool.add_command(ceph_pool_set_pgs)
ceph_pool.add_command(ceph_pool_list)

ceph_volume.add_command(ceph_volume_add)
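
Taken together, the click additions above surface as two user-facing commands: "pvc storage pool add" gains a -t/--tier option (one of default, hdd, ssd, or nvme), and the new "pvc storage pool set-pgs NAME PGS" command adjusts an existing pool's placement group count, for example "pvc storage pool set-pgs vms 256" (the pool name and count here are illustrative only, not taken from this changeset).
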
@@ -2,7 +2,7 @@ from setuptools import setup

setup(
name="pvc",
version="0.9.46",
version="0.9.47",
packages=["pvc", "pvc.cli_lib"],
install_requires=[
"Click",

@@ -393,12 +393,22 @@ def getPoolInformation(zkhandler, pool):
pool_stats_raw = zkhandler.read(("pool.stats", pool))
pool_stats = dict(json.loads(pool_stats_raw))
volume_count = len(getCephVolumes(zkhandler, pool))
tier = zkhandler.read(("pool.tier", pool))
if tier is None:
tier = "default"
pgs = zkhandler.read(("pool.pgs", pool))

pool_information = {"name": pool, "volume_count": volume_count, "stats": pool_stats}
pool_information = {
"name": pool,
"volume_count": volume_count,
"tier": tier,
"pgs": pgs,
"stats": pool_stats,
}
return pool_information


def add_pool(zkhandler, name, pgs, replcfg):
def add_pool(zkhandler, name, pgs, replcfg, tier=None):
# Prepare the copies/mincopies variables
try:
copies, mincopies = replcfg.split(",")

@@ -408,60 +418,70 @@ def add_pool(zkhandler, name, pgs, replcfg):
copies = None
mincopies = None
if not copies or not mincopies:
return False, 'ERROR: Replication configuration "{}" is not valid.'.format(
replcfg
)
return False, f'ERROR: Replication configuration "{replcfg}" is not valid.'

# 1. Create the pool
# Prepare the tiers if applicable
if tier is not None and tier in ["hdd", "ssd", "nvme"]:
crush_rule = f"{tier}_tier"
# Create a CRUSH rule for the relevant tier
retcode, stdout, stderr = common.run_os_command(
"ceph osd pool create {} {} replicated".format(name, pgs)
)
if retcode:
return False, 'ERROR: Failed to create pool "{}" with {} PGs: {}'.format(
name, pgs, stderr
)

# 2. Set the size and minsize
retcode, stdout, stderr = common.run_os_command(
"ceph osd pool set {} size {}".format(name, copies)
)
if retcode:
return False, 'ERROR: Failed to set pool "{}" size of {}: {}'.format(
name, copies, stderr
)

retcode, stdout, stderr = common.run_os_command(
"ceph osd pool set {} min_size {}".format(name, mincopies)
)
if retcode:
return False, 'ERROR: Failed to set pool "{}" minimum size of {}: {}'.format(
name, mincopies, stderr
)

# 3. Enable RBD application
retcode, stdout, stderr = common.run_os_command(
"ceph osd pool application enable {} rbd".format(name)
f"ceph osd crush rule create-replicated {crush_rule} default host {tier}"
)
if retcode:
return (
False,
'ERROR: Failed to enable RBD application on pool "{}" : {}'.format(
name, stderr
),
f"ERROR: Failed to create CRUSH rule {tier} for pool {name}: {stderr}",
)
else:
tier = "default"
crush_rule = "replicated"

# Create the pool
retcode, stdout, stderr = common.run_os_command(
f"ceph osd pool create {name} {pgs} {pgs} {crush_rule}"
)
if retcode:
return False, f'ERROR: Failed to create pool "{name}" with {pgs} PGs: {stderr}'

# Set the size and minsize
retcode, stdout, stderr = common.run_os_command(
f"ceph osd pool set {name} size {copies}"
)
if retcode:
return False, f'ERROR: Failed to set pool "{name}" size of {copies}: {stderr}'

retcode, stdout, stderr = common.run_os_command(
f"ceph osd pool set {name} min_size {mincopies}"
)
if retcode:
return (
False,
f'ERROR: Failed to set pool "{name}" minimum size of {mincopies}: {stderr}',
)

# 4. Add the new pool to Zookeeper
# Enable RBD application
retcode, stdout, stderr = common.run_os_command(
f"ceph osd pool application enable {name} rbd"
)
if retcode:
return (
False,
f'ERROR: Failed to enable RBD application on pool "{name}" : {stderr}',
)

# Add the new pool to Zookeeper
zkhandler.write(
[
(("pool", name), ""),
(("pool.pgs", name), pgs),
(("pool.tier", name), tier),
(("pool.stats", name), "{}"),
(("volume", name), ""),
(("snapshot", name), ""),
]
)

return True, 'Created RBD pool "{}" with {} PGs'.format(name, pgs)
return True, f'Created RBD pool "{name}" with {pgs} PGs'


def remove_pool(zkhandler, name):

@@ -493,6 +513,47 @@ def remove_pool(zkhandler, name):
return True, 'Removed RBD pool "{}" and all volumes.'.format(name)


def set_pgs_pool(zkhandler, name, pgs):
if not verifyPool(zkhandler, name):
return False, f'ERROR: No pool with name "{name}" is present in the cluster.'

# Validate new PGs count
pgs = int(pgs)
if (pgs == 0) or (pgs & (pgs - 1) != 0):
return (
False,
f'ERROR: Invalid PGs number "{pgs}": must be a non-zero power of 2.',
)

# Set the new pgs number
retcode, stdout, stderr = common.run_os_command(
f"ceph osd pool set {name} pg_num {pgs}"
)
if retcode:
return False, f"ERROR: Failed to set pg_num on pool {name} to {pgs}: {stderr}"

# Set the new pgps number if increasing
current_pgs = int(zkhandler.read(("pool.pgs", name)))
if current_pgs >= pgs:
retcode, stdout, stderr = common.run_os_command(
f"ceph osd pool set {name} pgp_num {pgs}"
)
if retcode:
return (
False,
f"ERROR: Failed to set pg_num on pool {name} to {pgs}: {stderr}",
)

# Update Zookeeper count
zkhandler.write(
[
(("pool.pgs", name), pgs),
]
)

return True, f'Set PGs count to {pgs} for RBD pool "{name}".'


def get_list_pool(zkhandler, limit, is_fuzzy=True):
full_pool_list = zkhandler.children("base.pool")

@@ -526,7 +587,7 @@ def get_list_pool(zkhandler, limit, is_fuzzy=True):
for future in futures:
pool_data_list.append(future.result())

return True, sorted(pool_data_list, key=lambda x: int(x["stats"]["id"]))
return True, sorted(pool_data_list, key=lambda x: int(x["stats"].get("id", 0)))


#
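
The PG-count validation in set_pgs_pool() above uses the standard bit trick: a positive integer is a power of two exactly when pgs & (pgs - 1) == 0. A standalone sketch of that check follows; the helper name is illustrative, not part of the codebase.

    # Illustrative helper mirroring the validation in set_pgs_pool(); not part of PVC itself.
    def is_valid_pg_count(pgs: int) -> bool:
        """Return True if pgs is a non-zero power of 2 (e.g. 32, 64, 128, 256)."""
        return pgs > 0 and (pgs & (pgs - 1)) == 0

    assert is_valid_pg_count(128)      # 0b10000000 -> a power of 2
    assert not is_valid_pg_count(0)    # zero is rejected
    assert not is_valid_pg_count(384)  # 0b110000000 -> not a power of 2
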
daemon-common/migrations/versions/7.json (1 line, new file)
@@ -0,0 +1 @@
{"version": "7", "root": "", "base": {"root": "", "schema": "/schema", "schema.version": "/schema/version", "config": "/config", "config.maintenance": "/config/maintenance", "config.primary_node": "/config/primary_node", "config.primary_node.sync_lock": "/config/primary_node/sync_lock", "config.upstream_ip": "/config/upstream_ip", "config.migration_target_selector": "/config/migration_target_selector", "cmd": "/cmd", "cmd.node": "/cmd/nodes", "cmd.domain": "/cmd/domains", "cmd.ceph": "/cmd/ceph", "logs": "/logs", "node": "/nodes", "domain": "/domains", "network": "/networks", "storage": "/ceph", "storage.util": "/ceph/util", "osd": "/ceph/osds", "pool": "/ceph/pools", "volume": "/ceph/volumes", "snapshot": "/ceph/snapshots"}, "logs": {"node": "", "messages": "/messages"}, "node": {"name": "", "keepalive": "/keepalive", "mode": "/daemonmode", "data.active_schema": "/activeschema", "data.latest_schema": "/latestschema", "data.static": "/staticdata", "data.pvc_version": "/pvcversion", "running_domains": "/runningdomains", "count.provisioned_domains": "/domainscount", "count.networks": "/networkscount", "state.daemon": "/daemonstate", "state.router": "/routerstate", "state.domain": "/domainstate", "cpu.load": "/cpuload", "vcpu.allocated": "/vcpualloc", "memory.total": "/memtotal", "memory.used": "/memused", "memory.free": "/memfree", "memory.allocated": "/memalloc", "memory.provisioned": "/memprov", "ipmi.hostname": "/ipmihostname", "ipmi.username": "/ipmiusername", "ipmi.password": "/ipmipassword", "sriov": "/sriov", "sriov.pf": "/sriov/pf", "sriov.vf": "/sriov/vf"}, "sriov_pf": {"phy": "", "mtu": "/mtu", "vfcount": "/vfcount"}, "sriov_vf": {"phy": "", "pf": "/pf", "mtu": "/mtu", "mac": "/mac", "phy_mac": "/phy_mac", "config": "/config", "config.vlan_id": "/config/vlan_id", "config.vlan_qos": "/config/vlan_qos", "config.tx_rate_min": "/config/tx_rate_min", "config.tx_rate_max": "/config/tx_rate_max", "config.spoof_check": "/config/spoof_check", "config.link_state": "/config/link_state", "config.trust": "/config/trust", "config.query_rss": "/config/query_rss", "pci": "/pci", "pci.domain": "/pci/domain", "pci.bus": "/pci/bus", "pci.slot": "/pci/slot", "pci.function": "/pci/function", "used": "/used", "used_by": "/used_by"}, "domain": {"name": "", "xml": "/xml", "state": "/state", "profile": "/profile", "stats": "/stats", "node": "/node", "last_node": "/lastnode", "failed_reason": "/failedreason", "storage.volumes": "/rbdlist", "console.log": "/consolelog", "console.vnc": "/vnc", "meta.autostart": "/node_autostart", "meta.migrate_method": "/migration_method", "meta.node_selector": "/node_selector", "meta.node_limit": "/node_limit", "meta.tags": "/tags", "migrate.sync_lock": "/migrate_sync_lock"}, "tag": {"name": "", "type": "/type", "protected": "/protected"}, "network": {"vni": "", "type": "/nettype", "mtu": "/mtu", "rule": "/firewall_rules", "rule.in": "/firewall_rules/in", "rule.out": "/firewall_rules/out", "nameservers": "/name_servers", "domain": "/domain", "reservation": "/dhcp4_reservations", "lease": "/dhcp4_leases", "ip4.gateway": "/ip4_gateway", "ip4.network": "/ip4_network", "ip4.dhcp": "/dhcp4_flag", "ip4.dhcp_start": "/dhcp4_start", "ip4.dhcp_end": "/dhcp4_end", "ip6.gateway": "/ip6_gateway", "ip6.network": "/ip6_network", "ip6.dhcp": "/dhcp6_flag"}, "reservation": {"mac": "", "ip": "/ipaddr", "hostname": "/hostname"}, "lease": {"mac": "", "ip": "/ipaddr", "hostname": "/hostname", "expiry": "/expiry", "client_id": "/clientid"}, "rule": {"description": "", "rule": "/rule", "order": 
"/order"}, "osd": {"id": "", "node": "/node", "device": "/device", "db_device": "/db_device", "stats": "/stats"}, "pool": {"name": "", "pgs": "/pgs", "tier": "/tier", "stats": "/stats"}, "volume": {"name": "", "stats": "/stats"}, "snapshot": {"name": "", "stats": "/stats"}}

@@ -540,7 +540,7 @@ class ZKHandler(object):
#
class ZKSchema(object):
# Current version
_version = 6
_version = 7

# Root for doing nested keys
_schema_root = ""

@@ -703,7 +703,12 @@ class ZKSchema(object):
"stats": "/stats",
},
# The schema of an individual pool entry (/ceph/pools/{pool_name})
"pool": {"name": "", "pgs": "/pgs", "stats": "/stats"}, # The root key
"pool": {
"name": "",
"pgs": "/pgs",
"tier": "/tier",
"stats": "/stats",
}, # The root key
# The schema of an individual volume entry (/ceph/volumes/{pool_name}/{volume_name})
"volume": {"name": "", "stats": "/stats"}, # The root key
# The schema of an individual snapshot entry (/ceph/volumes/{pool_name}/{volume_name}/{snapshot_name})

@@ -938,8 +943,13 @@ class ZKSchema(object):
kpath = f"{elem}.{ikey}"
# Validate that the key exists for that child
if not zkhandler.zk_conn.exists(self.path(kpath, child)):
if elem == "pool" and ikey == "tier":
default_data = "default"
else:
default_data = ""
zkhandler.zk_conn.create(
self.path(kpath, child), "".encode(zkhandler.encoding)
self.path(kpath, child),
default_data.encode(zkhandler.encoding),
)

# Continue for child keys under network (reservation, acl)

debian/changelog (9 lines changed, vendored)
@@ -1,3 +1,12 @@
pvc (0.9.47-0) unstable; urgency=high

* [Node Daemon/API/CLI] Adds Ceph pool device class/tier support
* [API] Fixes a bug returning values if a Ceph pool has not yet reported stats
* [API/CLI] Adds PGs count to the pool list output
* [API/CLI] Adds Ceph pool PGs count adjustment support

-- Joshua M. Boniface <joshua@boniface.me> Tue, 28 Dec 2021 22:01:22 -0500

pvc (0.9.46-0) unstable; urgency=high

* [API] Fixes bugs with legacy benchmark display

@@ -664,6 +664,10 @@
"description": "The name of the pool",
"type": "string"
},
"pgs": {
"description": "The number of PGs (placement groups) for the pool",
"type": "integer"
},
"stats": {
"properties": {
"free_bytes": {

@@ -729,6 +733,10 @@
},
"type": "object"
},
"tier": {
"description": "The device class/tier of the pool",
"type": "string"
},
"volume_count": {
"description": "The number of volumes in the pool",
"type": "integer"

@@ -5272,6 +5280,12 @@
"name": "replcfg",
"required": true,
"type": "string"
},
{
"description": "The device tier for the pool (hdd, ssd, nvme, or default)",
"in": "query",
"name": "tier",
"required": false
}
],
"responses": {

@@ -5368,6 +5382,12 @@
"name": "replcfg",
"required": true,
"type": "string"
},
{
"description": "The device tier for the pool (hdd, ssd, nvme, or default)",
"in": "query",
"name": "tier",
"required": false
}
],
"responses": {

@@ -5394,6 +5414,42 @@
"tags": [
"storage / ceph"
]
},
"put": {
"description": "",
"parameters": [
{
"description": "The new number of placement groups (PGs) for the pool",
"in": "query",
"name": "pgs",
"required": true,
"type": "integer"
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/Message"
}
},
"400": {
"description": "Bad request",
"schema": {
"$ref": "#/definitions/Message"
}
},
"404": {
"description": "Not found",
"schema": {
"$ref": "#/definitions/Message"
}
}
},
"summary": "Adjust Ceph pool {pool}'s placement group count",
"tags": [
"storage / ceph"
]
}
},
"/api/v1/storage/ceph/snapshot": {

@@ -48,7 +48,7 @@ import re
import json

# Daemon version
version = "0.9.46"
version = "0.9.47"


##########################################################