Add device class tiers to Ceph pools

Allows specifying a particular device class ("tier") for a given pool,
for instance SSD-only or NVMe-only. This is implemented with Crush
rules on the Ceph side, and via an additional new key in the pool
Zookeeper schema which defaults to "default".
This commit is contained in:
2021-12-28 20:39:50 -05:00
parent 58d57d7037
commit 25fe45dd28
8 changed files with 139 additions and 47 deletions

View File

@ -726,15 +726,15 @@ def ceph_pool_list(config, limit):
return False, response.json().get("message", "")
def ceph_pool_add(config, pool, pgs, replcfg):
def ceph_pool_add(config, pool, pgs, replcfg, tier):
"""
Add new Ceph RBD pool
API endpoint: POST /api/v1/storage/ceph/pool
API arguments: pool={pool}, pgs={pgs}, replcfg={replcfg}
API arguments: pool={pool}, pgs={pgs}, replcfg={replcfg}, tier={tier}
API schema: {"message":"{data}"}
"""
params = {"pool": pool, "pgs": pgs, "replcfg": replcfg}
params = {"pool": pool, "pgs": pgs, "replcfg": replcfg, "tier": tier}
response = call_api(config, "post", "/storage/ceph/pool", params=params)
if response.status_code == 200:
@ -775,6 +775,7 @@ def format_list_pool(pool_list):
pool_name_length = 5
pool_id_length = 3
pool_tier_length = 5
pool_used_length = 5
pool_usedpct_length = 6
pool_free_length = 5
@ -812,6 +813,11 @@ def format_list_pool(pool_list):
if _pool_id_length > pool_id_length:
pool_id_length = _pool_id_length
# Set the tier and length
_pool_tier_length = len(str(pool_information["tier"])) + 1
if _pool_tier_length > pool_tier_length:
pool_tier_length = _pool_tier_length
# Set the used and length
_pool_used_length = len(str(pool_information["stats"]["used_bytes"])) + 1
if _pool_used_length > pool_used_length:
@ -879,10 +885,11 @@ def format_list_pool(pool_list):
end_bold=ansiprint.end(),
pool_header_length=pool_id_length
+ pool_name_length
+ pool_tier_length
+ pool_used_length
+ pool_usedpct_length
+ pool_free_length
+ 4,
+ 5,
objects_header_length=pool_num_objects_length
+ pool_num_clones_length
+ pool_num_copies_length
@ -934,6 +941,7 @@ def format_list_pool(pool_list):
"{bold}\
{pool_id: <{pool_id_length}} \
{pool_name: <{pool_name_length}} \
{pool_tier: <{pool_tier_length}} \
{pool_used: <{pool_used_length}} \
{pool_usedpct: <{pool_usedpct_length}} \
{pool_free: <{pool_free_length}} \
@ -950,6 +958,7 @@ def format_list_pool(pool_list):
end_bold=ansiprint.end(),
pool_id_length=pool_id_length,
pool_name_length=pool_name_length,
pool_tier_length=pool_tier_length,
pool_used_length=pool_used_length,
pool_usedpct_length=pool_usedpct_length,
pool_free_length=pool_free_length,
@ -963,6 +972,7 @@ def format_list_pool(pool_list):
pool_read_data_length=pool_read_data_length,
pool_id="ID",
pool_name="Name",
pool_tier="Tier",
pool_used="Used",
pool_usedpct="Used%",
pool_free="Free",
@ -983,6 +993,7 @@ def format_list_pool(pool_list):
"{bold}\
{pool_id: <{pool_id_length}} \
{pool_name: <{pool_name_length}} \
{pool_tier: <{pool_tier_length}} \
{pool_used: <{pool_used_length}} \
{pool_usedpct: <{pool_usedpct_length}} \
{pool_free: <{pool_free_length}} \
@ -999,6 +1010,7 @@ def format_list_pool(pool_list):
end_bold="",
pool_id_length=pool_id_length,
pool_name_length=pool_name_length,
pool_tier_length=pool_tier_length,
pool_used_length=pool_used_length,
pool_usedpct_length=pool_usedpct_length,
pool_free_length=pool_free_length,
@ -1012,6 +1024,7 @@ def format_list_pool(pool_list):
pool_read_data_length=pool_read_data_length,
pool_id=pool_information["stats"]["id"],
pool_name=pool_information["name"],
pool_tier=pool_information["tier"],
pool_used=pool_information["stats"]["used_bytes"],
pool_usedpct=pool_information["stats"]["used_percent"],
pool_free=pool_information["stats"]["free_bytes"],

View File

@ -3507,6 +3507,17 @@ def ceph_pool():
@click.command(name="add", short_help="Add new RBD pool.")
@click.argument("name")
@click.argument("pgs")
@click.option(
"-t",
"--tier",
"tier",
default="default",
show_default=True,
type=click.Choice(["default", "hdd", "ssd", "nvme"]),
help="""
The device tier to limit the pool to. Default is all OSD tiers, and specific tiers can be specified instead. At least one full set of OSDs for a given tier must be present for the tier to be specified, or the pool creation will fail.
""",
)
@click.option(
"--replcfg",
"replcfg",
@ -3514,20 +3525,16 @@ def ceph_pool():
show_default=True,
required=False,
help="""
The replication configuration, specifying both a "copies" and "mincopies" value, separated by a
comma, e.g. "copies=3,mincopies=2". The "copies" value specifies the total number of replicas
and should not exceed the total number of nodes; the "mincopies" value specifies the minimum
number of available copies to allow writes. For additional details please see the Cluster
Architecture documentation.
The replication configuration, specifying both a "copies" and "mincopies" value, separated by a comma, e.g. "copies=3,mincopies=2". The "copies" value specifies the total number of replicas and should not exceed the total number of nodes; the "mincopies" value specifies the minimum number of available copies to allow writes. For additional details please see the Cluster Architecture documentation.
""",
)
@cluster_req
def ceph_pool_add(name, pgs, replcfg):
def ceph_pool_add(name, pgs, tier, replcfg):
"""
Add a new Ceph RBD pool with name NAME and PGS placement groups.
"""
retcode, retmsg = pvc_ceph.ceph_pool_add(config, name, pgs, replcfg)
retcode, retmsg = pvc_ceph.ceph_pool_add(config, name, pgs, replcfg, tier)
cleanup(retcode, retmsg)