RDMA/ionic: Implement device stats ops

Implement device stats operations for hw stats and qp stats.

Co-developed-by: Allen Hubbe <allen.hubbe@amd.com>
Signed-off-by: Allen Hubbe <allen.hubbe@amd.com>
Signed-off-by: Abhijit Gangurde <abhijit.gangurde@amd.com>
Link: https://patch.msgid.link/20250903061606.4139957-14-abhijit.gangurde@amd.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
This commit is contained in:
Abhijit Gangurde 2025-09-03 11:46:05 +05:30 committed by Leon Romanovsky
parent 2075bbe8ef
commit ea4c399642
4 changed files with 554 additions and 0 deletions

View File

@ -659,6 +659,17 @@ static inline int ionic_v1_use_spec_sge(int min_sge, int spec)
return spec;
}
/*
 * Admin command payload for the stats opcodes: describes the host buffer
 * the device should fill (with stat descriptors or stat values) and which
 * queue the request is for.
 */
struct ionic_admin_stats_hdr {
__le64 dma_addr; /* DMA address of the host buffer to fill */
__le32 length; /* length of the host buffer, in bytes */
__le32 id_ver; /* queue id for per-queue stats (zero for device stats) */
__u8 type_state;
} __packed;
/* Wire size of ionic_admin_stats_hdr inside the v1 admin WQE (17 bytes, packed). */
#define IONIC_ADMIN_STATS_HDRS_IN_V1_LEN 17
static_assert(sizeof(struct ionic_admin_stats_hdr) ==
IONIC_ADMIN_STATS_HDRS_IN_V1_LEN);
struct ionic_admin_create_ah {
__le64 dma_addr;
__le32 length;
@ -837,6 +848,7 @@ struct ionic_v1_admin_wqe {
__le16 len;
union {
struct ionic_admin_stats_hdr stats;
struct ionic_admin_create_ah create_ah;
struct ionic_admin_destroy_ah destroy_ah;
struct ionic_admin_query_ah query_ah;
@ -983,4 +995,35 @@ static inline u32 ionic_v1_eqe_evt_qid(u32 evt)
return evt >> IONIC_V1_EQE_QID_SHIFT;
}
/*
 * Encoding of the type_off word in struct ionic_v1_stat: bits above
 * IONIC_V1_STAT_TYPE_SHIFT hold the value-type code, the low bits
 * (IONIC_V1_STAT_OFF_MASK) hold the byte offset of the value within the
 * stats values buffer.  Multi-byte type codes carry the wire byte order
 * (LE/BE) of the value.
 */
enum ionic_v1_stat_bits {
IONIC_V1_STAT_TYPE_SHIFT = 28,
IONIC_V1_STAT_TYPE_NONE = 0, /* terminates the descriptor list */
IONIC_V1_STAT_TYPE_8 = 1,
IONIC_V1_STAT_TYPE_LE16 = 2,
IONIC_V1_STAT_TYPE_LE32 = 3,
IONIC_V1_STAT_TYPE_LE64 = 4,
IONIC_V1_STAT_TYPE_BE16 = 5,
IONIC_V1_STAT_TYPE_BE32 = 6,
IONIC_V1_STAT_TYPE_BE64 = 7,
IONIC_V1_STAT_OFF_MASK = BIT(IONIC_V1_STAT_TYPE_SHIFT) - 1,
};
/*
 * One stat descriptor as reported by the device.  The device supplies
 * be_type_off in big endian; ionic_v1_stat_normalize() converts it in
 * place so later accesses use the host-order type_off member.
 */
struct ionic_v1_stat {
union {
__be32 be_type_off; /* wire form, as received from the device */
u32 type_off; /* host order, valid after normalize */
};
char name[28]; /* NUL-terminated after normalize */
};
static inline int ionic_v1_stat_type(struct ionic_v1_stat *hdr)
{
return hdr->type_off >> IONIC_V1_STAT_TYPE_SHIFT;
}
static inline unsigned int ionic_v1_stat_off(struct ionic_v1_stat *hdr)
{
return hdr->type_off & IONIC_V1_STAT_OFF_MASK;
}
#endif /* _IONIC_FW_H_ */

View File

@ -0,0 +1,484 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
#include <linux/dma-mapping.h>
#include "ionic_fw.h"
#include "ionic_ibdev.h"
/*
 * Convert each stat descriptor from device (big endian) byte order to
 * host order in place and force NUL termination of its name.  Returns
 * the count of leading descriptors with a valid (non-NONE) type.
 */
static int ionic_v1_stat_normalize(struct ionic_v1_stat *hw_stats,
				   int hw_stats_count)
{
	int i = 0;

	while (i < hw_stats_count) {
		struct ionic_v1_stat *stat = &hw_stats[i];

		stat->type_off = be32_to_cpu(stat->be_type_off);
		stat->name[sizeof(stat->name) - 1] = 0;
		if (ionic_v1_stat_type(stat) == IONIC_V1_STAT_TYPE_NONE)
			break;

		i++;
	}

	return i;
}
/* Point each rdma_stat_desc name at the corresponding descriptor's name. */
static void ionic_fill_stats_desc(struct rdma_stat_desc *hw_stats_hdrs,
				  struct ionic_v1_stat *hw_stats,
				  int hw_stats_count)
{
	int i;

	for (i = 0; i < hw_stats_count; i++)
		hw_stats_hdrs[i].name = hw_stats[i].name;
}
/*
 * Read one stat value out of the raw values buffer, converting from the
 * wire byte order encoded in the descriptor.  Returns all-ones if the
 * value would fall outside the buffer, is misaligned for its type, or
 * the type code is not recognized.
 */
static u64 ionic_v1_stat_val(struct ionic_v1_stat *stat,
			     void *vals_buf, size_t vals_len)
{
	unsigned int off = ionic_v1_stat_off(stat);

/* true if a value of __type at off lies within the buffer, aligned */
#define __ionic_v1_stat_fits(__type)			\
	((off + sizeof(__type) <= vals_len) &&		\
	 (IS_ALIGNED(off, sizeof(__type))))

	switch (ionic_v1_stat_type(stat)) {
	case IONIC_V1_STAT_TYPE_8:
		if (__ionic_v1_stat_fits(u8))
			return *(u8 *)(vals_buf + off);
		break;
	case IONIC_V1_STAT_TYPE_LE16:
		if (__ionic_v1_stat_fits(__le16))
			return le16_to_cpu(*(__le16 *)(vals_buf + off));
		break;
	case IONIC_V1_STAT_TYPE_LE32:
		if (__ionic_v1_stat_fits(__le32))
			return le32_to_cpu(*(__le32 *)(vals_buf + off));
		break;
	case IONIC_V1_STAT_TYPE_LE64:
		if (__ionic_v1_stat_fits(__le64))
			return le64_to_cpu(*(__le64 *)(vals_buf + off));
		break;
	case IONIC_V1_STAT_TYPE_BE16:
		if (__ionic_v1_stat_fits(__be16))
			return be16_to_cpu(*(__be16 *)(vals_buf + off));
		break;
	case IONIC_V1_STAT_TYPE_BE32:
		if (__ionic_v1_stat_fits(__be32))
			return be32_to_cpu(*(__be32 *)(vals_buf + off));
		break;
	case IONIC_V1_STAT_TYPE_BE64:
		if (__ionic_v1_stat_fits(__be64))
			return be64_to_cpu(*(__be64 *)(vals_buf + off));
		break;
	}

	return ~0ull;
#undef __ionic_v1_stat_fits
}
/*
 * Post a stats admin command describing the host buffer at dma/len for
 * queue qid, and wait (interruptibly) for its completion.  Returns
 * -EBADRQC when the device does not advertise the requested opcode.
 */
static int ionic_hw_stats_cmd(struct ionic_ibdev *dev,
			      dma_addr_t dma, size_t len, int qid, int op)
{
	struct ionic_admin_wr wr = {
		.work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
		.wqe = {
			.op = op,
			.len = cpu_to_le16(IONIC_ADMIN_STATS_HDRS_IN_V1_LEN),
			.cmd.stats = {
				.dma_addr = cpu_to_le64(dma),
				.length = cpu_to_le32(len),
				.id_ver = cpu_to_le32(qid),
			},
		}
	};

	/* the device must support this opcode */
	if (op >= dev->lif_cfg.admin_opcodes)
		return -EBADRQC;

	ionic_admin_post(dev, &wr);

	return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_INTERRUPT);
}
/*
 * Fetch the device's stat descriptors once and build the name array used
 * by alloc_hw_port_stats.  Idempotent: returns 0 immediately if already
 * initialized.  On failure, all hw-stats state is reset to NULL so the
 * device simply runs without global hw stats support.
 */
static int ionic_init_hw_stats(struct ionic_ibdev *dev)
{
dma_addr_t hw_stats_dma;
int rc, hw_stats_count;
/* already initialized by an earlier call */
if (dev->hw_stats_hdrs)
return 0;
dev->hw_stats_count = 0;
/* buffer for current values from the device */
dev->hw_stats_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!dev->hw_stats_buf) {
rc = -ENOMEM;
goto err_buf;
}
/* buffer for names, sizes, offsets of values */
dev->hw_stats = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!dev->hw_stats) {
rc = -ENOMEM;
goto err_hw_stats;
}
/* request the names, sizes, offsets */
hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, dev->hw_stats,
PAGE_SIZE, DMA_FROM_DEVICE);
rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
if (rc)
goto err_dma;
rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE, 0,
IONIC_V1_ADMIN_STATS_HDRS);
if (rc)
goto err_cmd;
/* unmap before the CPU parses the descriptors the device wrote */
dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
/* normalize and count the number of hw_stats */
hw_stats_count =
ionic_v1_stat_normalize(dev->hw_stats,
PAGE_SIZE / sizeof(*dev->hw_stats));
if (!hw_stats_count) {
rc = -ENODATA;
goto err_dma;
}
dev->hw_stats_count = hw_stats_count;
/* alloc and init array of names, for alloc_hw_stats */
dev->hw_stats_hdrs = kcalloc(hw_stats_count,
sizeof(*dev->hw_stats_hdrs),
GFP_KERNEL);
if (!dev->hw_stats_hdrs) {
rc = -ENOMEM;
goto err_dma;
}
ionic_fill_stats_desc(dev->hw_stats_hdrs, dev->hw_stats,
hw_stats_count);
return 0;
err_cmd:
/* still mapped only on this path; err_dma is reached after unmap */
dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
err_dma:
kfree(dev->hw_stats);
err_hw_stats:
kfree(dev->hw_stats_buf);
err_buf:
/* leave a clean "no hw stats" state behind */
dev->hw_stats_count = 0;
dev->hw_stats = NULL;
dev->hw_stats_buf = NULL;
dev->hw_stats_hdrs = NULL;
return rc;
}
/* alloc_hw_port_stats op: this device exposes stats on port 1 only. */
static struct rdma_hw_stats *ionic_alloc_hw_stats(struct ib_device *ibdev,
						  u32 port)
{
	struct ionic_ibdev *dev;

	if (port != 1)
		return NULL;

	dev = to_ionic_ibdev(ibdev);

	return rdma_alloc_hw_stats_struct(dev->hw_stats_hdrs,
					  dev->hw_stats_count,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
/*
 * get_hw_stats op: have the device DMA current stat values into
 * hw_stats_buf, then decode each value per its descriptor.  Returns the
 * number of values filled in, or a negative errno.
 */
static int ionic_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *hw_stats,
u32 port, int index)
{
struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
dma_addr_t hw_stats_dma;
int rc, hw_stat_i;
if (port != 1)
return -EINVAL;
hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, dev->hw_stats_buf,
PAGE_SIZE, DMA_FROM_DEVICE);
rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
if (rc)
goto err_dma;
rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE,
0, IONIC_V1_ADMIN_STATS_VALS);
if (rc)
goto err_cmd;
/* unmap before the CPU reads the values the device wrote */
dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma,
PAGE_SIZE, DMA_FROM_DEVICE);
for (hw_stat_i = 0; hw_stat_i < dev->hw_stats_count; ++hw_stat_i)
hw_stats->value[hw_stat_i] =
ionic_v1_stat_val(&dev->hw_stats[hw_stat_i],
dev->hw_stats_buf, PAGE_SIZE);
return hw_stat_i;
err_cmd:
dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma,
PAGE_SIZE, DMA_FROM_DEVICE);
err_dma:
return rc;
}
/*
 * counter_alloc_stats op: allocate per-counter state (a values buffer and
 * the list of bound QPs), register it in the xarray under the counter id,
 * and return the rdma_hw_stats to hold the accumulated values.
 *
 * Fix: if rdma_alloc_hw_stats_struct() fails, unwind the xarray entry and
 * free cntr/cntr->vals.  Previously the stale entry and buffers leaked,
 * since counter_dealloc is never called for a counter whose stats
 * allocation failed.
 */
static struct rdma_hw_stats *
ionic_counter_alloc_stats(struct rdma_counter *counter)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
	struct rdma_hw_stats *stats;
	struct ionic_counter *cntr;
	int err;

	cntr = kzalloc(sizeof(*cntr), GFP_KERNEL);
	if (!cntr)
		return NULL;

	/* buffer for current values from the device */
	cntr->vals = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cntr->vals)
		goto err_vals;

	err = xa_alloc(&dev->counter_stats->xa_counters, &counter->id,
		       cntr,
		       XA_LIMIT(0, IONIC_MAX_QPID),
		       GFP_KERNEL);
	if (err)
		goto err_xa;

	INIT_LIST_HEAD(&cntr->qp_list);

	stats = rdma_alloc_hw_stats_struct(dev->counter_stats->stats_hdrs,
					   dev->counter_stats->queue_stats_count,
					   RDMA_HW_STATS_DEFAULT_LIFESPAN);
	if (!stats)
		goto err_stats;

	return stats;

err_stats:
	xa_erase(&dev->counter_stats->xa_counters, counter->id);
err_xa:
	kfree(cntr->vals);
err_vals:
	kfree(cntr);

	return NULL;
}
/* counter_dealloc op: remove the counter's state and free its buffers. */
static int ionic_counter_dealloc(struct rdma_counter *counter)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
	struct ionic_counter *cntr =
		xa_erase(&dev->counter_stats->xa_counters, counter->id);

	if (!cntr)
		return -EINVAL;

	kfree(cntr->vals);
	kfree(cntr);

	return 0;
}
/* counter_bind_qp op: attach the QP to the counter's qp_list. */
static int ionic_counter_bind_qp(struct rdma_counter *counter,
				 struct ib_qp *ibqp,
				 u32 port)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
	struct ionic_counter *cntr;
	struct ionic_qp *qp;

	cntr = xa_load(&dev->counter_stats->xa_counters, counter->id);
	if (!cntr)
		return -EINVAL;

	qp = to_ionic_qp(ibqp);
	list_add_tail(&qp->qp_list_counter, &cntr->qp_list);
	ibqp->counter = counter;

	return 0;
}
/* counter_unbind_qp op: detach the QP from its counter, if bound. */
static int ionic_counter_unbind_qp(struct ib_qp *ibqp, u32 port)
{
	struct ionic_qp *qp = to_ionic_qp(ibqp);

	if (!ibqp->counter)
		return 0;

	list_del(&qp->qp_list_counter);
	ibqp->counter = NULL;

	return 0;
}
/*
 * Query per-QP stat values for every QP bound to the given counter and
 * sum them into hw_stats->value[].  Reuses the counter's vals buffer for
 * each QP's DMA.  Returns the number of values accumulated, or a
 * negative errno.
 */
static int ionic_get_qp_stats(struct ib_device *ibdev,
struct rdma_hw_stats *hw_stats,
u32 counter_id)
{
struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
struct ionic_counter_stats *cs;
struct ionic_counter *cntr;
dma_addr_t hw_stats_dma;
struct ionic_qp *qp;
int rc, stat_i = 0;
cs = dev->counter_stats;
cntr = xa_load(&cs->xa_counters, counter_id);
if (!cntr)
return -EINVAL;
hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, cntr->vals,
PAGE_SIZE, DMA_FROM_DEVICE);
rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
if (rc)
return rc;
/* start from zero; values are summed across all bound QPs */
memset(hw_stats->value, 0, sizeof(u64) * hw_stats->num_counters);
list_for_each_entry(qp, &cntr->qp_list, qp_list_counter) {
rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE,
qp->qpid,
IONIC_V1_ADMIN_QP_STATS_VALS);
if (rc)
goto err_cmd;
for (stat_i = 0; stat_i < cs->queue_stats_count; ++stat_i)
hw_stats->value[stat_i] +=
ionic_v1_stat_val(&cs->hdr[stat_i],
cntr->vals,
PAGE_SIZE);
}
dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
/* stat_i is 0 if the qp_list is empty, else queue_stats_count */
return stat_i;
err_cmd:
dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
return rc;
}
/* counter_update_stats op: refresh this counter's accumulated values. */
static int ionic_counter_update_stats(struct rdma_counter *counter)
{
	struct ib_device *ibdev = counter->device;

	return ionic_get_qp_stats(ibdev, counter->stats, counter->id);
}
/*
 * Fetch the per-QP stat descriptors from the device and build the name
 * array used by counter_alloc_stats.  Fills dev->counter_stats (cs);
 * on failure cs->hdr is freed and an errno is returned, and the caller
 * discards counter_stats entirely.
 */
static int ionic_alloc_counters(struct ionic_ibdev *dev)
{
struct ionic_counter_stats *cs = dev->counter_stats;
int rc, hw_stats_count;
dma_addr_t hdr_dma;
/* buffer for names, sizes, offsets of values */
cs->hdr = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cs->hdr)
return -ENOMEM;
hdr_dma = dma_map_single(dev->lif_cfg.hwdev, cs->hdr,
PAGE_SIZE, DMA_FROM_DEVICE);
rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
if (rc)
goto err_dma;
rc = ionic_hw_stats_cmd(dev, hdr_dma, PAGE_SIZE, 0,
IONIC_V1_ADMIN_QP_STATS_HDRS);
if (rc)
goto err_cmd;
/* unmap before the CPU parses the descriptors the device wrote */
dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, PAGE_SIZE, DMA_FROM_DEVICE);
/* normalize and count the number of hw_stats */
hw_stats_count = ionic_v1_stat_normalize(cs->hdr,
PAGE_SIZE / sizeof(*cs->hdr));
if (!hw_stats_count) {
rc = -ENODATA;
goto err_dma;
}
cs->queue_stats_count = hw_stats_count;
/* alloc and init array of names */
cs->stats_hdrs = kcalloc(hw_stats_count, sizeof(*cs->stats_hdrs),
GFP_KERNEL);
if (!cs->stats_hdrs) {
rc = -ENOMEM;
goto err_dma;
}
ionic_fill_stats_desc(cs->stats_hdrs, cs->hdr, hw_stats_count);
return 0;
err_cmd:
/* still mapped only on this path; err_dma is reached after unmap */
dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, PAGE_SIZE, DMA_FROM_DEVICE);
err_dma:
kfree(cs->hdr);
return rc;
}
/* Installed only when the lif advertises IONIC_LIF_RDMA_STAT_GLOBAL. */
static const struct ib_device_ops ionic_hw_stats_ops = {
.driver_id = RDMA_DRIVER_IONIC,
.alloc_hw_port_stats = ionic_alloc_hw_stats,
.get_hw_stats = ionic_get_hw_stats,
};
/* Installed only when the lif advertises IONIC_LIF_RDMA_STAT_QP. */
static const struct ib_device_ops ionic_counter_stats_ops = {
.counter_alloc_stats = ionic_counter_alloc_stats,
.counter_dealloc = ionic_counter_dealloc,
.counter_bind_qp = ionic_counter_bind_qp,
.counter_unbind_qp = ionic_counter_unbind_qp,
.counter_update_stats = ionic_counter_update_stats,
};
/*
 * Enable whichever stats features the lif advertises.  Failures are
 * non-fatal: the corresponding ops are simply not installed and the
 * device registers without that stats support.
 */
void ionic_stats_init(struct ionic_ibdev *dev)
{
	u16 stats_type = dev->lif_cfg.stats_type;
	int rc;

	if (stats_type & IONIC_LIF_RDMA_STAT_GLOBAL) {
		rc = ionic_init_hw_stats(dev);
		if (rc)
			ibdev_dbg(&dev->ibdev, "Failed to init hw stats\n");
		else
			ib_set_device_ops(&dev->ibdev, &ionic_hw_stats_ops);
	}

	if (!(stats_type & IONIC_LIF_RDMA_STAT_QP))
		return;

	dev->counter_stats = kzalloc(sizeof(*dev->counter_stats),
				     GFP_KERNEL);
	if (!dev->counter_stats)
		return;

	rc = ionic_alloc_counters(dev);
	if (rc) {
		ibdev_dbg(&dev->ibdev, "Failed to init counter stats\n");
		kfree(dev->counter_stats);
		dev->counter_stats = NULL;
		return;
	}

	xa_init_flags(&dev->counter_stats->xa_counters, XA_FLAGS_ALLOC);
	ib_set_device_ops(&dev->ibdev, &ionic_counter_stats_ops);
}
void ionic_stats_cleanup(struct ionic_ibdev *dev)
{
if (dev->counter_stats) {
xa_destroy(&dev->counter_stats->xa_counters);
kfree(dev->counter_stats->hdr);
kfree(dev->counter_stats->stats_hdrs);
kfree(dev->counter_stats);
dev->counter_stats = NULL;
}
kfree(dev->hw_stats);
kfree(dev->hw_stats_buf);
kfree(dev->hw_stats_hdrs);
}

View File

@ -289,6 +289,7 @@ static void ionic_destroy_ibdev(struct ionic_ibdev *dev)
{
ionic_kill_rdma_admin(dev, false);
ib_unregister_device(&dev->ibdev);
ionic_stats_cleanup(dev);
ionic_destroy_rdma_admin(dev);
ionic_destroy_resids(dev);
WARN_ON(!xa_empty(&dev->qp_tbl));
@ -346,6 +347,8 @@ static struct ionic_ibdev *ionic_create_ibdev(struct ionic_aux_dev *ionic_adev)
ib_set_device_ops(&dev->ibdev, &ionic_dev_ops);
ionic_stats_init(dev);
rc = ib_register_device(ibdev, "ionic_%d", ibdev->dev.parent);
if (rc)
goto err_register;
@ -353,6 +356,7 @@ static struct ionic_ibdev *ionic_create_ibdev(struct ionic_aux_dev *ionic_adev)
return dev;
err_register:
ionic_stats_cleanup(dev);
err_admin:
ionic_kill_rdma_admin(dev, false);
ionic_destroy_rdma_admin(dev);

View File

@ -30,6 +30,7 @@
#define IONIC_PKEY_TBL_LEN 1
#define IONIC_GID_TBL_LEN 256
#define IONIC_MAX_QPID 0xffffff
#define IONIC_SPEC_HIGH 8
#define IONIC_MAX_PD 1024
#define IONIC_SPEC_HIGH 8
@ -109,6 +110,12 @@ struct ionic_ibdev {
atomic_t admin_state;
struct ionic_eq **eq_vec;
struct ionic_v1_stat *hw_stats;
void *hw_stats_buf;
struct rdma_stat_desc *hw_stats_hdrs;
struct ionic_counter_stats *counter_stats;
int hw_stats_count;
};
struct ionic_eq {
@ -320,6 +327,18 @@ struct ionic_mr {
bool created;
};
/* Per-device state for QP counter stats (see ionic_hw_stats.c). */
struct ionic_counter_stats {
int queue_stats_count; /* number of valid entries in hdr */
struct ionic_v1_stat *hdr; /* stat descriptors from the device */
struct rdma_stat_desc *stats_hdrs; /* names array for rdma_hw_stats */
struct xarray xa_counters; /* counter id -> struct ionic_counter */
};
/* One allocated rdma counter: a values buffer plus the QPs bound to it. */
struct ionic_counter {
void *vals; /* PAGE_SIZE buffer for one QP's stat values */
struct list_head qp_list; /* bound QPs, linked via qp->qp_list_counter */
};
static inline struct ionic_ibdev *to_ionic_ibdev(struct ib_device *ibdev)
{
return container_of(ibdev, struct ionic_ibdev, ibdev);
@ -480,6 +499,10 @@ int ionic_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
int ionic_poll_cq(struct ib_cq *ibcq, int nwc, struct ib_wc *wc);
int ionic_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
/* ionic_hw_stats.c */
void ionic_stats_init(struct ionic_ibdev *dev);
void ionic_stats_cleanup(struct ionic_ibdev *dev);
/* ionic_pgtbl.c */
__le64 ionic_pgtbl_dma(struct ionic_tbl_buf *buf, u64 va);
__be64 ionic_pgtbl_off(struct ionic_tbl_buf *buf, u64 va);