git.ipfire.org Git - thirdparty/linux.git/commitdiff
RDMA/nldev: Add command to get FRMR pools
authorMichael Guralnik <michaelgur@nvidia.com>
Thu, 26 Feb 2026 13:52:13 +0000 (15:52 +0200)
committerLeon Romanovsky <leon@kernel.org>
Mon, 2 Mar 2026 18:45:34 +0000 (13:45 -0500)
Add support for a new command in netlink to dump to user the state of
the FRMR pools on the devices.
Expose each pool with its key and the usage statistics for it.

Signed-off-by: Michael Guralnik <michaelgur@nvidia.com>
Reviewed-by: Patrisious Haddad <phaddad@nvidia.com>
Signed-off-by: Edward Srouji <edwards@nvidia.com>
Link: https://patch.msgid.link/20260226-frmr_pools-v4-8-95360b54f15e@nvidia.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/core/nldev.c
include/uapi/rdma/rdma_netlink.h

index 2220a2dfab240eaef2eb64d8e45cb221dfa25614..6637c76165be2555a732ce8e062e886f4309ce40 100644 (file)
 #include <net/netlink.h>
 #include <rdma/rdma_cm.h>
 #include <rdma/rdma_netlink.h>
+#include <rdma/frmr_pools.h>
 
 #include "core_priv.h"
 #include "cma_priv.h"
 #include "restrack.h"
 #include "uverbs.h"
+#include "frmr_pools.h"
 
 /*
  * This determines whether a non-privileged user is allowed to specify a
@@ -172,6 +174,16 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
        [RDMA_NLDEV_ATTR_NAME_ASSIGN_TYPE]      = { .type = NLA_U8 },
        [RDMA_NLDEV_ATTR_EVENT_TYPE]            = { .type = NLA_U8 },
        [RDMA_NLDEV_ATTR_STAT_OPCOUNTER_ENABLED] = { .type = NLA_U8 },
+       [RDMA_NLDEV_ATTR_FRMR_POOLS]            = { .type = NLA_NESTED },
+       [RDMA_NLDEV_ATTR_FRMR_POOL_ENTRY]       = { .type = NLA_NESTED },
+       [RDMA_NLDEV_ATTR_FRMR_POOL_KEY]         = { .type = NLA_NESTED },
+       [RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ATS]     = { .type = NLA_U8 },
+       [RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ACCESS_FLAGS] = { .type = NLA_U32 },
+       [RDMA_NLDEV_ATTR_FRMR_POOL_KEY_VENDOR_KEY] = { .type = NLA_U64 },
+       [RDMA_NLDEV_ATTR_FRMR_POOL_KEY_NUM_DMA_BLOCKS] = { .type = NLA_U64 },
+       [RDMA_NLDEV_ATTR_FRMR_POOL_QUEUE_HANDLES] = { .type = NLA_U32 },
+       [RDMA_NLDEV_ATTR_FRMR_POOL_MAX_IN_USE]  = { .type = NLA_U64 },
+       [RDMA_NLDEV_ATTR_FRMR_POOL_IN_USE]      = { .type = NLA_U64 },
 };
 
 static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
@@ -2637,6 +2649,156 @@ static int nldev_deldev(struct sk_buff *skb, struct nlmsghdr *nlh,
        return ib_del_sub_device_and_put(device);
 }
 
+/*
+ * Emit a pool's identifying key as a nested
+ * RDMA_NLDEV_ATTR_FRMR_POOL_KEY attribute: ATS enablement (u8),
+ * access flags (u32), vendor key (u64) and number of DMA blocks (u64).
+ *
+ * Returns 0 on success or -EMSGSIZE if the message ran out of room.
+ * On failure the nest is intentionally left open; the caller cancels
+ * the enclosing pool-entry nest, which discards this nest as well.
+ */
+static int fill_frmr_pool_key(struct sk_buff *msg, struct ib_frmr_key *key)
+{
+       struct nlattr *key_attr;
+
+       key_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_FRMR_POOL_KEY);
+       if (!key_attr)
+               return -EMSGSIZE;
+
+       if (nla_put_u8(msg, RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ATS, key->ats))
+               goto err;
+       if (nla_put_u32(msg, RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ACCESS_FLAGS,
+                       key->access_flags))
+               goto err;
+       if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_FRMR_POOL_KEY_VENDOR_KEY,
+                             key->vendor_key, RDMA_NLDEV_ATTR_PAD))
+               goto err;
+       if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_FRMR_POOL_KEY_NUM_DMA_BLOCKS,
+                             key->num_dma_blocks, RDMA_NLDEV_ATTR_PAD))
+               goto err;
+
+       nla_nest_end(msg, key_attr);
+       return 0;
+
+err:
+       return -EMSGSIZE;
+}
+
+/*
+ * Fill one pool's attributes into the current pool-entry nest: the
+ * nested key, followed by the pool's usage statistics (queue handle
+ * count, high-water mark and current in-use count).
+ *
+ * The statistics are read under pool->lock so the three values form
+ * a consistent snapshot.
+ *
+ * Returns 0 on success or -EMSGSIZE if the message ran out of room;
+ * the caller is responsible for cancelling the entry nest on failure.
+ */
+static int fill_frmr_pool_entry(struct sk_buff *msg, struct ib_frmr_pool *pool)
+{
+       if (fill_frmr_pool_key(msg, &pool->key))
+               return -EMSGSIZE;
+
+       spin_lock(&pool->lock);
+       /* Total queued handles = active + inactive queue counts. */
+       if (nla_put_u32(msg, RDMA_NLDEV_ATTR_FRMR_POOL_QUEUE_HANDLES,
+                       pool->queue.ci + pool->inactive_queue.ci))
+               goto err_unlock;
+       if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_FRMR_POOL_MAX_IN_USE,
+                             pool->max_in_use, RDMA_NLDEV_ATTR_PAD))
+               goto err_unlock;
+       if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_FRMR_POOL_IN_USE,
+                             pool->in_use, RDMA_NLDEV_ATTR_PAD))
+               goto err_unlock;
+       spin_unlock(&pool->lock);
+
+       return 0;
+
+err_unlock:
+       spin_unlock(&pool->lock);
+       return -EMSGSIZE;
+}
+
+/*
+ * Netlink dumpit handler for RDMA_NLDEV_CMD_FRMR_POOLS_GET.
+ *
+ * Requires RDMA_NLDEV_ATTR_DEV_INDEX in the request to select the
+ * device.  Walks the device's FRMR pool rb-tree under pools->rb_lock
+ * and emits one RDMA_NLDEV_ATTR_FRMR_POOL_ENTRY per user-visible pool
+ * (pools with a kernel vendor key are skipped) inside a
+ * RDMA_NLDEV_ATTR_FRMR_POOLS nest.
+ *
+ * Dump position is kept in cb->args[0]: entries before that index are
+ * skipped on re-entry, and the index of the next entry to emit is
+ * stored back before returning.  Returns skb->len when at least one
+ * entry was emitted (even if the message filled up mid-walk, so the
+ * dump resumes on the next call), or 0 to signal end of dump.
+ */
+static int nldev_frmr_pools_get_dumpit(struct sk_buff *skb,
+                                      struct netlink_callback *cb)
+{
+       struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+       struct ib_frmr_pools *pools;
+       int err, ret = 0, idx = 0;
+       struct ib_frmr_pool *pool;
+       struct nlattr *table_attr;
+       struct nlattr *entry_attr;
+       struct ib_device *device;
+       int start = cb->args[0];
+       struct rb_node *node;
+       struct nlmsghdr *nlh;
+       bool filled = false;
+
+       err = __nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+                           nldev_policy, NL_VALIDATE_LIBERAL, NULL);
+       if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+               return -EINVAL;
+
+       device = ib_device_get_by_index(
+               sock_net(skb->sk), nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]));
+       if (!device)
+               return -EINVAL;
+
+       /* Device may not have FRMR pools at all; empty dump in that case. */
+       pools = device->frmr_pools;
+       if (!pools) {
+               ib_device_put(device);
+               return 0;
+       }
+
+       nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+                       RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+                                        RDMA_NLDEV_CMD_FRMR_POOLS_GET),
+                       0, NLM_F_MULTI);
+
+       if (!nlh || fill_nldev_handle(skb, device)) {
+               ret = -EMSGSIZE;
+               goto err;
+       }
+
+       table_attr = nla_nest_start_noflag(skb, RDMA_NLDEV_ATTR_FRMR_POOLS);
+       if (!table_attr) {
+               ret = -EMSGSIZE;
+               goto err;
+       }
+
+       read_lock(&pools->rb_lock);
+       for (node = rb_first(&pools->rb_root); node; node = rb_next(node)) {
+               pool = rb_entry(node, struct ib_frmr_pool, node);
+               /* Kernel-internal pools are not exposed to userspace. */
+               if (pool->key.kernel_vendor_key)
+                       continue;
+
+               /* Skip entries already emitted in a previous dump round. */
+               if (idx < start) {
+                       idx++;
+                       continue;
+               }
+
+               filled = true;
+
+               entry_attr = nla_nest_start_noflag(
+                       skb, RDMA_NLDEV_ATTR_FRMR_POOL_ENTRY);
+               if (!entry_attr) {
+                       ret = -EMSGSIZE;
+                       goto end_msg;
+               }
+
+               if (fill_frmr_pool_entry(skb, pool)) {
+                       /* Drop the partial entry; resume from it next round. */
+                       nla_nest_cancel(skb, entry_attr);
+                       ret = -EMSGSIZE;
+                       goto end_msg;
+               }
+
+               nla_nest_end(skb, entry_attr);
+               idx++;
+       }
+end_msg:
+       read_unlock(&pools->rb_lock);
+
+       nla_nest_end(skb, table_attr);
+       nlmsg_end(skb, nlh);
+       cb->args[0] = idx;
+
+       /*
+        * No more entries to fill, cancel the message and
+        * return 0 to mark end of dumpit.
+        */
+       if (!filled)
+               goto err;
+
+       ib_device_put(device);
+       return skb->len;
+
+err:
+       nlmsg_cancel(skb, nlh);
+       ib_device_put(device);
+       return ret;
+}
+
 static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
        [RDMA_NLDEV_CMD_GET] = {
                .doit = nldev_get_doit,
@@ -2743,6 +2905,9 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
                .doit = nldev_deldev,
                .flags = RDMA_NL_ADMIN_PERM,
        },
+       [RDMA_NLDEV_CMD_FRMR_POOLS_GET] = {
+               .dump = nldev_frmr_pools_get_dumpit,
+       },
 };
 
 static int fill_mon_netdev_rename(struct sk_buff *msg,
index f41f0228fcd0e0b74e74b4d87611546b00f799a1..8f17ffe0190cb86131109209c45caec155ab36da 100644 (file)
@@ -308,6 +308,8 @@ enum rdma_nldev_command {
 
        RDMA_NLDEV_CMD_MONITOR,
 
+       RDMA_NLDEV_CMD_FRMR_POOLS_GET, /* can dump */
+
        RDMA_NLDEV_NUM_OPS
 };
 
@@ -582,6 +584,21 @@ enum rdma_nldev_attr {
        RDMA_NLDEV_SYS_ATTR_MONITOR_MODE,       /* u8 */
 
        RDMA_NLDEV_ATTR_STAT_OPCOUNTER_ENABLED, /* u8 */
+
+       /*
+        * FRMR Pools attributes
+        */
+       RDMA_NLDEV_ATTR_FRMR_POOLS,             /* nested table */
+       RDMA_NLDEV_ATTR_FRMR_POOL_ENTRY,        /* nested table */
+       RDMA_NLDEV_ATTR_FRMR_POOL_KEY,          /* nested table */
+       RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ATS,      /* u8 */
+       RDMA_NLDEV_ATTR_FRMR_POOL_KEY_ACCESS_FLAGS,     /* u32 */
+       RDMA_NLDEV_ATTR_FRMR_POOL_KEY_VENDOR_KEY,       /* u64 */
+       RDMA_NLDEV_ATTR_FRMR_POOL_KEY_NUM_DMA_BLOCKS,   /* u64 */
+       RDMA_NLDEV_ATTR_FRMR_POOL_QUEUE_HANDLES,        /* u32 */
+       RDMA_NLDEV_ATTR_FRMR_POOL_MAX_IN_USE,   /* u64 */
+       RDMA_NLDEV_ATTR_FRMR_POOL_IN_USE,       /* u64 */
+
        /*
         * Always the end
         */