Allow users to set the FRMR pools aging timer through netlink.
This functionality allows users to control how long handles reside in
the kernel before being destroyed, letting them tune the tradeoff
between memory and HW object consumption on one hand and memory
registration optimization on the other.
Since FRMR pools are highly beneficial for application restart
scenarios, this command allows users to match the aging timer to their
application restart time, making sure the FRMR handles deregistered on
application teardown are kept in the pools long enough to be reused on
application startup.
Signed-off-by: Michael Guralnik <michaelgur@nvidia.com>
Reviewed-by: Patrisious Haddad <phaddad@nvidia.com>
Signed-off-by: Edward Srouji <edwards@nvidia.com>
Link: https://patch.msgid.link/20260226-frmr_pools-v4-9-95360b54f15e@nvidia.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
if (has_work)
queue_delayed_work(
pools->aging_wq, &pool->aging_work,
- secs_to_jiffies(FRMR_POOLS_DEFAULT_AGING_PERIOD_SECS));
+ secs_to_jiffies(READ_ONCE(pools->aging_period_sec)));
}
static void destroy_frmr_pool(struct ib_device *device,
return -ENOMEM;
}
+ pools->aging_period_sec = FRMR_POOLS_DEFAULT_AGING_PERIOD_SECS;
+
device->frmr_pools = pools;
return 0;
}
}
EXPORT_SYMBOL(ib_frmr_pools_cleanup);
+/**
+ * ib_frmr_pools_set_aging_period - update the aging period of a device's
+ *   FRMR pools
+ * @device: IB device whose FRMR pools are adjusted
+ * @period_sec: new aging period in seconds, must be non-zero
+ *
+ * Stores the new period and reschedules the pending aging work of every
+ * existing pool so the new period takes effect immediately rather than
+ * after the currently queued delay expires.
+ *
+ * Return: 0 on success, -EINVAL if the device has no FRMR pools or
+ * @period_sec is zero.
+ */
+int ib_frmr_pools_set_aging_period(struct ib_device *device, u32 period_sec)
+{
+ struct ib_frmr_pools *pools = device->frmr_pools;
+ struct ib_frmr_pool *pool;
+ struct rb_node *node;
+
+ if (!pools)
+ return -EINVAL;
+
+ if (period_sec == 0)
+ return -EINVAL;
+
+ /* Paired with READ_ONCE() at the queue_delayed_work() call sites. */
+ WRITE_ONCE(pools->aging_period_sec, period_sec);
+
+ /* Walk all pools and re-arm their delayed aging work with the new
+ * period; the read lock keeps the rb-tree stable during the walk.
+ */
+ read_lock(&pools->rb_lock);
+ for (node = rb_first(&pools->rb_root); node; node = rb_next(node)) {
+ pool = rb_entry(node, struct ib_frmr_pool, node);
+ mod_delayed_work(pools->aging_wq, &pool->aging_work,
+ secs_to_jiffies(period_sec));
+ }
+ read_unlock(&pools->rb_lock);
+
+ return 0;
+}
+
static inline int compare_keys(struct ib_frmr_key *key1,
struct ib_frmr_key *key2)
{
if (ret == 0 && schedule_aging)
queue_delayed_work(pools->aging_wq, &pool->aging_work,
- secs_to_jiffies(FRMR_POOLS_DEFAULT_AGING_PERIOD_SECS));
+ secs_to_jiffies(READ_ONCE(pools->aging_period_sec)));
return ret;
}
const struct ib_frmr_pool_ops *pool_ops;
struct workqueue_struct *aging_wq;
+ u32 aging_period_sec;
};
int ib_frmr_pools_set_pinned(struct ib_device *device, struct ib_frmr_key *key,
u32 pinned_handles);
+int ib_frmr_pools_set_aging_period(struct ib_device *device, u32 period_sec);
#endif /* RDMA_CORE_FRMR_POOLS_H */
[RDMA_NLDEV_ATTR_FRMR_POOL_QUEUE_HANDLES] = { .type = NLA_U32 },
[RDMA_NLDEV_ATTR_FRMR_POOL_MAX_IN_USE] = { .type = NLA_U64 },
[RDMA_NLDEV_ATTR_FRMR_POOL_IN_USE] = { .type = NLA_U64 },
+ [RDMA_NLDEV_ATTR_FRMR_POOLS_AGING_PERIOD] = { .type = NLA_U32 },
};
static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
return ret;
}
+/*
+ * Netlink doit handler for RDMA_NLDEV_CMD_FRMR_POOLS_SET. Requires the
+ * device index and the aging period attributes; applies the new aging
+ * period to the device's FRMR pools.
+ */
+static int nldev_frmr_pools_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct ib_device *device;
+ u32 aging_period;
+ int err;
+
+ err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
+ extack);
+ if (err)
+ return err;
+
+ /* Both the target device and the new period are mandatory. */
+ if (!tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+ return -EINVAL;
+
+ if (!tb[RDMA_NLDEV_ATTR_FRMR_POOLS_AGING_PERIOD])
+ return -EINVAL;
+
+ /* Takes a reference on the device; dropped below via ib_device_put(). */
+ device = ib_device_get_by_index(
+ sock_net(skb->sk), nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]));
+ if (!device)
+ return -EINVAL;
+
+ aging_period = nla_get_u32(tb[RDMA_NLDEV_ATTR_FRMR_POOLS_AGING_PERIOD]);
+
+ err = ib_frmr_pools_set_aging_period(device, aging_period);
+
+ ib_device_put(device);
+ return err;
+}
+
static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
[RDMA_NLDEV_CMD_GET] = {
.doit = nldev_get_doit,
[RDMA_NLDEV_CMD_FRMR_POOLS_GET] = {
.dump = nldev_frmr_pools_get_dumpit,
},
+ [RDMA_NLDEV_CMD_FRMR_POOLS_SET] = {
+ .doit = nldev_frmr_pools_set_doit,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
};
static int fill_mon_netdev_rename(struct sk_buff *msg,
RDMA_NLDEV_CMD_FRMR_POOLS_GET, /* can dump */
+ RDMA_NLDEV_CMD_FRMR_POOLS_SET,
+
RDMA_NLDEV_NUM_OPS
};
RDMA_NLDEV_ATTR_FRMR_POOL_QUEUE_HANDLES, /* u32 */
RDMA_NLDEV_ATTR_FRMR_POOL_MAX_IN_USE, /* u64 */
RDMA_NLDEV_ATTR_FRMR_POOL_IN_USE, /* u64 */
+ RDMA_NLDEV_ATTR_FRMR_POOLS_AGING_PERIOD, /* u32 */
/*
* Always the end