static void io_queue_sqe(struct io_kiocb *req, unsigned int extra_flags);
static void __io_req_caches_free(struct io_ring_ctx *ctx);
-static __read_mostly DEFINE_STATIC_KEY_FALSE(io_key_has_sqarray);
+static __read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(io_key_has_sqarray, HZ);
struct kmem_cache *req_cachep;
static struct workqueue_struct *iou_wq __ro_after_init;
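The definition change above is the heart of the patch: a deferred static key keeps the enable side immediate but batches disables behind the rate limit given at definition time (HZ here), so the expensive text-patching path is not taken on every ring teardown. A minimal sketch of the <linux/jump_label_ratelimit.h> API as this patch uses it; the example_* wrappers are illustrative and not part of io_uring:

	#include <linux/jump_label_ratelimit.h>

	/* Key starts false; disabling transitions are coalesced and run
	 * from delayed work no sooner than HZ after the triggering dec.
	 */
	static DEFINE_STATIC_KEY_DEFERRED_FALSE(example_key, HZ);

	static void example_get(void)
	{
		/* Enable is immediate: a 0 -> 1 count patches the branch now. */
		static_branch_deferred_inc(&example_key);
	}

	static void example_put(void)
	{
		/* Disable is deferred: a dec that would hit zero schedules
		 * delayed work instead of patching text right away.
		 */
		static_branch_slow_dec_deferred(&example_key);
	}

	static bool example_fast_path(void)
	{
		/* The deferred wrapper embeds the plain key as .key. */
		return static_branch_unlikely(&example_key.key);
	}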
	unsigned mask = ctx->sq_entries - 1;
	unsigned head = ctx->cached_sq_head++ & mask;

-	if (static_branch_unlikely(&io_key_has_sqarray) &&
+	if (static_branch_unlikely(&io_key_has_sqarray.key) &&
	    (!(ctx->flags & IORING_SETUP_NO_SQARRAY))) {
		head = READ_ONCE(ctx->sq_array[head]);
		if (unlikely(head >= ctx->sq_entries)) {
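The `.key` dereference is the only fast-path change: static_branch_unlikely() operates on a plain struct static_key_false, which the deferred variant embeds as its first member, roughly:

	struct static_key_false_deferred {
		struct static_key_false key;	/* what the branch tests */
		unsigned long timeout;		/* rate limit, HZ above */
		struct delayed_work work;	/* performs the deferred disable */
	};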
	io_rings_free(ctx);

	if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
-		static_branch_dec(&io_key_has_sqarray);
+		static_branch_slow_dec_deferred(&io_key_has_sqarray);

	percpu_ref_exit(&ctx->refs);
	free_uid(ctx->user);
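Deferring the dec is aimed at ring churn: with a plain key, an application that repeatedly creates and destroys SQARRAY rings bounces the enable count through zero on every cycle, and each 0 <-> 1 transition rewrites kernel text. A sketch of the effect, assuming the usual deferred-key batching behavior (illustrative, not code from the patch):

	int i;

	/* With a plain key this loop would patch text twice per pass.
	 * With the deferred key, the first dec schedules delayed work
	 * and later inc/dec pairs only move the count between 1 and 2,
	 * so the branch is enabled once up front and disabled once,
	 * about HZ after the churn stops.
	 */
	for (i = 0; i < 1000; i++) {
		static_branch_deferred_inc(&io_key_has_sqarray);	/* ring setup */
		static_branch_slow_dec_deferred(&io_key_has_sqarray);	/* ring teardown */
	}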
	ctx->clock_offset = 0;

	if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
-		static_branch_inc(&io_key_has_sqarray);
+		static_branch_deferred_inc(&io_key_has_sqarray);

	if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
	    !(ctx->flags & IORING_SETUP_IOPOLL))
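Ordering still works out under the deferral: a new ring takes its inc before it can fetch an SQE, and enabling is never delayed, so an SQARRAY ring always sees the key true. Only the disable is lazy, and a stale-true key after the last SQARRAY ring dies is harmless because the fast path above re-checks IORING_SETUP_NO_SQARRAY per ring; the key merely decides whether that flag test is worth reaching at all.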