const struct blk_mq_debugfs_attr *attr)
{
lockdep_assert_held(&q->debugfs_mutex);
- /*
- * Creating new debugfs entries with queue freezed has the risk of
- * deadlock.
- */
- WARN_ON_ONCE(q->mq_freeze_depth != 0);
/*
* debugfs_mutex should not be nested under other locks that can be
* grabbed while queue is frozen.
void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
+ unsigned int memflags;
unsigned long i;
- mutex_lock(&q->debugfs_mutex);
+ memflags = blk_debugfs_lock(q);
queue_for_each_hw_ctx(q, hctx, i)
blk_mq_debugfs_register_hctx(q, hctx);
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock(q, memflags);
}
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
void blk_mq_sched_reg_debugfs(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
+ unsigned int memflags;
unsigned long i;
- mutex_lock(&q->debugfs_mutex);
+ memflags = blk_debugfs_lock(q);
blk_mq_debugfs_register_sched(q);
queue_for_each_hw_ctx(q, hctx, i)
blk_mq_debugfs_register_sched_hctx(q, hctx);
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock(q, memflags);
}
void blk_mq_sched_unreg_debugfs(struct request_queue *q)
struct blk_mq_hw_ctx *hctx;
unsigned long i;
- mutex_lock(&q->debugfs_mutex);
+ blk_debugfs_lock_nomemsave(q);
queue_for_each_hw_ctx(q, hctx, i)
blk_mq_debugfs_unregister_sched_hctx(hctx);
blk_mq_debugfs_unregister_sched(q);
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock_nomemrestore(q);
}
void blk_mq_free_sched_tags(struct elevator_tags *et,
{
struct request_queue *q = disk->queue;
- mutex_lock(&q->debugfs_mutex);
+ blk_debugfs_lock_nomemsave(q);
blk_trace_shutdown(q);
debugfs_remove_recursive(q->debugfs_dir);
q->debugfs_dir = NULL;
q->sched_debugfs_dir = NULL;
q->rqos_debugfs_dir = NULL;
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock_nomemrestore(q);
}
/**
int blk_register_queue(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
+ unsigned int memflags;
int ret;
ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
}
mutex_lock(&q->sysfs_lock);
- mutex_lock(&q->debugfs_mutex);
+ memflags = blk_debugfs_lock(q);
q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
if (queue_is_mq(q))
blk_mq_debugfs_register(q);
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock(q, memflags);
ret = disk_register_independent_access_ranges(disk);
if (ret)
{
struct request_queue *q = disk->queue;
struct rq_wb *rwb;
+ unsigned int memflags;
if (!__wbt_enable_default(disk))
return;
return;
}
- mutex_lock(&q->debugfs_mutex);
+ memflags = blk_debugfs_lock(q);
blk_mq_debugfs_register_rq_qos(q);
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock(q, memflags);
}
static u64 wbt_default_latency_nsec(struct request_queue *q)
blk_mq_unquiesce_queue(q);
out:
blk_mq_unfreeze_queue(q, memflags);
- mutex_lock(&q->debugfs_mutex);
+
+ memflags = blk_debugfs_lock(q);
blk_mq_debugfs_register_rq_qos(q);
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock(q, memflags);
return ret;
}
}
#endif
+/*
+ * debugfs directory and file creation can trigger fs reclaim, which may
+ * recurse into the block layer and issue I/O against this request_queue.
+ * If the queue is frozen at that point, that I/O can never complete and
+ * we deadlock. Take debugfs_mutex in NOIO context so that fs reclaim
+ * performed under it cannot issue block I/O.
+ */
+static inline void blk_debugfs_lock_nomemsave(struct request_queue *q)
+{
+ mutex_lock(&q->debugfs_mutex);
+}
+
+static inline void blk_debugfs_unlock_nomemrestore(struct request_queue *q)
+{
+ mutex_unlock(&q->debugfs_mutex);
+}
+
+static inline unsigned int __must_check blk_debugfs_lock(struct request_queue *q)
+{
+ unsigned int memflags = memalloc_noio_save();
+
+ blk_debugfs_lock_nomemsave(q);
+ return memflags;
+}
+
+static inline void blk_debugfs_unlock(struct request_queue *q,
+ unsigned int memflags)
+{
+ blk_debugfs_unlock_nomemrestore(q);
+ memalloc_noio_restore(memflags);
+}
+
#endif /* BLK_INTERNAL_H */
{
int ret;
- mutex_lock(&q->debugfs_mutex);
+ blk_debugfs_lock_nomemsave(q);
ret = __blk_trace_remove(q);
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock_nomemrestore(q);
return ret;
}
struct blk_user_trace_setup2 buts2;
struct blk_user_trace_setup buts;
struct blk_trace *bt;
+ unsigned int memflags;
int ret;
ret = copy_from_user(&buts, arg, sizeof(buts));
.pid = buts.pid,
};
- mutex_lock(&q->debugfs_mutex);
+ memflags = blk_debugfs_lock(q);
bt = blk_trace_setup_prepare(q, name, dev, buts.buf_size, buts.buf_nr,
bdev);
if (IS_ERR(bt)) {
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock(q, memflags);
return PTR_ERR(bt);
}
blk_trace_setup_finalize(q, name, 1, bt, &buts2);
strscpy(buts.name, buts2.name, BLKTRACE_BDEV_SIZE);
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock(q, memflags);
if (copy_to_user(arg, &buts, sizeof(buts))) {
blk_trace_remove(q);
{
struct blk_user_trace_setup2 buts2;
struct blk_trace *bt;
+ unsigned int memflags;
if (copy_from_user(&buts2, arg, sizeof(buts2)))
return -EFAULT;
if (buts2.flags != 0)
return -EINVAL;
- mutex_lock(&q->debugfs_mutex);
+ memflags = blk_debugfs_lock(q);
bt = blk_trace_setup_prepare(q, name, dev, buts2.buf_size, buts2.buf_nr,
bdev);
if (IS_ERR(bt)) {
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock(q, memflags);
return PTR_ERR(bt);
}
blk_trace_setup_finalize(q, name, 2, bt, &buts2);
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock(q, memflags);
if (copy_to_user(arg, &buts2, sizeof(buts2))) {
blk_trace_remove(q);
struct blk_user_trace_setup2 buts2;
struct compat_blk_user_trace_setup cbuts;
struct blk_trace *bt;
+ unsigned int memflags;
if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
return -EFAULT;
.pid = cbuts.pid,
};
- mutex_lock(&q->debugfs_mutex);
+ memflags = blk_debugfs_lock(q);
bt = blk_trace_setup_prepare(q, name, dev, buts2.buf_size, buts2.buf_nr,
bdev);
if (IS_ERR(bt)) {
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock(q, memflags);
return PTR_ERR(bt);
}
blk_trace_setup_finalize(q, name, 1, bt, &buts2);
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock(q, memflags);
if (copy_to_user(arg, &buts2.name, ARRAY_SIZE(buts2.name))) {
blk_trace_remove(q);
{
int ret;
- mutex_lock(&q->debugfs_mutex);
+ blk_debugfs_lock_nomemsave(q);
ret = __blk_trace_startstop(q, start);
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock_nomemrestore(q);
return ret;
}
struct blk_trace *bt;
ssize_t ret = -ENXIO;
- mutex_lock(&q->debugfs_mutex);
+ blk_debugfs_lock_nomemsave(q);
bt = rcu_dereference_protected(q->blk_trace,
lockdep_is_held(&q->debugfs_mutex));
ret = sprintf(buf, "%llu\n", bt->end_lba);
out_unlock_bdev:
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock_nomemrestore(q);
return ret;
}
struct block_device *bdev = dev_to_bdev(dev);
struct request_queue *q = bdev_get_queue(bdev);
struct blk_trace *bt;
+ unsigned int memflags;
u64 value;
ssize_t ret = -EINVAL;
goto out;
}
- mutex_lock(&q->debugfs_mutex);
+ memflags = blk_debugfs_lock(q);
bt = rcu_dereference_protected(q->blk_trace,
lockdep_is_held(&q->debugfs_mutex));
}
out_unlock_bdev:
- mutex_unlock(&q->debugfs_mutex);
+ blk_debugfs_unlock(q, memflags);
out:
return ret ? ret : count;
}