loop: add hint for handling aio via IOCB_NOWAIT
author Ming Lei <ming.lei@redhat.com>
Wed, 15 Oct 2025 11:07:31 +0000 (19:07 +0800)
committer Jens Axboe <axboe@kernel.dk>
Tue, 18 Nov 2025 13:49:52 +0000 (06:49 -0700)
Add a hint for using IOCB_NOWAIT to handle loop aio commands, so as to
avoid a write (especially randwrite) perf regression on sparse backing
files.

Try IOCB_NOWAIT in the following situations:

- backing file is block device

OR

- READ aio command

OR

- there aren't any queued blocking async WRITEs, because NOWAIT won't
  contend with a blocking WRITE, which often implies an exclusive lock

With this simple policy, the perf regression of randwrite/write on
sparse backing files is fixed.

Link: https://lore.kernel.org/dm-devel/7d6ae2c9-df8e-50d0-7ad6-b787cb3cfab4@redhat.com/
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
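
For context, the EAGAIN behaviour that forces the blocking fallback on sparse
backing files can be reproduced from userspace with RWF_NOWAIT, the
preadv2()/pwritev2() counterpart of IOCB_NOWAIT. The sketch below is
illustrative only: the file path and sizes are made up, and whether a NOWAIT
direct write over an unallocated extent actually returns EAGAIN depends on
the filesystem.

/*
 * Illustrative sketch: try a NOWAIT direct write first, fall back to a
 * blocking write on EAGAIN -- the same try/fallback pattern the loop
 * driver uses with IOCB_NOWAIT.  Path and sizes are arbitrary.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
        struct iovec iov;
        void *buf;
        ssize_t ret;
        int fd;

        fd = open("/mnt/test/sparse.img", O_RDWR | O_DIRECT);
        if (fd < 0 || posix_memalign(&buf, 4096, 4096))
                return 1;
        memset(buf, 0xaa, 4096);
        iov.iov_base = buf;
        iov.iov_len = 4096;

        /* Non-blocking attempt; may hit EAGAIN when writing over a hole. */
        ret = pwritev2(fd, &iov, 1, 0, RWF_NOWAIT);
        if (ret < 0 && errno == EAGAIN)
                /* Blocking fallback, as the loop worker path does. */
                ret = pwritev2(fd, &iov, 1, 0, 0);

        printf("wrote %zd bytes\n", ret);
        close(fd);
        return ret == 4096 ? 0 : 1;
}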
drivers/block/loop.c

index 94478c02fea6bbfa9043e6bc324affe556d954a8..9b842d767381a11138823e0b4efde3b58b8878fe 100644 (file)
@@ -68,6 +68,7 @@ struct loop_device {
        struct rb_root          worker_tree;
        struct timer_list       timer;
        bool                    sysfs_inited;
+       unsigned                lo_nr_blocking_writes;
 
        struct request_queue    *lo_queue;
        struct blk_mq_tag_set   tag_set;
@@ -467,6 +468,33 @@ fail:
        return -EIOCBQUEUED;
 }
 
+static inline bool lo_aio_try_nowait(struct loop_device *lo,
+               struct loop_cmd *cmd)
+{
+       struct file *file = lo->lo_backing_file;
+       struct inode *inode = file->f_mapping->host;
+       struct request *rq = blk_mq_rq_from_pdu(cmd);
+
+       /* NOWAIT works fine for backing block device */
+       if (S_ISBLK(inode->i_mode))
+               return true;
+
+       /*
+        * NOWAIT is supposed to be fine for READ without contending with
+        * blocking WRITE
+        */
+       if (req_op(rq) == REQ_OP_READ)
+               return true;
+
+       /*
+        * If there is any queued non-NOWAIT async WRITE, don't try a new
+        * NOWAIT WRITE, to avoid contention
+        *
+        * Here we focus on handling stable FS block mapping via NOWAIT
+        */
+       return READ_ONCE(lo->lo_nr_blocking_writes) == 0;
+}
+
 static int lo_rw_aio_nowait(struct loop_device *lo, struct loop_cmd *cmd,
                            int rw)
 {
@@ -478,6 +506,9 @@ static int lo_rw_aio_nowait(struct loop_device *lo, struct loop_cmd *cmd,
        if (unlikely(ret))
                goto fail;
 
+       if (!lo_aio_try_nowait(lo, cmd))
+               return -EAGAIN;
+
        cmd->iocb.ki_flags |= IOCB_NOWAIT;
        ret = lo_submit_rw_aio(lo, cmd, nr_bvec, rw);
 fail:
@@ -780,12 +811,19 @@ static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf)
        return sysfs_emit(buf, "%s\n", dio ? "1" : "0");
 }
 
+static ssize_t loop_attr_nr_blocking_writes_show(struct loop_device *lo,
+                                                char *buf)
+{
+       return sysfs_emit(buf, "%u\n", lo->lo_nr_blocking_writes);
+}
+
 LOOP_ATTR_RO(backing_file);
 LOOP_ATTR_RO(offset);
 LOOP_ATTR_RO(sizelimit);
 LOOP_ATTR_RO(autoclear);
 LOOP_ATTR_RO(partscan);
 LOOP_ATTR_RO(dio);
+LOOP_ATTR_RO(nr_blocking_writes);
 
 static struct attribute *loop_attrs[] = {
        &loop_attr_backing_file.attr,
@@ -794,6 +832,7 @@ static struct attribute *loop_attrs[] = {
        &loop_attr_autoclear.attr,
        &loop_attr_partscan.attr,
        &loop_attr_dio.attr,
+       &loop_attr_nr_blocking_writes.attr,
        NULL,
 };
 
@@ -869,6 +908,24 @@ static inline int queue_on_root_worker(struct cgroup_subsys_state *css)
 }
 #endif
 
+static inline void loop_inc_blocking_writes(struct loop_device *lo,
+               struct loop_cmd *cmd)
+{
+       lockdep_assert_held(&lo->lo_work_lock);
+
+       if (req_op(blk_mq_rq_from_pdu(cmd)) == REQ_OP_WRITE)
+               lo->lo_nr_blocking_writes += 1;
+}
+
+static inline void loop_dec_blocking_writes(struct loop_device *lo,
+               struct loop_cmd *cmd)
+{
+       lockdep_assert_held(&lo->lo_work_lock);
+
+       if (req_op(blk_mq_rq_from_pdu(cmd)) == REQ_OP_WRITE)
+               lo->lo_nr_blocking_writes -= 1;
+}
+
 static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
 {
        struct request __maybe_unused *rq = blk_mq_rq_from_pdu(cmd);
@@ -951,6 +1008,8 @@ queue_work:
                work = &lo->rootcg_work;
                cmd_list = &lo->rootcg_cmd_list;
        }
+       if (cmd->use_aio)
+               loop_inc_blocking_writes(lo, cmd);
        list_add_tail(&cmd->list_entry, cmd_list);
        queue_work(lo->workqueue, work);
        spin_unlock_irq(&lo->lo_work_lock);
@@ -2052,6 +2111,8 @@ static void loop_process_work(struct loop_worker *worker,
                cond_resched();
 
                spin_lock_irq(&lo->lo_work_lock);
+               if (cmd->use_aio)
+                       loop_dec_blocking_writes(lo, cmd);
        }
 
        /*
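
The new attribute is read-only and sits alongside the other loop sysfs
attributes, so the count of queued blocking async WRITEs can be observed
while I/O is running. A minimal sketch follows, assuming the usual
/sys/block/loopN/loop/ location of the existing loop attribute group and
using loop0 as an arbitrary example:

/*
 * Minimal sketch: read the new counter for loop0.  The path assumes the
 * existing loop attribute group under /sys/block/loopN/loop/; the
 * device name is an arbitrary choice.
 */
#include <stdio.h>

int main(void)
{
        const char *path = "/sys/block/loop0/loop/nr_blocking_writes";
        FILE *f = fopen(path, "r");
        unsigned int nr;

        if (!f) {
                perror(path);
                return 1;
        }
        if (fscanf(f, "%u", &nr) == 1)
                printf("queued blocking async WRITEs: %u\n", nr);
        fclose(f);
        return 0;
}

A non-zero value means new aio WRITEs to a regular-file backing store will
skip the IOCB_NOWAIT attempt and go straight to the blocking worker path.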