zram: add writeback batch size device attr
author    Sergey Senozhatsky <senozhatsky@chromium.org>
          Sat, 22 Nov 2025 07:40:25 +0000 (16:40 +0900)
committer Andrew Morton <akpm@linux-foundation.org>
          Mon, 24 Nov 2025 23:08:53 +0000 (15:08 -0800)
Introduce a writeback_batch_size device attribute so that the maximum number
of in-flight writeback bio requests can be configured per device at
run-time.  This essentially enables batched bio writeback.
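
A minimal userspace usage sketch (assuming the new attribute appears next to
the existing zram sysfs knobs under /sys/block/zram0/, that a backing device
is already configured, and that idle pages have been marked; the device name
and the batch size value are illustrative):

  /*
   * Bump the per-device writeback batch size, then trigger an idle-page
   * writeback pass that uses the configured number of in-flight bios.
   * The sysfs paths assume device zram0.
   */
  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  static int write_attr(const char *path, const char *val)
  {
          int fd = open(path, O_WRONLY);

          if (fd < 0) {
                  perror(path);
                  return -1;
          }
          if (write(fd, val, strlen(val)) < 0) {
                  perror(path);
                  close(fd);
                  return -1;
          }
          close(fd);
          return 0;
  }

  int main(void)
  {
          /* Allow up to 64 in-flight writeback bios (default is 32). */
          if (write_attr("/sys/block/zram0/writeback_batch_size", "64"))
                  return 1;
          /* Write back idle pages using the configured batch size. */
          return write_attr("/sys/block/zram0/writeback", "idle") ? 1 : 0;
  }

Writes of 0 are rejected with -EINVAL, and the current value can be read
back from the same attribute.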

Link: https://lkml.kernel.org/r/20251122074029.3948921-3-senozhatsky@chromium.org
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Reviewed-by: Brian Geffon <bgeffon@google.com>
Cc: Minchan Kim <minchan@google.com>
Cc: Richard Chang <richardycc@google.com>
Cc: Yuwen Chen <ywen.chen@foxmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
drivers/block/zram/zram_drv.c
drivers/block/zram/zram_drv.h

diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 06ea56f0a00f6e52965d8eaddaaa009e27aa2ed3..5906ba0611657d46de24e0756d7f62b8482465cd 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -590,6 +590,40 @@ static ssize_t writeback_limit_show(struct device *dev,
        return sysfs_emit(buf, "%llu\n", val);
 }
 
+static ssize_t writeback_batch_size_store(struct device *dev,
+                                         struct device_attribute *attr,
+                                         const char *buf, size_t len)
+{
+       struct zram *zram = dev_to_zram(dev);
+       u32 val;
+
+       if (kstrtouint(buf, 10, &val))
+               return -EINVAL;
+
+       if (!val)
+               return -EINVAL;
+
+       down_write(&zram->init_lock);
+       zram->wb_batch_size = val;
+       up_write(&zram->init_lock);
+
+       return len;
+}
+
+static ssize_t writeback_batch_size_show(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+       u32 val;
+       struct zram *zram = dev_to_zram(dev);
+
+       down_read(&zram->init_lock);
+       val = zram->wb_batch_size;
+       up_read(&zram->init_lock);
+
+       return sysfs_emit(buf, "%u\n", val);
+}
+
 static void reset_bdev(struct zram *zram)
 {
        if (!zram->backing_dev)
@@ -781,10 +815,7 @@ static void release_wb_ctl(struct zram_wb_ctl *wb_ctl)
        kfree(wb_ctl);
 }
 
-/* XXX: should be a per-device sysfs attr */
-#define ZRAM_WB_REQ_CNT 32
-
-static struct zram_wb_ctl *init_wb_ctl(void)
+static struct zram_wb_ctl *init_wb_ctl(struct zram *zram)
 {
        struct zram_wb_ctl *wb_ctl;
        int i;
@@ -799,7 +830,7 @@ static struct zram_wb_ctl *init_wb_ctl(void)
        init_waitqueue_head(&wb_ctl->done_wait);
        spin_lock_init(&wb_ctl->done_lock);
 
-       for (i = 0; i < ZRAM_WB_REQ_CNT; i++) {
+       for (i = 0; i < zram->wb_batch_size; i++) {
                struct zram_wb_req *req;
 
                /*
@@ -1200,7 +1231,7 @@ static ssize_t writeback_store(struct device *dev,
                goto release_init_lock;
        }
 
-       wb_ctl = init_wb_ctl();
+       wb_ctl = init_wb_ctl(zram);
        if (!wb_ctl) {
                ret = -ENOMEM;
                goto release_init_lock;
@@ -2843,6 +2874,7 @@ static DEVICE_ATTR_RW(backing_dev);
 static DEVICE_ATTR_WO(writeback);
 static DEVICE_ATTR_RW(writeback_limit);
 static DEVICE_ATTR_RW(writeback_limit_enable);
+static DEVICE_ATTR_RW(writeback_batch_size);
 #endif
 #ifdef CONFIG_ZRAM_MULTI_COMP
 static DEVICE_ATTR_RW(recomp_algorithm);
@@ -2864,6 +2896,7 @@ static struct attribute *zram_disk_attrs[] = {
        &dev_attr_writeback.attr,
        &dev_attr_writeback_limit.attr,
        &dev_attr_writeback_limit_enable.attr,
+       &dev_attr_writeback_batch_size.attr,
 #endif
        &dev_attr_io_stat.attr,
        &dev_attr_mm_stat.attr,
@@ -2925,6 +2958,7 @@ static int zram_add(void)
 
        init_rwsem(&zram->init_lock);
 #ifdef CONFIG_ZRAM_WRITEBACK
+       zram->wb_batch_size = 32;
        spin_lock_init(&zram->wb_limit_lock);
 #endif
 
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 6cee93f9c0d06948401b0c69313f68a5d34678c1..1a647f42c1a48056d032e9dd841ba36a9e95ef14 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -129,6 +129,7 @@ struct zram {
        struct file *backing_dev;
        spinlock_t wb_limit_lock;
        bool wb_limit_enable;
+       u32 wb_batch_size;
        u64 bd_wb_limit;
        struct block_device *bdev;
        unsigned long *bitmap;