git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
inode: port __I_SYNC to var event
author: Christian Brauner <brauner@kernel.org>
Fri, 23 Aug 2024 12:47:37 +0000 (14:47 +0200)
committer: Christian Brauner <brauner@kernel.org>
Fri, 30 Aug 2024 06:22:39 +0000 (08:22 +0200)
Port the __I_SYNC mechanism to use the new var event mechanism.

Link: https://lore.kernel.org/r/20240823-work-i_state-v3-3-5cd5fd207a57@kernel.org
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Christian Brauner <brauner@kernel.org>
fs/fs-writeback.c

index 1a5006329f6f03bd0f8c6fb6cb21aa46e34f44c3..d8bec3c1bb1fa78c287482130956612647a0d27a 100644 (file)
@@ -1386,12 +1386,13 @@ static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
 
 static void inode_sync_complete(struct inode *inode)
 {
+       assert_spin_locked(&inode->i_lock);
+
        inode->i_state &= ~I_SYNC;
        /* If inode is clean an unused, put it into LRU now... */
        inode_add_lru(inode);
-       /* Waiters must see I_SYNC cleared before being woken up */
-       smp_mb();
-       wake_up_bit(&inode->i_state, __I_SYNC);
+       /* Called with inode->i_lock which ensures memory ordering. */
+       inode_wake_up_bit(inode, __I_SYNC);
 }
 
 static bool inode_dirtied_after(struct inode *inode, unsigned long t)
@@ -1512,17 +1513,25 @@ static int write_inode(struct inode *inode, struct writeback_control *wbc)
  */
 void inode_wait_for_writeback(struct inode *inode)
 {
-       DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
-       wait_queue_head_t *wqh;
+       struct wait_bit_queue_entry wqe;
+       struct wait_queue_head *wq_head;
+
+       assert_spin_locked(&inode->i_lock);
+
+       if (!(inode->i_state & I_SYNC))
+               return;
 
-       lockdep_assert_held(&inode->i_lock);
-       wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
-       while (inode->i_state & I_SYNC) {
+       wq_head = inode_bit_waitqueue(&wqe, inode, __I_SYNC);
+       for (;;) {
+               prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
+               /* Checking I_SYNC with inode->i_lock guarantees memory ordering. */
+               if (!(inode->i_state & I_SYNC))
+                       break;
                spin_unlock(&inode->i_lock);
-               __wait_on_bit(wqh, &wq, bit_wait,
-                             TASK_UNINTERRUPTIBLE);
+               schedule();
                spin_lock(&inode->i_lock);
        }
+       finish_wait(wq_head, &wqe.wq_entry);
 }
 
 /*
@@ -1533,16 +1542,20 @@ void inode_wait_for_writeback(struct inode *inode)
 static void inode_sleep_on_writeback(struct inode *inode)
        __releases(inode->i_lock)
 {
-       DEFINE_WAIT(wait);
-       wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
-       int sleep;
+       struct wait_bit_queue_entry wqe;
+       struct wait_queue_head *wq_head;
+       bool sleep;
+
+       assert_spin_locked(&inode->i_lock);
 
-       prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
-       sleep = inode->i_state & I_SYNC;
+       wq_head = inode_bit_waitqueue(&wqe, inode, __I_SYNC);
+       prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
+       /* Checking I_SYNC with inode->i_lock guarantees memory ordering. */
+       sleep = !!(inode->i_state & I_SYNC);
        spin_unlock(&inode->i_lock);
        if (sleep)
                schedule();
-       finish_wait(wqh, &wait);
+       finish_wait(wq_head, &wqe.wq_entry);
 }
 
 /*