git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 4.4
author Sasha Levin <sashal@kernel.org>
Wed, 18 Mar 2020 20:57:36 +0000 (16:57 -0400)
committer Sasha Levin <sashal@kernel.org>
Wed, 18 Mar 2020 20:57:36 +0000 (16:57 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-4.4/jbd2-fix-data-races-at-struct-journal_head.patch [new file with mode: 0644]
queue-4.4/net-ks8851-ml-fix-irq-handling-and-locking.patch [new file with mode: 0644]
queue-4.4/series
queue-4.4/signal-avoid-double-atomic-counter-increments-for-us.patch [new file with mode: 0644]

diff --git a/queue-4.4/jbd2-fix-data-races-at-struct-journal_head.patch b/queue-4.4/jbd2-fix-data-races-at-struct-journal_head.patch
new file mode 100644
index 0000000..5963f30
--- /dev/null
+++ b/queue-4.4/jbd2-fix-data-races-at-struct-journal_head.patch
@@ -0,0 +1,110 @@
+From 9056ecc1f355b2e774961ed6c36944c5fecc7146 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Feb 2020 23:31:11 -0500
+Subject: jbd2: fix data races at struct journal_head
+
+From: Qian Cai <cai@lca.pw>
+
+[ Upstream commit 6c5d911249290f41f7b50b43344a7520605b1acb ]
+
+journal_head::b_transaction and journal_head::b_next_transaction could
+be accessed concurrently as noticed by KCSAN,
+
+ LTP: starting fsync04
+ /dev/zero: Can't open blockdev
+ EXT4-fs (loop0): mounting ext3 file system using the ext4 subsystem
+ EXT4-fs (loop0): mounted filesystem with ordered data mode. Opts: (null)
+ ==================================================================
+ BUG: KCSAN: data-race in __jbd2_journal_refile_buffer [jbd2] / jbd2_write_access_granted [jbd2]
+
+ write to 0xffff99f9b1bd0e30 of 8 bytes by task 25721 on cpu 70:
+  __jbd2_journal_refile_buffer+0xdd/0x210 [jbd2]
+  __jbd2_journal_refile_buffer at fs/jbd2/transaction.c:2569
+  jbd2_journal_commit_transaction+0x2d15/0x3f20 [jbd2]
+  (inlined by) jbd2_journal_commit_transaction at fs/jbd2/commit.c:1034
+  kjournald2+0x13b/0x450 [jbd2]
+  kthread+0x1cd/0x1f0
+  ret_from_fork+0x27/0x50
+
+ read to 0xffff99f9b1bd0e30 of 8 bytes by task 25724 on cpu 68:
+  jbd2_write_access_granted+0x1b2/0x250 [jbd2]
+  jbd2_write_access_granted at fs/jbd2/transaction.c:1155
+  jbd2_journal_get_write_access+0x2c/0x60 [jbd2]
+  __ext4_journal_get_write_access+0x50/0x90 [ext4]
+  ext4_mb_mark_diskspace_used+0x158/0x620 [ext4]
+  ext4_mb_new_blocks+0x54f/0xca0 [ext4]
+  ext4_ind_map_blocks+0xc79/0x1b40 [ext4]
+  ext4_map_blocks+0x3b4/0x950 [ext4]
+  _ext4_get_block+0xfc/0x270 [ext4]
+  ext4_get_block+0x3b/0x50 [ext4]
+  __block_write_begin_int+0x22e/0xae0
+  __block_write_begin+0x39/0x50
+  ext4_write_begin+0x388/0xb50 [ext4]
+  generic_perform_write+0x15d/0x290
+  ext4_buffered_write_iter+0x11f/0x210 [ext4]
+  ext4_file_write_iter+0xce/0x9e0 [ext4]
+  new_sync_write+0x29c/0x3b0
+  __vfs_write+0x92/0xa0
+  vfs_write+0x103/0x260
+  ksys_write+0x9d/0x130
+  __x64_sys_write+0x4c/0x60
+  do_syscall_64+0x91/0xb05
+  entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+ 5 locks held by fsync04/25724:
+  #0: ffff99f9911093f8 (sb_writers#13){.+.+}, at: vfs_write+0x21c/0x260
+  #1: ffff99f9db4c0348 (&sb->s_type->i_mutex_key#15){+.+.}, at: ext4_buffered_write_iter+0x65/0x210 [ext4]
+  #2: ffff99f5e7dfcf58 (jbd2_handle){++++}, at: start_this_handle+0x1c1/0x9d0 [jbd2]
+  #3: ffff99f9db4c0168 (&ei->i_data_sem){++++}, at: ext4_map_blocks+0x176/0x950 [ext4]
+  #4: ffffffff99086b40 (rcu_read_lock){....}, at: jbd2_write_access_granted+0x4e/0x250 [jbd2]
+ irq event stamp: 1407125
+ hardirqs last  enabled at (1407125): [<ffffffff980da9b7>] __find_get_block+0x107/0x790
+ hardirqs last disabled at (1407124): [<ffffffff980da8f9>] __find_get_block+0x49/0x790
+ softirqs last  enabled at (1405528): [<ffffffff98a0034c>] __do_softirq+0x34c/0x57c
+ softirqs last disabled at (1405521): [<ffffffff97cc67a2>] irq_exit+0xa2/0xc0
+
+ Reported by Kernel Concurrency Sanitizer on:
+ CPU: 68 PID: 25724 Comm: fsync04 Tainted: G L 5.6.0-rc2-next-20200221+ #7
+ Hardware name: HPE ProLiant DL385 Gen10/ProLiant DL385 Gen10, BIOS A40 07/10/2019
+
+The plain reads are outside of the jh->b_state_lock critical section, which
+results in data races. Fix them by adding pairs of READ_ONCE()/WRITE_ONCE().
+
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Qian Cai <cai@lca.pw>
+Link: https://lore.kernel.org/r/20200222043111.2227-1-cai@lca.pw
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/jbd2/transaction.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 6457023d8fac1..3233e5ac9774f 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -1041,8 +1041,8 @@ static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh,
+       /* For undo access buffer must have data copied */
+       if (undo && !jh->b_committed_data)
+               goto out;
+-      if (jh->b_transaction != handle->h_transaction &&
+-          jh->b_next_transaction != handle->h_transaction)
++      if (READ_ONCE(jh->b_transaction) != handle->h_transaction &&
++          READ_ONCE(jh->b_next_transaction) != handle->h_transaction)
+               goto out;
+       /*
+        * There are two reasons for the barrier here:
+@@ -2458,8 +2458,8 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
+        * our jh reference and thus __jbd2_journal_file_buffer() must not
+        * take a new one.
+        */
+-      jh->b_transaction = jh->b_next_transaction;
+-      jh->b_next_transaction = NULL;
++      WRITE_ONCE(jh->b_transaction, jh->b_next_transaction);
++      WRITE_ONCE(jh->b_next_transaction, NULL);
+       if (buffer_freed(bh))
+               jlist = BJ_Forget;
+       else if (jh->b_modified)
+-- 
+2.20.1
+
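A minimal kernel-style C sketch of the READ_ONCE()/WRITE_ONCE() pairing the
jbd2 patch above relies on. The struct and field names (example_head, cur_tx,
next_tx) are illustrative stand-ins, not the real jbd2 definitions: the writer
updates the pointers with the state lock held, the reader checks them without
taking any lock, and the _ONCE() accessors keep the compiler from tearing,
fusing or re-reading the accesses that KCSAN flagged as plain data races.

#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct transaction;

struct example_head {
        spinlock_t          state_lock;     /* serializes the writer side */
        struct transaction *cur_tx;
        struct transaction *next_tx;
};

/* Writer side: called with state_lock held, in the spirit of
 * __jbd2_journal_refile_buffer(). */
static void example_refile(struct example_head *h)
{
        assert_spin_locked(&h->state_lock);
        WRITE_ONCE(h->cur_tx, h->next_tx);
        WRITE_ONCE(h->next_tx, NULL);
}

/* Reader side: a lockless membership check, in the spirit of
 * jbd2_write_access_granted(). */
static bool example_is_attached(struct example_head *h, struct transaction *tx)
{
        return READ_ONCE(h->cur_tx) == tx || READ_ONCE(h->next_tx) == tx;
}

The marked accesses add no ordering of their own; they only make the
concurrent read and write well-defined, which is exactly the scope of the fix.
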
diff --git a/queue-4.4/net-ks8851-ml-fix-irq-handling-and-locking.patch b/queue-4.4/net-ks8851-ml-fix-irq-handling-and-locking.patch
new file mode 100644
index 0000000..07af563
--- /dev/null
+++ b/queue-4.4/net-ks8851-ml-fix-irq-handling-and-locking.patch
@@ -0,0 +1,102 @@
+From 13566be586cf414e7e1d8a5c92e80e9337558f2c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 23 Feb 2020 14:38:40 +0100
+Subject: net: ks8851-ml: Fix IRQ handling and locking
+
+From: Marek Vasut <marex@denx.de>
+
+[ Upstream commit 44343418d0f2f623cb9da6f5000df793131cbe3b ]
+
+The KS8851 requires that packet RX and TX be mutually exclusive.
+Currently, the driver tries to achieve this by disabling the interrupt
+from the card by writing the card registers and by disabling the
+interrupt on the interrupt controller. This, however, is racy on SMP.
+
+Replace this approach by expanding the spinlock used around the
+ks_start_xmit() TX path to the ks_irq() RX path to ensure true mutual
+exclusion, and remove the interrupt enabling/disabling, which is no
+longer needed. Furthermore, also disable interrupts in ks_net_stop(),
+which was missing before.
+
+Note that a massive improvement here would be to re-use the approach of
+the KS8851 driver, which is to move the TX path into a worker thread,
+move interrupt handling to a threaded interrupt, and synchronize
+everything with mutexes, but that would be a much bigger rework, for a
+separate patch.
+
+Signed-off-by: Marek Vasut <marex@denx.de>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: Lukas Wunner <lukas@wunner.de>
+Cc: Petr Stetiar <ynezz@true.cz>
+Cc: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/micrel/ks8851_mll.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
+index d94e151cff12b..d4747caf1e7cc 100644
+--- a/drivers/net/ethernet/micrel/ks8851_mll.c
++++ b/drivers/net/ethernet/micrel/ks8851_mll.c
+@@ -831,14 +831,17 @@ static irqreturn_t ks_irq(int irq, void *pw)
+ {
+       struct net_device *netdev = pw;
+       struct ks_net *ks = netdev_priv(netdev);
++      unsigned long flags;
+       u16 status;
++      spin_lock_irqsave(&ks->statelock, flags);
+       /*this should be the first in IRQ handler */
+       ks_save_cmd_reg(ks);
+       status = ks_rdreg16(ks, KS_ISR);
+       if (unlikely(!status)) {
+               ks_restore_cmd_reg(ks);
++              spin_unlock_irqrestore(&ks->statelock, flags);
+               return IRQ_NONE;
+       }
+@@ -864,6 +867,7 @@ static irqreturn_t ks_irq(int irq, void *pw)
+               ks->netdev->stats.rx_over_errors++;
+       /* this should be the last in IRQ handler*/
+       ks_restore_cmd_reg(ks);
++      spin_unlock_irqrestore(&ks->statelock, flags);
+       return IRQ_HANDLED;
+ }
+@@ -933,6 +937,7 @@ static int ks_net_stop(struct net_device *netdev)
+       /* shutdown RX/TX QMU */
+       ks_disable_qmu(ks);
++      ks_disable_int(ks);
+       /* set powermode to soft power down to save power */
+       ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
+@@ -989,10 +994,9 @@ static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+ {
+       netdev_tx_t retv = NETDEV_TX_OK;
+       struct ks_net *ks = netdev_priv(netdev);
++      unsigned long flags;
+-      disable_irq(netdev->irq);
+-      ks_disable_int(ks);
+-      spin_lock(&ks->statelock);
++      spin_lock_irqsave(&ks->statelock, flags);
+       /* Extra space are required:
+       *  4 byte for alignment, 4 for status/length, 4 for CRC
+@@ -1006,9 +1010,7 @@ static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+               dev_kfree_skb(skb);
+       } else
+               retv = NETDEV_TX_BUSY;
+-      spin_unlock(&ks->statelock);
+-      ks_enable_int(ks);
+-      enable_irq(netdev->irq);
++      spin_unlock_irqrestore(&ks->statelock, flags);
+       return retv;
+ }
+-- 
+2.20.1
+
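As a rough illustration of the locking scheme the ks8851-ml patch above ends
up with, here is a hedged kernel-style sketch. The names (example_priv,
example_irq, example_start_xmit) are hypothetical, not the real driver
symbols: one spinlock is taken both in the hard-IRQ handler and in the
ndo_start_xmit() path, so RX and TX register access can never overlap, even
on SMP, and the old disable_irq()/chip-mask dance becomes unnecessary.

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct example_priv {
        spinlock_t statelock;           /* serializes all chip register access */
};

static irqreturn_t example_irq(int irq, void *dev_id)
{
        struct net_device *netdev = dev_id;
        struct example_priv *priv = netdev_priv(netdev);
        unsigned long flags;

        spin_lock_irqsave(&priv->statelock, flags);
        /* ... read the interrupt status, drain the RX FIFO ... */
        spin_unlock_irqrestore(&priv->statelock, flags);

        return IRQ_HANDLED;
}

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
                                      struct net_device *netdev)
{
        struct example_priv *priv = netdev_priv(netdev);
        unsigned long flags;

        /* The lock alone keeps the IRQ handler (which takes the same lock)
         * off the chip registers; no disable_irq()/enable_irq() needed. */
        spin_lock_irqsave(&priv->statelock, flags);
        /* ... write the frame into the TX FIFO ... */
        spin_unlock_irqrestore(&priv->statelock, flags);

        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

The _irqsave variants mirror the patch: the TX path must disable local
interrupts while holding the lock, otherwise the IRQ handler could interrupt
it on the same CPU and deadlock on the lock it already holds.
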
diff --git a/queue-4.4/series b/queue-4.4/series
index 9cd455c5bdb244660000537bfbbd61847c024ec1..b6486e507176a4bd12db3d8ec85a0b5daf5b19a2 100644
--- a/queue-4.4/series
+++ b/queue-4.4/series
@@ -84,3 +84,6 @@ batman-adv-only-read-ogm-tvlv_len-after-buffer-len-check.patch
 batman-adv-avoid-free-alloc-race-when-handling-ogm-buffer.patch
 batman-adv-don-t-schedule-ogm-for-disabled-interface.patch
 perf-amd-uncore-replace-manual-sampling-check-with-c.patch
+net-ks8851-ml-fix-irq-handling-and-locking.patch
+signal-avoid-double-atomic-counter-increments-for-us.patch
+jbd2-fix-data-races-at-struct-journal_head.patch
diff --git a/queue-4.4/signal-avoid-double-atomic-counter-increments-for-us.patch b/queue-4.4/signal-avoid-double-atomic-counter-increments-for-us.patch
new file mode 100644
index 0000000..1275a46
--- /dev/null
+++ b/queue-4.4/signal-avoid-double-atomic-counter-increments-for-us.patch
@@ -0,0 +1,125 @@
+From d6a8e8fddf34b12e1aba345898373bec1c89f621 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Feb 2020 12:47:14 -0800
+Subject: signal: avoid double atomic counter increments for user accounting
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+[ Upstream commit fda31c50292a5062332fa0343c084bd9f46604d9 ]
+
+When queueing a signal, we increment both the user's count of pending
+signals (for RLIMIT_SIGPENDING tracking) and the refcount of the user
+struct itself (because we keep a reference to the user in the signal
+structure in order to correctly account for it when freeing).
+
+That turns out to be fairly expensive, because both of them are atomic
+updates, and particularly under extreme signal handling pressure on big
+machines, you can get a lot of cache contention on the user struct.
+That can then cause horrid cacheline ping-pong when you do these
+multiple accesses.
+
+So change the reference counting to only pin the user for the _first_
+pending signal, and to unpin it when the last pending signal is
+dequeued.  That means that when a user sees a lot of concurrent signal
+queuing - which is the only situation when this matters - the only
+atomic access needed is generally the 'sigpending' count update.
+
+This was noticed because of a particularly odd timing artifact on a
+dual-socket 96C/192T Cascade Lake platform: when you get into bad
+contention, it for some reason seems to be much worse on that machine
+when the contention happens in the upper 32-byte half of the cacheline.
+
+As a result, the kernel test robot will-it-scale 'signal1' benchmark had
+an odd performance regression simply due to random alignment of the
+'struct user_struct' (and pointed to a completely unrelated and
+apparently nonsensical commit for the regression).
+
+Avoiding the double increments (and decrements on the dequeueing side,
+of course) makes for much less contention and hugely improved
+performance on that will-it-scale microbenchmark.
+
+Quoting Feng Tang:
+
+ "It makes a big difference, that the performance score is tripled! bump
+  from original 17000 to 54000. Also the gap between 5.0-rc6 and
+  5.0-rc6+Jiri's patch is reduced to around 2%"
+
+[ The "2% gap" is the odd cacheline placement difference on that
+  platform: under the extreme contention case, the effect of which half
+  of the cacheline was hot was 5%, so with the reduced contention the
+  odd timing artifact is reduced too ]
+
+It does help in the non-contended case too, but is not nearly as
+noticeable.
+
+Reported-and-tested-by: Feng Tang <feng.tang@intel.com>
+Cc: Eric W. Biederman <ebiederm@xmission.com>
+Cc: Huang, Ying <ying.huang@intel.com>
+Cc: Philip Li <philip.li@intel.com>
+Cc: Andi Kleen <andi.kleen@intel.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/signal.c | 23 ++++++++++++++---------
+ 1 file changed, 14 insertions(+), 9 deletions(-)
+
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 7e4a4b199a117..90a94e54db092 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -373,27 +373,32 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
+ {
+       struct sigqueue *q = NULL;
+       struct user_struct *user;
++      int sigpending;
+       /*
+        * Protect access to @t credentials. This can go away when all
+        * callers hold rcu read lock.
++       *
++       * NOTE! A pending signal will hold on to the user refcount,
++       * and we get/put the refcount only when the sigpending count
++       * changes from/to zero.
+        */
+       rcu_read_lock();
+-      user = get_uid(__task_cred(t)->user);
+-      atomic_inc(&user->sigpending);
++      user = __task_cred(t)->user;
++      sigpending = atomic_inc_return(&user->sigpending);
++      if (sigpending == 1)
++              get_uid(user);
+       rcu_read_unlock();
+-      if (override_rlimit ||
+-          atomic_read(&user->sigpending) <=
+-                      task_rlimit(t, RLIMIT_SIGPENDING)) {
++      if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
+               q = kmem_cache_alloc(sigqueue_cachep, flags);
+       } else {
+               print_dropped_signal(sig);
+       }
+       if (unlikely(q == NULL)) {
+-              atomic_dec(&user->sigpending);
+-              free_uid(user);
++              if (atomic_dec_and_test(&user->sigpending))
++                      free_uid(user);
+       } else {
+               INIT_LIST_HEAD(&q->list);
+               q->flags = 0;
+@@ -407,8 +412,8 @@ static void __sigqueue_free(struct sigqueue *q)
+ {
+       if (q->flags & SIGQUEUE_PREALLOC)
+               return;
+-      atomic_dec(&q->user->sigpending);
+-      free_uid(q->user);
++      if (atomic_dec_and_test(&q->user->sigpending))
++              free_uid(q->user);
+       kmem_cache_free(sigqueue_cachep, q);
+ }
+-- 
+2.20.1
+
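To make the refcounting change above concrete, a small hedged sketch of the
pattern in kernel-style C, using a hypothetical example_user struct instead
of the real user_struct/get_uid()/free_uid(): the long-lived reference is
taken only on the 0 -> 1 transition of the pending counter and dropped only
when it falls back to 0, so the common queue/dequeue path touches a single
atomic counter instead of two.

#include <linux/atomic.h>

struct example_user {
        atomic_t refcount;      /* lifetime of the struct */
        atomic_t sigpending;    /* pending signals charged to this user */
};

/* Queueing side: pin the user only when the first signal is queued. */
static void example_charge_sigpending(struct example_user *u)
{
        if (atomic_inc_return(&u->sigpending) == 1)
                atomic_inc(&u->refcount);       /* stands in for get_uid() */
}

/* Dequeueing side: unpin only when the last pending signal goes away. */
static void example_uncharge_sigpending(struct example_user *u)
{
        if (atomic_dec_and_test(&u->sigpending))
                atomic_dec(&u->refcount);       /* stands in for free_uid() */
}

Under heavy concurrent signalling only the sigpending counter bounces between
CPUs; the refcount is touched at the zero boundary alone, which is what
removes the second atomic from the hot path.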