git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.1-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Tue, 2 Jan 2024 14:34:22 +0000 (15:34 +0100)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Tue, 2 Jan 2024 14:34:22 +0000 (15:34 +0100)
added patches:
ksmbd-fix-slab-out-of-bounds-in-smb_strndup_from_utf16.patch
mm-filemap-avoid-buffered-read-write-race-to-read-inconsistent-data.patch
mm-memory-failure-cast-index-to-loff_t-before-shifting-it.patch
mm-memory-failure-check-the-mapcount-of-the-precise-page.patch
mm-migrate-high-order-folios-in-swap-cache-correctly.patch
platform-x86-p2sb-allow-p2sb_bar-calls-during-pci-device-probe.patch

queue-6.1/ksmbd-fix-slab-out-of-bounds-in-smb_strndup_from_utf16.patch [new file with mode: 0644]
queue-6.1/mm-filemap-avoid-buffered-read-write-race-to-read-inconsistent-data.patch [new file with mode: 0644]
queue-6.1/mm-memory-failure-cast-index-to-loff_t-before-shifting-it.patch [new file with mode: 0644]
queue-6.1/mm-memory-failure-check-the-mapcount-of-the-precise-page.patch [new file with mode: 0644]
queue-6.1/mm-migrate-high-order-folios-in-swap-cache-correctly.patch [new file with mode: 0644]
queue-6.1/platform-x86-p2sb-allow-p2sb_bar-calls-during-pci-device-probe.patch [new file with mode: 0644]
queue-6.1/series

diff --git a/queue-6.1/ksmbd-fix-slab-out-of-bounds-in-smb_strndup_from_utf16.patch b/queue-6.1/ksmbd-fix-slab-out-of-bounds-in-smb_strndup_from_utf16.patch
new file mode 100644
index 0000000..027b001
--- /dev/null
@@ -0,0 +1,56 @@
+From d10c77873ba1e9e6b91905018e29e196fd5f863d Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Wed, 20 Dec 2023 15:52:11 +0900
+Subject: ksmbd: fix slab-out-of-bounds in smb_strndup_from_utf16()
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit d10c77873ba1e9e6b91905018e29e196fd5f863d upstream.
+
+If ->NameOffset/Length is bigger than ->CreateContextsOffset/Length,
+ksmbd_check_message doesn't validate the request buffer correctly,
+so a slab-out-of-bounds warning from calling smb_strndup_from_utf16()
+in smb2_open() could happen. If ->NameLength is non-zero, set the larger
+of the two sums (Name and CreateContext size) as the offset and length of
+the data area.
+
+Reported-by: Yang Chaoming <lometsj@live.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/smb2misc.c |   15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+--- a/fs/smb/server/smb2misc.c
++++ b/fs/smb/server/smb2misc.c
+@@ -106,16 +106,25 @@ static int smb2_get_data_area_len(unsign
+               break;
+       case SMB2_CREATE:
+       {
++              unsigned short int name_off =
++                      le16_to_cpu(((struct smb2_create_req *)hdr)->NameOffset);
++              unsigned short int name_len =
++                      le16_to_cpu(((struct smb2_create_req *)hdr)->NameLength);
++
+               if (((struct smb2_create_req *)hdr)->CreateContextsLength) {
+                       *off = le32_to_cpu(((struct smb2_create_req *)
+                               hdr)->CreateContextsOffset);
+                       *len = le32_to_cpu(((struct smb2_create_req *)
+                               hdr)->CreateContextsLength);
+-                      break;
++                      if (!name_len)
++                              break;
++
++                      if (name_off + name_len < (u64)*off + *len)
++                              break;
+               }
+-              *off = le16_to_cpu(((struct smb2_create_req *)hdr)->NameOffset);
+-              *len = le16_to_cpu(((struct smb2_create_req *)hdr)->NameLength);
++              *off = name_off;
++              *len = name_len;
+               break;
+       }
+       case SMB2_QUERY_INFO:
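
As a rough illustration of the extent selection described above, here is a
standalone sketch of the check the patched smb2_get_data_area_len() performs
for SMB2_CREATE. The u64 widening mirrors the hunk; the function name and the
plain-integer parameters are simplified for illustration only.

#include <stdint.h>

/* Sketch: when both a Name extent and a CreateContexts extent are present,
 * the data area is whichever of the two reaches further into the request,
 * so later length validation also covers the Name bytes that
 * smb_strndup_from_utf16() will read. */
void create_data_area(uint16_t name_off, uint16_t name_len,
		      uint32_t cc_off, uint32_t cc_len,
		      uint64_t *off, uint64_t *len)
{
	if (cc_len) {
		*off = cc_off;
		*len = cc_len;
		/* No name, or the name ends inside the CreateContexts
		 * extent: keep the CreateContexts extent. */
		if (!name_len ||
		    (uint64_t)name_off + name_len < (uint64_t)cc_off + cc_len)
			return;
	}
	/* Otherwise the Name extent is the (larger or only) data area. */
	*off = name_off;
	*len = name_len;
}
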
diff --git a/queue-6.1/mm-filemap-avoid-buffered-read-write-race-to-read-inconsistent-data.patch b/queue-6.1/mm-filemap-avoid-buffered-read-write-race-to-read-inconsistent-data.patch
new file mode 100644
index 0000000..29a590a
--- /dev/null
@@ -0,0 +1,106 @@
+From e2c27b803bb664748e090d99042ac128b3f88d92 Mon Sep 17 00:00:00 2001
+From: Baokun Li <libaokun1@huawei.com>
+Date: Wed, 13 Dec 2023 14:23:24 +0800
+Subject: mm/filemap: avoid buffered read/write race to read inconsistent data
+
+From: Baokun Li <libaokun1@huawei.com>
+
+commit e2c27b803bb664748e090d99042ac128b3f88d92 upstream.
+
+The following concurrency may cause the data read to be inconsistent with
+the data on disk:
+
+             cpu1                           cpu2
+------------------------------|------------------------------
+                               // Buffered write 2048 from 0
+                               ext4_buffered_write_iter
+                                generic_perform_write
+                                 copy_page_from_iter_atomic
+                                 ext4_da_write_end
+                                  ext4_da_do_write_end
+                                   block_write_end
+                                    __block_commit_write
+                                     folio_mark_uptodate
+// Buffered read 4096 from 0          smp_wmb()
+ext4_file_read_iter                   set_bit(PG_uptodate, folio_flags)
+ generic_file_read_iter            i_size_write // 2048
+  filemap_read                     unlock_page(page)
+   filemap_get_pages
+    filemap_get_read_batch
+    folio_test_uptodate(folio)
+     ret = test_bit(PG_uptodate, folio_flags)
+     if (ret)
+      smp_rmb();
+      // Ensure that the data in page 0-2048 is up-to-date.
+
+                               // New buffered write 2048 from 2048
+                               ext4_buffered_write_iter
+                                generic_perform_write
+                                 copy_page_from_iter_atomic
+                                 ext4_da_write_end
+                                  ext4_da_do_write_end
+                                   block_write_end
+                                    __block_commit_write
+                                     folio_mark_uptodate
+                                      smp_wmb()
+                                      set_bit(PG_uptodate, folio_flags)
+                                   i_size_write // 4096
+                                   unlock_page(page)
+
+   isize = i_size_read(inode) // 4096
+   // Read the latest isize 4096, but without smp_rmb() there may be
+   // Load-Load reordering, so the data in the 2048-4096 range of the
+   // page may not be up-to-date.
+   copy_page_to_iter
+   // copyout 4096
+
+In the concurrency above, we read the updated i_size, but there is no read
+barrier to ensure that the data in the page is the same as the i_size at
+this point, so we may copy the unsynchronized page out.  Hence, add the
+missing read memory barrier to fix this.
+
+This is a Load-Load reordering issue that only occurs on some weakly
+ordered architectures (e.g. ARM64, Alpha), not on strongly ordered ones
+(e.g. x86). Theoretically the problem is not limited to ext4: filesystems
+that call filemap_read() without holding the inode lock (e.g. btrfs, f2fs,
+ubifs ...) have the same problem, while filesystems that hold the inode
+lock (e.g. xfs, nfs) do not.
+
+Link: https://lkml.kernel.org/r/20231213062324.739009-1-libaokun1@huawei.com
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Cc: Andreas Dilger <adilger.kernel@dilger.ca>
+Cc: Christoph Hellwig <hch@infradead.org>
+Cc: Dave Chinner <david@fromorbit.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
+Cc: Theodore Ts'o <tytso@mit.edu>
+Cc: yangerkun <yangerkun@huawei.com>
+Cc: Yu Kuai <yukuai3@huawei.com>
+Cc: Zhang Yi <yi.zhang@huawei.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/filemap.c |    9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2745,6 +2745,15 @@ ssize_t filemap_read(struct kiocb *iocb,
+               end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);
+               /*
++               * Pairs with a barrier in
++               * block_write_end()->mark_buffer_dirty() or other page
++               * dirtying routines like iomap_write_end() to ensure
++               * changes to page contents are visible before we see
++               * increased inode size.
++               */
++              smp_rmb();
++
++              /*
+                * Once we start copying data, we don't want to be touching any
+                * cachelines that might be contended:
+                */
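
The ordering that the added smp_rmb() restores can be approximated outside the
kernel with C11 acquire/release atomics. The names and the fixed 4096-byte
buffer below are invented for the sketch, and acquire/release is only a
stand-in for the kernel's smp_wmb()/smp_rmb() pairing.

#include <stdatomic.h>
#include <stddef.h>

static char page_data[4096];      /* stand-in for the page contents */
static atomic_long file_size;     /* stand-in for inode->i_size */

/* Writer, roughly __block_commit_write() + i_size_write(): publish the
 * new bytes before publishing the larger size. */
void buffered_write(size_t from, size_t to)
{
	for (size_t i = from; i < to; i++)
		page_data[i] = 'x';
	atomic_store_explicit(&file_size, (long)to, memory_order_release);
}

/* Reader, roughly filemap_read() after the fix: order the size load
 * before the data loads (the role of the added smp_rmb()), so seeing
 * size 4096 guarantees bytes 2048-4095 are visible as well. */
long buffered_read(char *dst)
{
	long n = atomic_load_explicit(&file_size, memory_order_acquire);
	for (long i = 0; i < n; i++)
		dst[i] = page_data[i];
	return n;
}
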
diff --git a/queue-6.1/mm-memory-failure-cast-index-to-loff_t-before-shifting-it.patch b/queue-6.1/mm-memory-failure-cast-index-to-loff_t-before-shifting-it.patch
new file mode 100644
index 0000000..390b7cc
--- /dev/null
@@ -0,0 +1,36 @@
+From 39ebd6dce62d8cfe3864e16148927a139f11bc9a Mon Sep 17 00:00:00 2001
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Date: Mon, 18 Dec 2023 13:58:37 +0000
+Subject: mm/memory-failure: cast index to loff_t before shifting it
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+commit 39ebd6dce62d8cfe3864e16148927a139f11bc9a upstream.
+
+On 32-bit systems, we'll lose the top bits of index because arithmetic
+will be performed in unsigned long instead of unsigned long long.  This
+affects files over 4GB in size.
+
+Link: https://lkml.kernel.org/r/20231218135837.3310403-4-willy@infradead.org
+Fixes: 6100e34b2526 ("mm, memory_failure: Teach memory_failure() about dev_pagemap pages")
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memory-failure.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1560,7 +1560,7 @@ static void unmap_and_kill(struct list_h
+                * mapping being torn down is communicated in siginfo, see
+                * kill_proc()
+                */
+-              loff_t start = (index << PAGE_SHIFT) & ~(size - 1);
++              loff_t start = ((loff_t)index << PAGE_SHIFT) & ~(size - 1);
+               unmap_mapping_range(mapping, start, size, 0);
+       }
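
A small userspace illustration of the truncation the cast avoids; here a
32-bit unsigned index stands in for pgoff_t on a 32-bit kernel and 12 stands
in for PAGE_SHIFT.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int index = 0x140000u;             /* page index of an offset past 4 GiB */

	uint64_t truncated = index << 12;           /* shift done in 32-bit arithmetic: wraps */
	uint64_t widened = (uint64_t)index << 12;   /* widen first, as the loff_t cast does */

	printf("without cast: 0x%llx\n", (unsigned long long)truncated); /* 0x40000000  */
	printf("with cast:    0x%llx\n", (unsigned long long)widened);   /* 0x140000000 */
	return 0;
}

On a 64-bit build both expressions agree, which is why the bug only bites
32-bit systems with files larger than 4GB.
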
diff --git a/queue-6.1/mm-memory-failure-check-the-mapcount-of-the-precise-page.patch b/queue-6.1/mm-memory-failure-check-the-mapcount-of-the-precise-page.patch
new file mode 100644
index 0000000..e8ca2a5
--- /dev/null
@@ -0,0 +1,49 @@
+From c79c5a0a00a9457718056b588f312baadf44e471 Mon Sep 17 00:00:00 2001
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Date: Mon, 18 Dec 2023 13:58:36 +0000
+Subject: mm/memory-failure: check the mapcount of the precise page
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+commit c79c5a0a00a9457718056b588f312baadf44e471 upstream.
+
+A process may map only some of the pages in a folio, and might be missed
+if it maps the poisoned page but not the head page.  Or it might be
+unnecessarily hit if it maps the head page, but not the poisoned page.
+
+Link: https://lkml.kernel.org/r/20231218135837.3310403-3-willy@infradead.org
+Fixes: 7af446a841a2 ("HWPOISON, hugetlb: enable error handling path for hugepage")
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memory-failure.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1421,7 +1421,7 @@ static bool hwpoison_user_mappings(struc
+        * This check implies we don't kill processes if their pages
+        * are in the swap cache early. Those are always late kills.
+        */
+-      if (!page_mapped(hpage))
++      if (!page_mapped(p))
+               return true;
+       if (PageKsm(p)) {
+@@ -1477,10 +1477,10 @@ static bool hwpoison_user_mappings(struc
+               try_to_unmap(folio, ttu);
+       }
+-      unmap_success = !page_mapped(hpage);
++      unmap_success = !page_mapped(p);
+       if (!unmap_success)
+               pr_err("%#lx: failed to unmap page (mapcount=%d)\n",
+-                     pfn, page_mapcount(hpage));
++                     pfn, page_mapcount(p));
+       /*
+        * try_to_unmap() might put mlocked page in lru cache, so call
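
A toy model of why the precise page matters; the folio is reduced to an array
of per-page mapcounts, and nothing below is kernel API.

#include <stdbool.h>
#include <stdio.h>

#define FOLIO_PAGES 4

/* mapcount[i] > 0 means subpage i of the folio is mapped by someone. */
static bool subpage_mapped(const int *mapcount, int i)
{
	return mapcount[i] > 0;
}

int main(void)
{
	/* A process mapped only subpage 1, which is the poisoned one. */
	int mapcount[FOLIO_PAGES] = { 0, 1, 0, 0 };
	int poisoned = 1;

	/* Old check (head page only): wrongly concludes nothing is mapped. */
	printf("head page check:    %s\n",
	       subpage_mapped(mapcount, 0) ? "mapped" : "not mapped");
	/* Fixed check (precise page): finds the mapping that must be killed. */
	printf("precise page check: %s\n",
	       subpage_mapped(mapcount, poisoned) ? "mapped" : "not mapped");
	return 0;
}
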
diff --git a/queue-6.1/mm-migrate-high-order-folios-in-swap-cache-correctly.patch b/queue-6.1/mm-migrate-high-order-folios-in-swap-cache-correctly.patch
new file mode 100644
index 0000000..3cca431
--- /dev/null
@@ -0,0 +1,73 @@
+From fc346d0a70a13d52fe1c4bc49516d83a42cd7c4c Mon Sep 17 00:00:00 2001
+From: Charan Teja Kalla <quic_charante@quicinc.com>
+Date: Thu, 14 Dec 2023 04:58:41 +0000
+Subject: mm: migrate high-order folios in swap cache correctly
+
+From: Charan Teja Kalla <quic_charante@quicinc.com>
+
+commit fc346d0a70a13d52fe1c4bc49516d83a42cd7c4c upstream.
+
+Large folios occupy N consecutive entries in the swap cache instead of
+using multi-index entries like the page cache.  However, if a large folio
+is re-added to the LRU list, it can be migrated.  The migration code was
+not aware of the difference between the swap cache and the page cache and
+assumed that a single xas_store() would be sufficient.
+
+This leaves potentially many stale pointers to the now-migrated folio in
+the swap cache, which can lead to almost arbitrary data corruption in the
+future.  This can also manifest as infinite loops with the RCU read lock
+held.
+
+[willy@infradead.org: modifications to the changelog & tweaked the fix]
+Fixes: 3417013e0d18 ("mm/migrate: Add folio_migrate_mapping()")
+Link: https://lkml.kernel.org/r/20231214045841.961776-1-willy@infradead.org
+Signed-off-by: Charan Teja Kalla <quic_charante@quicinc.com>
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reported-by: Charan Teja Kalla <quic_charante@quicinc.com>
+Closes: https://lkml.kernel.org/r/1700569840-17327-1-git-send-email-quic_charante@quicinc.com
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Cc: Shakeel Butt <shakeelb@google.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/migrate.c |    9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -388,6 +388,7 @@ int folio_migrate_mapping(struct address
+       int dirty;
+       int expected_count = folio_expected_refs(mapping, folio) + extra_count;
+       long nr = folio_nr_pages(folio);
++      long entries, i;
+       if (!mapping) {
+               /* Anonymous page without mapping */
+@@ -425,8 +426,10 @@ int folio_migrate_mapping(struct address
+                       folio_set_swapcache(newfolio);
+                       newfolio->private = folio_get_private(folio);
+               }
++              entries = nr;
+       } else {
+               VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
++              entries = 1;
+       }
+       /* Move dirty while page refs frozen and newpage not yet exposed */
+@@ -436,7 +439,11 @@ int folio_migrate_mapping(struct address
+               folio_set_dirty(newfolio);
+       }
+-      xas_store(&xas, newfolio);
++      /* Swap cache still stores N entries instead of a high-order entry */
++      for (i = 0; i < entries; i++) {
++              xas_store(&xas, newfolio);
++              xas_next(&xas);
++      }
+       /*
+        * Drop cache reference from old page by unfreezing
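
The swap-cache layout the fix accounts for can be sketched with a plain slot
array; the structures and names below are simplified stand-ins for the xarray
and struct folio, not kernel code.

#include <stdio.h>

struct folio { int id; long nr; };   /* nr plays the role of folio_nr_pages() */

/* A swap-cache folio owns 'nr' consecutive slots, so all of them must be
 * rewritten; the page cache uses a single multi-index entry, so one store
 * is enough. Before the fix only the first slot was updated, leaving
 * stale pointers behind. */
static void migrate_mapping(struct folio **slots, long first,
			    struct folio *oldf, struct folio *newf,
			    int swap_cache)
{
	long entries = swap_cache ? oldf->nr : 1;

	for (long i = 0; i < entries; i++)
		slots[first + i] = newf;
}

int main(void)
{
	struct folio oldf = { .id = 1, .nr = 4 };
	struct folio newf = { .id = 2, .nr = 4 };
	struct folio *slots[8] = { [2] = &oldf, [3] = &oldf, [4] = &oldf, [5] = &oldf };

	migrate_mapping(slots, 2, &oldf, &newf, 1);
	for (int i = 2; i < 6; i++)
		printf("slot %d -> folio %d\n", i, slots[i]->id);
	return 0;
}
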
diff --git a/queue-6.1/platform-x86-p2sb-allow-p2sb_bar-calls-during-pci-device-probe.patch b/queue-6.1/platform-x86-p2sb-allow-p2sb_bar-calls-during-pci-device-probe.patch
new file mode 100644
index 0000000..e32458f
--- /dev/null
@@ -0,0 +1,284 @@
+From b28ff7a7c3245d7f62acc20f15b4361292fe4117 Mon Sep 17 00:00:00 2001
+From: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Date: Fri, 29 Dec 2023 15:39:11 +0900
+Subject: platform/x86: p2sb: Allow p2sb_bar() calls during PCI device probe
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+
+commit b28ff7a7c3245d7f62acc20f15b4361292fe4117 upstream.
+
+p2sb_bar() unhides the P2SB device to get resources from it. It guards
+the operation by locking pci_rescan_remove_lock so that parallel rescans
+do not find the P2SB device. However, this lock causes a deadlock when a
+PCI bus rescan is triggered via /sys/bus/pci/rescan: the rescan locks
+pci_rescan_remove_lock and probes PCI devices, and when a PCI device
+calls p2sb_bar() during probe, it tries to lock pci_rescan_remove_lock
+again. Hence the deadlock.
+
+To avoid the deadlock, do not lock pci_rescan_remove_lock in p2sb_bar().
+Instead, take the lock at fs_initcall time: introduce
+p2sb_cache_resources(), called from an fs_initcall, which gets and caches
+the P2SB resources. In p2sb_bar(), refer to the cache and return it to
+the caller.
+
+Suggested-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Fixes: 9745fb07474f ("platform/x86/intel: Add Primary to Sideband (P2SB) bridge support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Link: https://lore.kernel.org/linux-pci/6xb24fjmptxxn5js2fjrrddjae6twex5bjaftwqsuawuqqqydx@7cl3uik5ef6j/
+Link: https://lore.kernel.org/r/20231229063912.2517922-2-shinichiro.kawasaki@wdc.com
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/platform/x86/p2sb.c | 178 +++++++++++++++++++++++++++---------
+ 1 file changed, 134 insertions(+), 44 deletions(-)
+
+diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
+index 1cf2471d54dd..fcf1ce8bbdc5 100644
+--- a/drivers/platform/x86/p2sb.c
++++ b/drivers/platform/x86/p2sb.c
+@@ -26,6 +26,21 @@ static const struct x86_cpu_id p2sb_cpu_ids[] = {
+       {}
+ };
++/*
++ * Cache BAR0 of P2SB device functions 0 to 7.
++ * TODO: The constant 8 is the number of functions that PCI specification
++ *       defines. Same definitions exist tree-wide. Unify this definition and
++ *       the other definitions then move to include/uapi/linux/pci.h.
++ */
++#define NR_P2SB_RES_CACHE 8
++
++struct p2sb_res_cache {
++      u32 bus_dev_id;
++      struct resource res;
++};
++
++static struct p2sb_res_cache p2sb_resources[NR_P2SB_RES_CACHE];
++
+ static int p2sb_get_devfn(unsigned int *devfn)
+ {
+       unsigned int fn = P2SB_DEVFN_DEFAULT;
+@@ -39,8 +54,16 @@ static int p2sb_get_devfn(unsigned int *devfn)
+       return 0;
+ }
++static bool p2sb_valid_resource(struct resource *res)
++{
++      if (res->flags)
++              return true;
++
++      return false;
++}
++
+ /* Copy resource from the first BAR of the device in question */
+-static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
++static void p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
+ {
+       struct resource *bar0 = &pdev->resource[0];
+@@ -56,47 +79,64 @@ static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
+       mem->end = bar0->end;
+       mem->flags = bar0->flags;
+       mem->desc = bar0->desc;
++}
++
++static void p2sb_scan_and_cache_devfn(struct pci_bus *bus, unsigned int devfn)
++{
++      struct p2sb_res_cache *cache = &p2sb_resources[PCI_FUNC(devfn)];
++      struct pci_dev *pdev;
++
++      pdev = pci_scan_single_device(bus, devfn);
++      if (!pdev)
++              return;
++
++      p2sb_read_bar0(pdev, &cache->res);
++      cache->bus_dev_id = bus->dev.id;
++
++      pci_stop_and_remove_bus_device(pdev);
++      return;
++}
++
++static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn)
++{
++      unsigned int slot, fn;
++
++      if (PCI_FUNC(devfn) == 0) {
++              /*
++               * When function number of the P2SB device is zero, scan it and
++               * other function numbers, and if devices are available, cache
++               * their BAR0s.
++               */
++              slot = PCI_SLOT(devfn);
++              for (fn = 0; fn < NR_P2SB_RES_CACHE; fn++)
++                      p2sb_scan_and_cache_devfn(bus, PCI_DEVFN(slot, fn));
++      } else {
++              /* Scan the P2SB device and cache its BAR0 */
++              p2sb_scan_and_cache_devfn(bus, devfn);
++      }
++
++      if (!p2sb_valid_resource(&p2sb_resources[PCI_FUNC(devfn)].res))
++              return -ENOENT;
+       return 0;
+ }
+-static int p2sb_scan_and_read(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
++static struct pci_bus *p2sb_get_bus(struct pci_bus *bus)
+ {
+-      struct pci_dev *pdev;
+-      int ret;
++      static struct pci_bus *p2sb_bus;
+-      pdev = pci_scan_single_device(bus, devfn);
+-      if (!pdev)
+-              return -ENODEV;
++      bus = bus ?: p2sb_bus;
++      if (bus)
++              return bus;
+-      ret = p2sb_read_bar0(pdev, mem);
+-
+-      pci_stop_and_remove_bus_device(pdev);
+-      return ret;
++      /* Assume P2SB is on the bus 0 in domain 0 */
++      p2sb_bus = pci_find_bus(0, 0);
++      return p2sb_bus;
+ }
+-/**
+- * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR
+- * @bus: PCI bus to communicate with
+- * @devfn: PCI slot and function to communicate with
+- * @mem: memory resource to be filled in
+- *
+- * The BIOS prevents the P2SB device from being enumerated by the PCI
+- * subsystem, so we need to unhide and hide it back to lookup the BAR.
+- *
+- * if @bus is NULL, the bus 0 in domain 0 will be used.
+- * If @devfn is 0, it will be replaced by devfn of the P2SB device.
+- *
+- * Caller must provide a valid pointer to @mem.
+- *
+- * Locking is handled by pci_rescan_remove_lock mutex.
+- *
+- * Return:
+- * 0 on success or appropriate errno value on error.
+- */
+-int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
++static int p2sb_cache_resources(void)
+ {
+-      struct pci_dev *pdev_p2sb;
++      struct pci_bus *bus;
+       unsigned int devfn_p2sb;
+       u32 value = P2SBC_HIDE;
+       int ret;
+@@ -106,8 +146,9 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+       if (ret)
+               return ret;
+-      /* if @bus is NULL, use bus 0 in domain 0 */
+-      bus = bus ?: pci_find_bus(0, 0);
++      bus = p2sb_get_bus(NULL);
++      if (!bus)
++              return -ENODEV;
+       /*
+        * Prevent concurrent PCI bus scan from seeing the P2SB device and
+@@ -115,17 +156,16 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+        */
+       pci_lock_rescan_remove();
+-      /* Unhide the P2SB device, if needed */
++      /*
++       * The BIOS prevents the P2SB device from being enumerated by the PCI
++       * subsystem, so we need to unhide and hide it back to lookup the BAR.
++       * Unhide the P2SB device here, if needed.
++       */
+       pci_bus_read_config_dword(bus, devfn_p2sb, P2SBC, &value);
+       if (value & P2SBC_HIDE)
+               pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, 0);
+-      pdev_p2sb = pci_scan_single_device(bus, devfn_p2sb);
+-      if (devfn)
+-              ret = p2sb_scan_and_read(bus, devfn, mem);
+-      else
+-              ret = p2sb_read_bar0(pdev_p2sb, mem);
+-      pci_stop_and_remove_bus_device(pdev_p2sb);
++      ret = p2sb_scan_and_cache(bus, devfn_p2sb);
+       /* Hide the P2SB device, if it was hidden */
+       if (value & P2SBC_HIDE)
+@@ -133,12 +173,62 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+       pci_unlock_rescan_remove();
+-      if (ret)
+-              return ret;
++      return ret;
++}
+-      if (mem->flags == 0)
++/**
++ * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR
++ * @bus: PCI bus to communicate with
++ * @devfn: PCI slot and function to communicate with
++ * @mem: memory resource to be filled in
++ *
++ * If @bus is NULL, the bus 0 in domain 0 will be used.
++ * If @devfn is 0, it will be replaced by devfn of the P2SB device.
++ *
++ * Caller must provide a valid pointer to @mem.
++ *
++ * Return:
++ * 0 on success or appropriate errno value on error.
++ */
++int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
++{
++      struct p2sb_res_cache *cache;
++      int ret;
++
++      bus = p2sb_get_bus(bus);
++      if (!bus)
+               return -ENODEV;
++      if (!devfn) {
++              ret = p2sb_get_devfn(&devfn);
++              if (ret)
++                      return ret;
++      }
++
++      cache = &p2sb_resources[PCI_FUNC(devfn)];
++      if (cache->bus_dev_id != bus->dev.id)
++              return -ENODEV;
++
++      if (!p2sb_valid_resource(&cache->res))
++              return -ENOENT;
++
++      memcpy(mem, &cache->res, sizeof(*mem));
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(p2sb_bar);
++
++static int __init p2sb_fs_init(void)
++{
++      p2sb_cache_resources();
++      return 0;
++}
++
++/*
++ * pci_rescan_remove_lock to avoid access to unhidden P2SB devices can
++ * not be locked in sysfs pci bus rescan path because of deadlock. To
++ * avoid the deadlock, access to P2SB devices with the lock at an early
++ * step in kernel initialization and cache required resources. This
++ * should happen after subsys_initcall which initializes PCI subsystem
++ * and before device_initcall which requires P2SB resources.
++ */
++fs_initcall(p2sb_fs_init);
+-- 
+2.43.0
+
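
The pattern the patch adopts (fill a cache once at an early initcall while
holding the lock, then serve later p2sb_bar() calls from the cache without
taking the lock) can be sketched in plain C. The pthread mutex and the BAR
numbers below are illustrative stand-ins, not the kernel's
pci_lock_rescan_remove() or real hardware values.

#include <errno.h>
#include <pthread.h>
#include <string.h>

struct res { unsigned long start, end, flags; };

static pthread_mutex_t rescan_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ pci_rescan_remove_lock */
static struct res bar_cache[8];                                 /* one slot per PCI function */

static void read_bar0(int fn, struct res *out)   /* stand-in for scanning the hidden device */
{
	out->start = 0xfd000000ul + 0x10000ul * fn;  /* made-up BAR window */
	out->end = out->start + 0xffff;
	out->flags = 1;
}

/* Runs once, early, before any driver probe can hold rescan_lock, so
 * taking the lock here cannot deadlock (~ p2sb_cache_resources()). */
void cache_resources_at_init(void)
{
	pthread_mutex_lock(&rescan_lock);
	for (int fn = 0; fn < 8; fn++)
		read_bar0(fn, &bar_cache[fn]);
	pthread_mutex_unlock(&rescan_lock);
}

/* Called from probe paths that may already hold rescan_lock: no locking
 * here, just a cache lookup (~ the reworked p2sb_bar()). */
int get_bar(int fn, struct res *mem)
{
	if (!bar_cache[fn].flags)
		return -ENOENT;
	memcpy(mem, &bar_cache[fn], sizeof(*mem));
	return 0;
}

In the kernel the equivalent ordering comes from running the cache step at
fs_initcall, after the PCI subsystem is initialized but before
device_initcall-level drivers can call p2sb_bar().
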
diff --git a/queue-6.1/series b/queue-6.1/series
index d550eab06ceac3842d923b21d38a5652fe8d4051..d64a7a726da55ac124428142214cc2338df7e943 100644
--- a/queue-6.1/series
+++ b/queue-6.1/series
@@ -82,3 +82,9 @@ spi-reintroduce-spi_set_cs_timing.patch
 spi-add-apis-in-spi-core-to-set-get-spi-chip_select-.patch
 spi-atmel-fix-clock-issue-when-using-devices-with-di.patch
 block-renumber-queue_flag_hw_wc.patch
+ksmbd-fix-slab-out-of-bounds-in-smb_strndup_from_utf16.patch
+platform-x86-p2sb-allow-p2sb_bar-calls-during-pci-device-probe.patch
+mm-filemap-avoid-buffered-read-write-race-to-read-inconsistent-data.patch
+mm-migrate-high-order-folios-in-swap-cache-correctly.patch
+mm-memory-failure-cast-index-to-loff_t-before-shifting-it.patch
+mm-memory-failure-check-the-mapcount-of-the-precise-page.patch