git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.15-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 18 Dec 2023 07:24:07 +0000 (08:24 +0100)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 18 Dec 2023 07:24:07 +0000 (08:24 +0100)
added patches:
arm64-mm-always-make-sw-dirty-ptes-hw-dirty-in-pte_modify.patch
drm-amdgpu-sdma5.2-add-begin-end_use-ring-callbacks.patch
team-fix-use-after-free-when-an-option-instance-allocation-fails.patch

queue-5.15/arm64-mm-always-make-sw-dirty-ptes-hw-dirty-in-pte_modify.patch [new file with mode: 0644]
queue-5.15/drm-amdgpu-sdma5.2-add-begin-end_use-ring-callbacks.patch [new file with mode: 0644]
queue-5.15/series
queue-5.15/team-fix-use-after-free-when-an-option-instance-allocation-fails.patch [new file with mode: 0644]

diff --git a/queue-5.15/arm64-mm-always-make-sw-dirty-ptes-hw-dirty-in-pte_modify.patch b/queue-5.15/arm64-mm-always-make-sw-dirty-ptes-hw-dirty-in-pte_modify.patch
new file mode 100644 (file)
index 0000000..339a0cc
--- /dev/null
+++ b/queue-5.15/arm64-mm-always-make-sw-dirty-ptes-hw-dirty-in-pte_modify.patch
@@ -0,0 +1,76 @@
+From 3c0696076aad60a2f04c019761921954579e1b0e Mon Sep 17 00:00:00 2001
+From: James Houghton <jthoughton@google.com>
+Date: Mon, 4 Dec 2023 17:26:46 +0000
+Subject: arm64: mm: Always make sw-dirty PTEs hw-dirty in pte_modify
+
+From: James Houghton <jthoughton@google.com>
+
+commit 3c0696076aad60a2f04c019761921954579e1b0e upstream.
+
+It is currently possible for a userspace application to enter an
+infinite page fault loop when using HugeTLB pages implemented with
+contiguous PTEs when HAFDBS is not available. This happens because:
+
+1. The kernel may sometimes write PTEs that are sw-dirty but hw-clean
+   (PTE_DIRTY | PTE_RDONLY | PTE_WRITE).
+
+2. If, during a write, the CPU uses a sw-dirty, hw-clean PTE in handling
+   the memory access on a system without HAFDBS, we will get a page
+   fault.
+
+3. HugeTLB will check if it needs to update the dirty bits on the PTE.
+   For contiguous PTEs, it will check to see if the pgprot bits need
+   updating. In this case, HugeTLB wants to write a sequence of
+   sw-dirty, hw-dirty PTEs, but it finds that all the PTEs it is about
+   to overwrite are all pte_dirty() (pte_sw_dirty() => pte_dirty()),
+   so it thinks no update is necessary.
+
+We can get the kernel to write a sw-dirty, hw-clean PTE with the
+following steps (showing the relevant VMA flags and pgprot bits):
+
+i.   Create a valid, writable contiguous PTE.
+       VMA vmflags:     VM_SHARED | VM_READ | VM_WRITE
+       VMA pgprot bits: PTE_RDONLY | PTE_WRITE
+       PTE pgprot bits: PTE_DIRTY | PTE_WRITE
+
+ii.  mprotect the VMA to PROT_NONE.
+       VMA vmflags:     VM_SHARED
+       VMA pgprot bits: PTE_RDONLY
+       PTE pgprot bits: PTE_DIRTY | PTE_RDONLY
+
+iii. mprotect the VMA back to PROT_READ | PROT_WRITE.
+       VMA vmflags:     VM_SHARED | VM_READ | VM_WRITE
+       VMA pgprot bits: PTE_RDONLY | PTE_WRITE
+       PTE pgprot bits: PTE_DIRTY | PTE_WRITE | PTE_RDONLY
+
+Make it impossible to create a writeable sw-dirty, hw-clean PTE with
+pte_modify(). Such a PTE should be impossible to create, and there may
+be places that assume that pte_dirty() implies pte_hw_dirty().
+
+Signed-off-by: James Houghton <jthoughton@google.com>
+Fixes: 031e6e6b4e12 ("arm64: hugetlb: Avoid unnecessary clearing in huge_ptep_set_access_flags")
+Cc: <stable@vger.kernel.org>
+Acked-by: Will Deacon <will@kernel.org>
+Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
+Link: https://lore.kernel.org/r/20231204172646.2541916-3-jthoughton@google.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/pgtable.h |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -766,6 +766,12 @@ static inline pte_t pte_modify(pte_t pte
+       if (pte_hw_dirty(pte))
+               pte = pte_mkdirty(pte);
+       pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
++      /*
++       * If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware
++       * dirtiness again.
++       */
++      if (pte_sw_dirty(pte))
++              pte = pte_mkdirty(pte);
+       return pte;
+ }
+
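
For context, the sw-dirty/hw-clean sequence in steps i-iii above can be driven
from userspace roughly as sketched below. This is a hypothetical reproducer,
not part of the patch: the hugepage size, the MAP_HUGE_* encoding and the
messages are illustrative, and it assumes an arm64 kernel without HAFDBS with
64K hugepages (a contiguous-PTE size on a 4K granule) reserved on the system.

/* Hypothetical reproducer sketch for the pte_modify() problem described
 * in the patch above; illustrative only.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26
#endif
#ifndef MAP_HUGE_64KB
#define MAP_HUGE_64KB (16U << MAP_HUGE_SHIFT)	/* log2(64K) == 16 */
#endif

#define LEN (64UL * 1024)	/* one 64K hugepage (assumed configuration) */

int main(void)
{
	/* i.   Valid, writable, shared hugetlb mapping; the memset marks
	 *      the PTEs sw-dirty (PTE_DIRTY | PTE_WRITE).
	 */
	char *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_64KB,
		       -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0x5a, LEN);

	/* ii.  PROT_NONE rewrites the PTEs as PTE_DIRTY | PTE_RDONLY,
	 *      i.e. sw-dirty but hw-clean.
	 */
	if (mprotect(p, LEN, PROT_NONE)) {
		perror("mprotect(PROT_NONE)");
		return 1;
	}

	/* iii. Restoring read/write goes through pte_modify(); before the
	 *      fix it could leave PTE_RDONLY set alongside
	 *      PTE_DIRTY | PTE_WRITE.
	 */
	if (mprotect(p, LEN, PROT_READ | PROT_WRITE)) {
		perror("mprotect(PROT_READ|PROT_WRITE)");
		return 1;
	}

	/* On an affected kernel without HAFDBS, this write can fault
	 * forever: the fault path sees pte_dirty() already set and skips
	 * the PTE update.
	 */
	p[0] = 1;
	puts("write completed");
	return 0;
}
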
diff --git a/queue-5.15/drm-amdgpu-sdma5.2-add-begin-end_use-ring-callbacks.patch b/queue-5.15/drm-amdgpu-sdma5.2-add-begin-end_use-ring-callbacks.patch
new file mode 100644 (file)
index 0000000..536745f
--- /dev/null
+++ b/queue-5.15/drm-amdgpu-sdma5.2-add-begin-end_use-ring-callbacks.patch
@@ -0,0 +1,81 @@
+From ab4750332dbe535243def5dcebc24ca00c1f98ac Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 7 Dec 2023 10:14:41 -0500
+Subject: drm/amdgpu/sdma5.2: add begin/end_use ring callbacks
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit ab4750332dbe535243def5dcebc24ca00c1f98ac upstream.
+
+Add begin/end_use ring callbacks to disallow GFXOFF when
+SDMA work is submitted and allow it again afterward.
+
+This should avoid corner cases where GFXOFF is erroneously
+entered when SDMA is still active.  For now just allow/disallow
+GFXOFF in the begin and end helpers until we root cause the
+issue.  This should not impact power as SDMA usage is pretty
+minimal and GFXOFF should not be active when SDMA is active
+anyway; this just makes it explicit.
+
+v2: move everything into sdma5.2 code.  No reason for this
+to be generic at this point.
+v3: Add comments in new code
+
+Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2220
+Reviewed-by: Mario Limonciello <mario.limonciello@amd.com> (v1)
+Tested-by: Mario Limonciello <mario.limonciello@amd.com> (v1)
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org # 5.15+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c |   28 ++++++++++++++++++++++++++++
+ 1 file changed, 28 insertions(+)
+
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+@@ -1660,6 +1660,32 @@ static void sdma_v5_2_get_clockgating_st
+               *flags |= AMD_CG_SUPPORT_SDMA_LS;
+ }
+
++static void sdma_v5_2_ring_begin_use(struct amdgpu_ring *ring)
++{
++      struct amdgpu_device *adev = ring->adev;
++
++      /* SDMA 5.2.3 (RMB) FW doesn't seem to properly
++       * disallow GFXOFF in some cases leading to
++       * hangs in SDMA.  Disallow GFXOFF while SDMA is active.
++       * We can probably just limit this to 5.2.3,
++       * but it shouldn't hurt for other parts since
++       * this GFXOFF will be disallowed anyway when SDMA is
++       * active, this just makes it explicit.
++       */
++      amdgpu_gfx_off_ctrl(adev, false);
++}
++
++static void sdma_v5_2_ring_end_use(struct amdgpu_ring *ring)
++{
++      struct amdgpu_device *adev = ring->adev;
++
++      /* SDMA 5.2.3 (RMB) FW doesn't seem to properly
++       * disallow GFXOFF in some cases leading to
++       * hangs in SDMA.  Allow GFXOFF when SDMA is complete.
++       */
++      amdgpu_gfx_off_ctrl(adev, true);
++}
++
+ const struct amd_ip_funcs sdma_v5_2_ip_funcs = {
+       .name = "sdma_v5_2",
+       .early_init = sdma_v5_2_early_init,
+@@ -1707,6 +1733,8 @@ static const struct amdgpu_ring_funcs sd
+       .test_ib = sdma_v5_2_ring_test_ib,
+       .insert_nop = sdma_v5_2_ring_insert_nop,
+       .pad_ib = sdma_v5_2_ring_pad_ib,
++      .begin_use = sdma_v5_2_ring_begin_use,
++      .end_use = sdma_v5_2_ring_end_use,
+       .emit_wreg = sdma_v5_2_ring_emit_wreg,
+       .emit_reg_wait = sdma_v5_2_ring_emit_reg_wait,
+       .emit_reg_write_reg_wait = sdma_v5_2_ring_emit_reg_write_reg_wait,
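
The new callbacks work because the GFXOFF control they call is, in effect,
reference counted: every disallow in begin_use must be paired with an allow in
end_use once the submission window closes. The sketch below illustrates that
pairing in isolation; it is generic C, not amdgpu code, and all names
(power_gate, gate_disallow, submit_work, ...) are made up for the example.

/* Generic sketch of refcount-style gating around ring use, in the spirit
 * of the begin_use/end_use callbacks added above.  Not the amdgpu API.
 */
#include <stdbool.h>
#include <stdio.h>

struct power_gate {
	int disallow;		/* users currently forbidding the feature */
	bool feature_on;	/* a GFXOFF-style power-saving feature */
};

/* begin_use-style hook: the first user switches the feature off */
static void gate_disallow(struct power_gate *g)
{
	if (g->disallow++ == 0)
		g->feature_on = false;
}

/* end_use-style hook: the last user switches it back on */
static void gate_allow(struct power_gate *g)
{
	if (--g->disallow == 0)
		g->feature_on = true;
}

/* A driver core would call the two hooks around each submission window,
 * so the IP-specific code only has to supply the pair.
 */
static void submit_work(struct power_gate *g, const char *job)
{
	gate_disallow(g);	/* .begin_use */
	printf("submitting %s (feature_on=%d)\n", job, g->feature_on);
	gate_allow(g);		/* .end_use */
}

int main(void)
{
	struct power_gate g = { .disallow = 0, .feature_on = true };

	submit_work(&g, "sdma job");
	printf("idle (feature_on=%d)\n", g.feature_on);
	return 0;
}
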
diff --git a/queue-5.15/series b/queue-5.15/series
index 1cb93ca1a57889c243741c16d16ec241a1ef916e..88b4e55823662ddc3e52b62e940e6f12f20a7f1c 100644 (file)
--- a/queue-5.15/series
+++ b/queue-5.15/series
@@ -72,3 +72,6 @@ perf-fix-perf_event_validate_size-lockdep-splat.patch
 btrfs-do-not-allow-non-subvolume-root-targets-for-snapshot.patch
 soundwire-stream-fix-null-pointer-dereference-for-multi_link.patch
 ext4-prevent-the-normalized-size-from-exceeding-ext_max_blocks.patch
+arm64-mm-always-make-sw-dirty-ptes-hw-dirty-in-pte_modify.patch
+team-fix-use-after-free-when-an-option-instance-allocation-fails.patch
+drm-amdgpu-sdma5.2-add-begin-end_use-ring-callbacks.patch
diff --git a/queue-5.15/team-fix-use-after-free-when-an-option-instance-allocation-fails.patch b/queue-5.15/team-fix-use-after-free-when-an-option-instance-allocation-fails.patch
new file mode 100644 (file)
index 0000000..1503812
--- /dev/null
+++ b/queue-5.15/team-fix-use-after-free-when-an-option-instance-allocation-fails.patch
@@ -0,0 +1,51 @@
+From c12296bbecc488623b7d1932080e394d08f3226b Mon Sep 17 00:00:00 2001
+From: Florent Revest <revest@chromium.org>
+Date: Wed, 6 Dec 2023 13:37:18 +0100
+Subject: team: Fix use-after-free when an option instance allocation fails
+
+From: Florent Revest <revest@chromium.org>
+
+commit c12296bbecc488623b7d1932080e394d08f3226b upstream.
+
+In __team_options_register, team_options are allocated and appended to
+the team's option_list.
+If one option instance allocation fails, the "inst_rollback" cleanup
+path frees the previously allocated options but doesn't remove them from
+the team's option_list.
+This leaves dangling pointers that can be dereferenced later by other
+parts of the team driver that iterate over options.
+
+This patch fixes the cleanup path to remove the dangling pointers from
+the list.
+
+As far as I can tell, this UAF doesn't have many security implications
+since it would be fairly hard to exploit (an attacker would need to make
+the allocation of that specific small object fail), but it's still nice
+to fix.
+
+Cc: stable@vger.kernel.org
+Fixes: 80f7c6683fe0 ("team: add support for per-port options")
+Signed-off-by: Florent Revest <revest@chromium.org>
+Reviewed-by: Jiri Pirko <jiri@nvidia.com>
+Reviewed-by: Hangbin Liu <liuhangbin@gmail.com>
+Link: https://lore.kernel.org/r/20231206123719.1963153-1-revest@chromium.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/team/team.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -285,8 +285,10 @@ static int __team_options_register(struc
+       return 0;
+
+ inst_rollback:
+-      for (i--; i >= 0; i--)
++      for (i--; i >= 0; i--) {
+               __team_option_inst_del_option(team, dst_opts[i]);
++              list_del(&dst_opts[i]->list);
++      }
+
+       i = option_count;
+ alloc_rollback:
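
The underlying pattern in this fix is general: an error-unwind loop has to undo
every side effect of the iterations that already succeeded, including list
insertion, before freeing the objects. Below is a standalone sketch of that
pattern; the names (reg_entry, registry, register_all) are invented for
illustration and this is not the team driver's code.

/* Standalone sketch of the register/rollback pattern: unlink each
 * partially registered entry before freeing it, so no dangling pointer
 * is left on the list.  Illustrative names only.
 */
#include <stdlib.h>

struct reg_entry {
	struct reg_entry *next;
	int id;
};

struct registry {
	struct reg_entry *head;		/* singly linked list of entries */
};

static void reg_add(struct registry *r, struct reg_entry *e)
{
	e->next = r->head;
	r->head = e;
}

static void reg_del(struct registry *r, struct reg_entry *e)
{
	struct reg_entry **pp = &r->head;

	while (*pp && *pp != e)
		pp = &(*pp)->next;
	if (*pp)
		*pp = e->next;		/* unlink before the caller frees it */
}

/* Register 'count' entries; on failure, unwind the ones already added. */
static int register_all(struct registry *r, struct reg_entry **tmp, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		tmp[i] = calloc(1, sizeof(*tmp[i]));
		if (!tmp[i])
			goto rollback;
		tmp[i]->id = i;
		reg_add(r, tmp[i]);	/* side effect that must be undone */
	}
	return 0;

rollback:
	for (i--; i >= 0; i--) {
		reg_del(r, tmp[i]);	/* the step the fix above adds:   */
		free(tmp[i]);		/* unlink, then free, so the list */
	}				/* never points at freed memory   */
	return -1;
}
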