--- /dev/null
+From 3433adc8bd09fc9f29b8baddf33b4ecd1ecd2cdc Mon Sep 17 00:00:00 2001
+From: Vineet Gupta <vgupta@synopsys.com>
+Date: Fri, 23 Apr 2021 12:16:25 -0700
+Subject: ARC: entry: fix off-by-one error in syscall number validation
+
+From: Vineet Gupta <vgupta@synopsys.com>
+
+commit 3433adc8bd09fc9f29b8baddf33b4ecd1ecd2cdc upstream.
+
+We have NR_syscalls syscalls, numbered [0 .. NR_syscalls-1].
+However the check for an invalid syscall number is "> NR_syscalls" as
+opposed to ">=". This off-by-one error erroneously allows "NR_syscalls"
+to be treated as a valid syscall, causing an out-of-bounds access into
+the syscall call table and an ensuing crash (holes within the syscall
+table have an invalid-entry handler, but this is beyond the array
+implementing the table).
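+
+As a minimal illustration (a hypothetical C sketch, not the actual
+entry.S dispatcher):
+
+	long (*table[NR_syscalls])(void);	/* valid: 0 .. NR_syscalls-1 */
+
+	long dispatch(unsigned int nr)
+	{
+		if (nr > NR_syscalls)	/* BUG: nr == NR_syscalls passes */
+			return -ENOSYS;
+		return table[nr]();	/* out-of-bounds for NR_syscalls */
+	}
+
+The fix below compares against NR_syscalls - 1 instead, so the "higher"
+branch now rejects nr >= NR_syscalls.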
+
+This problem showed up on the v5.6 kernel when testing glibc 2.33 (which
+is v5.10 kernel capable, including the faccessat2 syscall 439). The v5.6
+kernel has NR_syscalls=439 (0 to 438). Due to the bug, syscall 439 passed
+by glibc was not handled as -ENOSYS but processed, leading to a crash.
+
+Link: https://github.com/foss-for-synopsys-dwc-arc-processors/linux/issues/48
+Reported-by: Shahab Vahedi <shahab@synopsys.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arc/kernel/entry.S | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arc/kernel/entry.S
++++ b/arch/arc/kernel/entry.S
+@@ -165,7 +165,7 @@ tracesys:
+
+ ; Do the Sys Call as we normally would.
+ ; Validate the Sys Call number
+- cmp r8, NR_syscalls
++ cmp r8, NR_syscalls - 1
+ mov.hi r0, -ENOSYS
+ bhi tracesys_exit
+
+@@ -243,7 +243,7 @@ ENTRY(EV_Trap)
+ ;============ Normal syscall case
+
+ ; syscall num shd not exceed the total system calls avail
+- cmp r8, NR_syscalls
++ cmp r8, NR_syscalls - 1
+ mov.hi r0, -ENOSYS
+ bhi .Lret_from_system_call
+
--- /dev/null
+From c5f756d8c6265ebb1736a7787231f010a3b782e5 Mon Sep 17 00:00:00 2001
+From: Vladimir Isaev <isaev@synopsys.com>
+Date: Tue, 27 Apr 2021 15:12:37 +0300
+Subject: ARC: mm: PAE: use 40-bit physical page mask
+
+From: Vladimir Isaev <isaev@synopsys.com>
+
+commit c5f756d8c6265ebb1736a7787231f010a3b782e5 upstream.
+
+A 32-bit PAGE_MASK cannot be used as a mask for physical addresses
+when PAE is enabled. PAGE_MASK_PHYS must be used for physical
+addresses instead of PAGE_MASK.
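+
+To see why, a rough sketch (assuming the default 8K pages, so PAGE_SHIFT
+is 13; illustrative values, not kernel code):
+
+	#define PAGE_SIZE	(1UL << 13)
+	#define PAGE_MASK	(~(PAGE_SIZE - 1))	/* 0xffffe000, 32 bits */
+
+	phys_addr_t paddr = 0x123456789aULL;	/* 40-bit physical address */
+	paddr &= PAGE_MASK;			/* the 32-bit mask zero-   */
+						/* extends: 0x34566000, so */
+						/* bits 39..32 are lost    */
+
+PAGE_MASK_PHYS, defined below as (0xff00000000ull | PAGE_MASK), keeps
+those upper PAE40 bits intact.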
+
+Without this, init gets a SIGSEGV if pte_modify() was called:
+
+| potentially unexpected fatal signal 11.
+| Path: /bin/busybox
+| CPU: 0 PID: 1 Comm: init Not tainted 5.12.0-rc5-00003-g1e43c377a79f-dirty
+| Insn could not be fetched
+| @No matching VMA found
+| ECR: 0x00040000 EFA: 0x00000000 ERET: 0x00000000
+| STAT: 0x80080082 [IE U ] BTA: 0x00000000
+| SP: 0x5f9ffe44 FP: 0x00000000 BLK: 0xaf3d4
+| LPS: 0x000d093e LPE: 0x000d0950 LPC: 0x00000000
+| r00: 0x00000002 r01: 0x5f9fff14 r02: 0x5f9fff20
+| ...
+| Kernel panic - not syncing: Attempted to kill init! exitcode=0x0000000b
+
+Signed-off-by: Vladimir Isaev <isaev@synopsys.com>
+Reported-by: kernel test robot <lkp@intel.com>
+Cc: Vineet Gupta <vgupta@synopsys.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arc/include/asm/page.h | 12 ++++++++++++
+ arch/arc/include/asm/pgtable.h | 12 +++---------
+ arch/arc/include/uapi/asm/page.h | 1 -
+ arch/arc/mm/ioremap.c | 5 +++--
+ arch/arc/mm/tlb.c | 2 +-
+ 5 files changed, 19 insertions(+), 13 deletions(-)
+
+--- a/arch/arc/include/asm/page.h
++++ b/arch/arc/include/asm/page.h
+@@ -7,6 +7,18 @@
+
+ #include <uapi/asm/page.h>
+
++#ifdef CONFIG_ARC_HAS_PAE40
++
++#define MAX_POSSIBLE_PHYSMEM_BITS 40
++#define PAGE_MASK_PHYS (0xff00000000ull | PAGE_MASK)
++
++#else /* CONFIG_ARC_HAS_PAE40 */
++
++#define MAX_POSSIBLE_PHYSMEM_BITS 32
++#define PAGE_MASK_PHYS PAGE_MASK
++
++#endif /* CONFIG_ARC_HAS_PAE40 */
++
+ #ifndef __ASSEMBLY__
+
+ #define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
+--- a/arch/arc/include/asm/pgtable.h
++++ b/arch/arc/include/asm/pgtable.h
+@@ -108,8 +108,8 @@
+ #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
+
+ /* Set of bits not changed in pte_modify */
+-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
+-
++#define _PAGE_CHG_MASK (PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
++ _PAGE_SPECIAL)
+ /* More Abbrevaited helpers */
+ #define PAGE_U_NONE __pgprot(___DEF)
+ #define PAGE_U_R __pgprot(___DEF | _PAGE_READ)
+@@ -133,13 +133,7 @@
+ #define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
+ #define PTE_BITS_RWX (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
+
+-#ifdef CONFIG_ARC_HAS_PAE40
+-#define PTE_BITS_NON_RWX_IN_PD1 (0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
+-#define MAX_POSSIBLE_PHYSMEM_BITS 40
+-#else
+-#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE)
+-#define MAX_POSSIBLE_PHYSMEM_BITS 32
+-#endif
++#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK_PHYS | _PAGE_CACHEABLE)
+
+ /**************************************************************************
+ * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
+--- a/arch/arc/include/uapi/asm/page.h
++++ b/arch/arc/include/uapi/asm/page.h
+@@ -33,5 +33,4 @@
+
+ #define PAGE_MASK (~(PAGE_SIZE-1))
+
+-
+ #endif /* _UAPI__ASM_ARC_PAGE_H */
+--- a/arch/arc/mm/ioremap.c
++++ b/arch/arc/mm/ioremap.c
+@@ -53,9 +53,10 @@ EXPORT_SYMBOL(ioremap);
+ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
+ unsigned long flags)
+ {
++ unsigned int off;
+ unsigned long vaddr;
+ struct vm_struct *area;
+- phys_addr_t off, end;
++ phys_addr_t end;
+ pgprot_t prot = __pgprot(flags);
+
+ /* Don't allow wraparound, zero size */
+@@ -72,7 +73,7 @@ void __iomem *ioremap_prot(phys_addr_t p
+
+ /* Mappings have to be page-aligned */
+ off = paddr & ~PAGE_MASK;
+- paddr &= PAGE_MASK;
++ paddr &= PAGE_MASK_PHYS;
+ size = PAGE_ALIGN(end + 1) - paddr;
+
+ /*
+--- a/arch/arc/mm/tlb.c
++++ b/arch/arc/mm/tlb.c
+@@ -597,7 +597,7 @@ void update_mmu_cache(struct vm_area_str
+ pte_t *ptep)
+ {
+ unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
+- phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
++ phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
+ struct page *page = pfn_to_page(pte_pfn(*ptep));
+
+ create_tlb(vma, vaddr, ptep);
--- /dev/null
+From 4819d16d91145966ce03818a95169df1fd56b299 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
+Date: Wed, 21 Apr 2021 18:33:58 +0300
+Subject: drm/i915: Avoid div-by-zero on gen2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+commit 4819d16d91145966ce03818a95169df1fd56b299 upstream.
+
+Gen2 tiles are 2KiB in size so i915_gem_object_get_tile_row_size()
+can in fact return <4KiB, which leads to div-by-zero here.
+Avoid that.
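+
+Sketched (illustrative, not the driver code; roundup() shown in its
+simplified form):
+
+	#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+
+	/* gen2: a 2KiB tile row gives tile_row_pages() == 2048 >> 12 == 0 */
+	chunk = roundup(chunk, tile_row_pages(obj));	  /* divides by zero */
+	chunk = roundup(chunk, tile_row_pages(obj) ?: 1); /* guarded */
+
+The GNU "?:" operator substitutes 1 when the row is smaller than a page,
+leaving chunk unchanged.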
+
+Not sure i915_gem_object_get_tile_row_size() is entirely
+sane anyway since it doesn't account for the different tile
+layouts on i8xx/i915...
+
+I'm not able to hit this before commit 6846895fde05 ("drm/i915:
+Replace PIN_NONFAULT with calls to PIN_NOEVICT") and it looks
+like I also need to run recent version of Mesa. With those in
+place xonotic trips on this quite easily on my 85x.
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20210421153401.13847-2-ville.syrjala@linux.intel.com
+(cherry picked from commit ed52c62d386f764194e0184fdb905d5f24194cae)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/gem/i915_gem_mman.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+@@ -181,7 +181,7 @@ compute_partial_view(const struct drm_i9
+ struct i915_ggtt_view view;
+
+ if (i915_gem_object_is_tiled(obj))
+- chunk = roundup(chunk, tile_row_pages(obj));
++ chunk = roundup(chunk, tile_row_pages(obj) ?: 1);
+
+ view.type = I915_GGTT_VIEW_PARTIAL;
+ view.partial.offset = rounddown(page_offset, chunk);
--- /dev/null
+From 227545b9a08c68778ddd89428f99c351fc9315ac Mon Sep 17 00:00:00 2001
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Date: Fri, 30 Apr 2021 12:56:56 +0800
+Subject: drm/radeon/dpm: Disable sclk switching on Oland when two 4K 60Hz monitors are connected
+
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+
+commit 227545b9a08c68778ddd89428f99c351fc9315ac upstream.
+
+Screen flickers rapidly when two 4K 60Hz monitors are in use. This issue
+doesn't happen when one monitor is 4K 60Hz (pixelclock 594MHz) and
+another one is 4K 30Hz (pixelclock 297MHz).
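+
+For reference, those figures match the standard CTA-861 4K timings:
+3840x2160@60 uses a 594 MHz pixel clock and 3840x2160@30 uses 297 MHz,
+which is presumably where the "> 297000" (kHz) threshold below comes
+from.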
+
+The issue is gone after setting "power_dpm_force_performance_level" to
+"high". Following the indication, we found that the issue occurs when
+sclk is too low.
+
+So resolve the issue by disabling sclk switching when two connected
+monitors require a high pixel clock (> 297MHz).
+
+v2:
+ - Only apply the fix to Oland.
+Signed-off-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/radeon/radeon.h | 1 +
+ drivers/gpu/drm/radeon/radeon_pm.c | 8 ++++++++
+ drivers/gpu/drm/radeon/si_dpm.c | 3 +++
+ 3 files changed, 12 insertions(+)
+
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -1554,6 +1554,7 @@ struct radeon_dpm {
+ void *priv;
+ u32 new_active_crtcs;
+ int new_active_crtc_count;
++ int high_pixelclock_count;
+ u32 current_active_crtcs;
+ int current_active_crtc_count;
+ bool single_display;
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -1720,6 +1720,7 @@ static void radeon_pm_compute_clocks_dpm
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
++ struct radeon_connector *radeon_connector;
+
+ if (!rdev->pm.dpm_enabled)
+ return;
+@@ -1729,6 +1730,7 @@ static void radeon_pm_compute_clocks_dpm
+ /* update active crtc counts */
+ rdev->pm.dpm.new_active_crtcs = 0;
+ rdev->pm.dpm.new_active_crtc_count = 0;
++ rdev->pm.dpm.high_pixelclock_count = 0;
+ if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+ list_for_each_entry(crtc,
+ &ddev->mode_config.crtc_list, head) {
+@@ -1736,6 +1738,12 @@ static void radeon_pm_compute_clocks_dpm
+ if (crtc->enabled) {
+ rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
+ rdev->pm.dpm.new_active_crtc_count++;
++ if (!radeon_crtc->connector)
++ continue;
++
++ radeon_connector = to_radeon_connector(radeon_crtc->connector);
++ if (radeon_connector->pixelclock_for_modeset > 297000)
++ rdev->pm.dpm.high_pixelclock_count++;
+ }
+ }
+ }
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -3002,6 +3002,9 @@ static void si_apply_state_adjust_rules(
+ (rdev->pdev->device == 0x6605)) {
+ max_sclk = 75000;
+ }
++
++ if (rdev->pm.dpm.high_pixelclock_count > 1)
++ disable_sclk_switching = true;
+ }
+
+ if (rps->vce_active) {
--- /dev/null
+From c3187cf32216313fb316084efac4dab3a8459b1d Mon Sep 17 00:00:00 2001
+From: Jouni Roivas <jouni.roivas@tuxera.com>
+Date: Fri, 14 May 2021 17:27:33 -0700
+Subject: hfsplus: prevent corruption in shrinking truncate
+
+From: Jouni Roivas <jouni.roivas@tuxera.com>
+
+commit c3187cf32216313fb316084efac4dab3a8459b1d upstream.
+
+I believe there are some issues introduced by commit 31651c607151
+("hfsplus: avoid deadlock on file truncation")
+
+HFS+ has extent records which always contain 8 extents. In case the
+first extent record in the catalog file gets full, new ones are allocated
+from the extents overflow file.
+
+In case a shrinking truncate happens in the middle of an extent record
+which is located in the extents overflow file, the logic in
+hfsplus_file_truncate() was changed so that the call to hfs_brec_remove()
+is not guarded any more.
+
+The right action would be to just free the extents that exceed the new
+size inside the extent record, by calling hfsplus_free_extents(), and
+then check if the whole extent record should be removed. However, since
+the guard (blk_cnt > start) is now after the call to hfs_brec_remove(),
+this has the unfortunate effect that the last matching extent record is
+removed unconditionally.
+
+To reproduce this issue, create a file which has at least 10 extents, and
+then perform a shrinking truncate into the middle of the last extent
+record, so that the number of remaining extents is not under or divisible
+by 8. This causes the last extent record (8 extents) to be removed
+totally instead of truncated in the middle, resulting in corruption and
+lost data.
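+
+For example, with 10 extents the first record (8 extents) lives in the
+catalog file and the remaining 2 extents form a second record in the
+extents overflow file; truncating into the middle of extent 9 should
+shrink that second record, but the unguarded hfs_brec_remove() deleted it
+entirely.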
+
+The fix is simply checking whether the new truncated end is below the
+start of this extent record, making it safe to remove the full extent
+record. However, the call to hfs_brec_remove() can't be moved back to its
+previous place, since we're dropping ->tree_lock there, which can cause a
+race condition with the cached info being invalidated, possibly
+corrupting the node data.
+
+Another issue is related to this one. When entering the (blk_cnt > start)
+block we are not holding the ->tree_lock. We break out of the loop not
+holding the lock, yet hfs_find_exit() does unlock it. It is not clear
+whether it's possible for someone else to take the lock under our feet,
+but it can cause hard-to-debug errors and premature unlocking. Even if
+there's no real risk, the locking should still always be kept in balance.
+Thus take the lock now, just before the check.
+
+Link: https://lkml.kernel.org/r/20210429165139.3082828-1-jouni.roivas@tuxera.com
+Fixes: 31651c607151f ("hfsplus: avoid deadlock on file truncation")
+Signed-off-by: Jouni Roivas <jouni.roivas@tuxera.com>
+Reviewed-by: Anton Altaparmakov <anton@tuxera.com>
+Cc: Anatoly Trosinenko <anatoly.trosinenko@gmail.com>
+Cc: Viacheslav Dubeyko <slava@dubeyko.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/hfsplus/extents.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/fs/hfsplus/extents.c
++++ b/fs/hfsplus/extents.c
+@@ -598,13 +598,15 @@ void hfsplus_file_truncate(struct inode
+ res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
+ if (res)
+ break;
+- hfs_brec_remove(&fd);
+
+- mutex_unlock(&fd.tree->tree_lock);
+ start = hip->cached_start;
++ if (blk_cnt <= start)
++ hfs_brec_remove(&fd);
++ mutex_unlock(&fd.tree->tree_lock);
+ hfsplus_free_extents(sb, hip->cached_extents,
+ alloc_cnt - start, alloc_cnt - blk_cnt);
+ hfsplus_dump_extent(hip->cached_extents);
++ mutex_lock(&fd.tree->tree_lock);
+ if (blk_cnt > start) {
+ hip->extent_state |= HFSPLUS_EXT_DIRTY;
+ break;
+@@ -612,7 +614,6 @@ void hfsplus_file_truncate(struct inode
+ alloc_cnt = start;
+ hip->cached_start = hip->cached_blocks = 0;
+ hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
+- mutex_lock(&fd.tree->tree_lock);
+ }
+ hfs_find_exit(&fd);
+
--- /dev/null
+From 22247efd822e6d263f3c8bd327f3f769aea9b1d9 Mon Sep 17 00:00:00 2001
+From: Peter Xu <peterx@redhat.com>
+Date: Fri, 14 May 2021 17:27:04 -0700
+Subject: mm/hugetlb: fix F_SEAL_FUTURE_WRITE
+
+From: Peter Xu <peterx@redhat.com>
+
+commit 22247efd822e6d263f3c8bd327f3f769aea9b1d9 upstream.
+
+Patch series "mm/hugetlb: Fix issues on file sealing and fork", v2.
+
+Hugh reported an issue with F_SEAL_FUTURE_WRITE not being applied
+correctly to hugetlbfs. I can easily verify it using the memfd_test
+program; it seems the program is rarely run with hugetlbfs pages (shmem
+is the default).
+
+Meanwhile I found another, probably even more severe, issue: hugetlb
+fork won't write-protect child CoW pages, so the child can potentially
+write to the parent's private pages. Patch 2 addresses that.
+
+After this series applied, "memfd_test hugetlbfs" should start to pass.
+
+This patch (of 2):
+
+F_SEAL_FUTURE_WRITE has been missing for hugetlb since day one. There is
+a test program for it, and it fails consistently.
+
+$ ./memfd_test hugetlbfs
+memfd-hugetlb: CREATE
+memfd-hugetlb: BASIC
+memfd-hugetlb: SEAL-WRITE
+memfd-hugetlb: SEAL-FUTURE-WRITE
+mmap() didn't fail as expected
+Aborted (core dumped)
+
+I think it's probably because no one is really running the hugetlbfs test.
+
+Fix it by checking FUTURE_WRITE in hugetlbfs_file_mmap() as well, just as
+we do in shmem_mmap(), and generalize a helper for that.
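+
+A user-space sketch of the now-enforced behaviour (error handling
+omitted; assumes glibc exposing memfd_create() and the sealing fcntl
+constants, and a 2M default huge page size):
+
+	#define _GNU_SOURCE
+	#include <sys/mman.h>
+	#include <fcntl.h>
+	#include <unistd.h>
+
+	int fd = memfd_create("test", MFD_HUGETLB | MFD_ALLOW_SEALING);
+	ftruncate(fd, 2 * 1024 * 1024);			/* one huge page */
+	fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE);
+
+	/* with this fix this fails with EPERM, as it already does for shmem */
+	mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);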
+
+Link: https://lkml.kernel.org/r/20210503234356.9097-1-peterx@redhat.com
+Link: https://lkml.kernel.org/r/20210503234356.9097-2-peterx@redhat.com
+Fixes: ab3948f58ff84 ("mm/memfd: add an F_SEAL_FUTURE_WRITE seal to memfd")
+Signed-off-by: Peter Xu <peterx@redhat.com>
+Reported-by: Hugh Dickins <hughd@google.com>
+Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: Joel Fernandes (Google) <joel@joelfernandes.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/hugetlbfs/inode.c | 5 +++++
+ include/linux/mm.h | 32 ++++++++++++++++++++++++++++++++
+ mm/shmem.c | 22 ++++------------------
+ 3 files changed, 41 insertions(+), 18 deletions(-)
+
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -135,6 +135,7 @@ static void huge_pagevec_release(struct
+ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+ {
+ struct inode *inode = file_inode(file);
++ struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
+ loff_t len, vma_len;
+ int ret;
+ struct hstate *h = hstate_file(file);
+@@ -150,6 +151,10 @@ static int hugetlbfs_file_mmap(struct fi
+ vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
+ vma->vm_ops = &hugetlb_vm_ops;
+
++ ret = seal_check_future_write(info->seals, vma);
++ if (ret)
++ return ret;
++
+ /*
+ * page based offset in vm_pgoff could be sufficiently large to
+ * overflow a loff_t when converted to byte offset. This can
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -2925,5 +2925,37 @@ static inline int pages_identical(struct
+ return !memcmp_pages(page1, page2);
+ }
+
++/**
++ * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
++ * @seals: the seals to check
++ * @vma: the vma to operate on
++ *
++ * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on
++ * the vma flags. Return 0 if check pass, or <0 for errors.
++ */
++static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
++{
++ if (seals & F_SEAL_FUTURE_WRITE) {
++ /*
++ * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
++ * "future write" seal active.
++ */
++ if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
++ return -EPERM;
++
++ /*
++ * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
++ * MAP_SHARED and read-only, take care to not allow mprotect to
++ * revert protections on such mappings. Do this only for shared
++ * mappings. For private mappings, don't need to mask
++ * VM_MAYWRITE as we still want them to be COW-writable.
++ */
++ if (vma->vm_flags & VM_SHARED)
++ vma->vm_flags &= ~(VM_MAYWRITE);
++ }
++
++ return 0;
++}
++
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_MM_H */
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2208,25 +2208,11 @@ out_nomem:
+ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
+ {
+ struct shmem_inode_info *info = SHMEM_I(file_inode(file));
++ int ret;
+
+- if (info->seals & F_SEAL_FUTURE_WRITE) {
+- /*
+- * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
+- * "future write" seal active.
+- */
+- if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
+- return -EPERM;
+-
+- /*
+- * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
+- * MAP_SHARED and read-only, take care to not allow mprotect to
+- * revert protections on such mappings. Do this only for shared
+- * mappings. For private mappings, don't need to mask
+- * VM_MAYWRITE as we still want them to be COW-writable.
+- */
+- if (vma->vm_flags & VM_SHARED)
+- vma->vm_flags &= ~(VM_MAYWRITE);
+- }
++ ret = seal_check_future_write(info->seals, vma);
++ if (ret)
++ return ret;
+
+ file_accessed(file);
+ vma->vm_ops = &shmem_vm_ops;
--- /dev/null
+From aec86b052df6541cc97c5fca44e5934cbea4963b Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Thu, 6 May 2021 14:49:59 +1000
+Subject: powerpc/64s: Fix crashes when toggling entry flush barrier
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit aec86b052df6541cc97c5fca44e5934cbea4963b upstream.
+
+The entry flush mitigation can be enabled/disabled at runtime via a
+debugfs file (entry_flush), which causes the kernel to patch itself to
+enable/disable the relevant mitigations.
+
+However depending on which mitigation we're using, it may not be safe to
+do that patching while other CPUs are active. For example the following
+crash:
+
+ sleeper[15639]: segfault (11) at c000000000004c20 nip c000000000004c20 lr c000000000004c20
+
+Shows that we returned to userspace with a corrupted LR that points into
+the kernel, due to executing the partially patched call to the fallback
+entry flush (ie. we missed the LR restore).
+
+Fix it by doing the patching under stop machine. The CPUs that aren't
+doing the patching will be spinning in the core of the stop machine
+logic. That is currently sufficient for our purposes, because none of
+the patching we do is to that code or anywhere in the vicinity.
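+
+For reference, the stop_machine() contract relied on below (a sketch of
+the existing kernel API, not new code):
+
+	/*
+	 * fn runs on exactly one CPU while every other online CPU spins in
+	 * the stop machine core with interrupts hard-disabled; a NULL
+	 * cpumask lets the core pick which CPU runs fn.
+	 */
+	int stop_machine(cpu_stop_fn_t fn, void *data,
+			 const struct cpumask *cpus);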
+
+Fixes: f79643787e0a ("powerpc/64s: flush L1D on kernel entry")
+Cc: stable@vger.kernel.org # v5.10+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20210506044959.1298123-2-mpe@ellerman.id.au
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/lib/feature-fixups.c | 16 +++++++++++++++-
+ 1 file changed, 15 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -293,8 +293,9 @@ void do_uaccess_flush_fixups(enum l1d_fl
+ : "unknown");
+ }
+
+-void do_entry_flush_fixups(enum l1d_flush_type types)
++static int __do_entry_flush_fixups(void *data)
+ {
++ enum l1d_flush_type types = *(enum l1d_flush_type *)data;
+ unsigned int instrs[3], *dest;
+ long *start, *end;
+ int i;
+@@ -345,6 +346,19 @@ void do_entry_flush_fixups(enum l1d_flus
+ : "ori type" :
+ (types & L1D_FLUSH_MTTRIG) ? "mttrig type"
+ : "unknown");
++
++ return 0;
++}
++
++void do_entry_flush_fixups(enum l1d_flush_type types)
++{
++ /*
++ * The call to the fallback flush can not be safely patched in/out while
++ * other CPUs are executing it. So call __do_entry_flush_fixups() on one
++ * CPU while all other CPUs spin in the stop machine core with interrupts
++ * hard disabled.
++ */
++ stop_machine(__do_entry_flush_fixups, &types, NULL);
+ }
+
+ void do_rfi_flush_fixups(enum l1d_flush_type types)
--- /dev/null
+From 8ec7791bae1327b1c279c5cd6e929c3b12daaf0a Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Thu, 6 May 2021 14:49:58 +1000
+Subject: powerpc/64s: Fix crashes when toggling stf barrier
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit 8ec7791bae1327b1c279c5cd6e929c3b12daaf0a upstream.
+
+The STF (store-to-load forwarding) barrier mitigation can be
+enabled/disabled at runtime via a debugfs file (stf_barrier), which
+causes the kernel to patch itself to enable/disable the relevant
+mitigations.
+
+However depending on which mitigation we're using, it may not be safe to
+do that patching while other CPUs are active. For example the following
+crash:
+
+ User access of kernel address (c00000003fff5af0) - exploit attempt? (uid: 0)
+ segfault (11) at c00000003fff5af0 nip 7fff8ad12198 lr 7fff8ad121f8 code 1
+ code: 40820128 e93c00d0 e9290058 7c292840 40810058 38600000 4bfd9a81 e8410018
+ code: 2c030006 41810154 3860ffb6 e9210098 <e94d8ff0> 7d295279 39400000 40820a3c
+
+Shows that we returned to userspace without restoring the user r13
+value, due to executing the partially patched STF exit code.
+
+Fix it by doing the patching under stop machine. The CPUs that aren't
+doing the patching will be spinning in the core of the stop machine
+logic. That is currently sufficient for our purposes, because none of
+the patching we do is to that code or anywhere in the vicinity.
+
+Fixes: a048a07d7f45 ("powerpc/64s: Add support for a store forwarding barrier at kernel entry/exit")
+Cc: stable@vger.kernel.org # v4.17+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20210506044959.1298123-1-mpe@ellerman.id.au
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/lib/feature-fixups.c | 19 +++++++++++++++++--
+ 1 file changed, 17 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -14,6 +14,7 @@
+ #include <linux/string.h>
+ #include <linux/init.h>
+ #include <linux/sched/mm.h>
++#include <linux/stop_machine.h>
+ #include <asm/cputable.h>
+ #include <asm/code-patching.h>
+ #include <asm/page.h>
+@@ -221,11 +222,25 @@ static void do_stf_exit_barrier_fixups(e
+ : "unknown");
+ }
+
++static int __do_stf_barrier_fixups(void *data)
++{
++ enum stf_barrier_type *types = data;
++
++ do_stf_entry_barrier_fixups(*types);
++ do_stf_exit_barrier_fixups(*types);
++
++ return 0;
++}
+
+ void do_stf_barrier_fixups(enum stf_barrier_type types)
+ {
+- do_stf_entry_barrier_fixups(types);
+- do_stf_exit_barrier_fixups(types);
++ /*
++ * The call to the fallback entry flush, and the fallback/sync-ori exit
++ * flush can not be safely patched in/out while other CPUs are executing
++ * them. So call __do_stf_barrier_fixups() on one CPU while all other CPUs
++ * spin in the stop machine core with interrupts hard disabled.
++ */
++ stop_machine(__do_stf_barrier_fixups, &types, NULL);
+ }
+
+ void do_uaccess_flush_fixups(enum l1d_flush_type types)
i40e-fix-use-after-free-in-i40e_client_subtask.patch
i40e-fix-the-restart-auto-negotiation-after-fec-modi.patch
i40e-fix-phy-type-identifiers-for-2.5g-and-5g-adapte.patch
+arc-entry-fix-off-by-one-error-in-syscall-number-validation.patch
+arc-mm-pae-use-40-bit-physical-page-mask.patch
+powerpc-64s-fix-crashes-when-toggling-stf-barrier.patch
+powerpc-64s-fix-crashes-when-toggling-entry-flush-barrier.patch
+hfsplus-prevent-corruption-in-shrinking-truncate.patch
+squashfs-fix-divide-error-in-calculate_skip.patch
+userfaultfd-release-page-in-error-path-to-avoid-bug_on.patch
+mm-hugetlb-fix-f_seal_future_write.patch
+drm-radeon-dpm-disable-sclk-switching-on-oland-when-two-4k-60hz-monitors-are-connected.patch
+drm-i915-avoid-div-by-zero-on-gen2.patch
--- /dev/null
+From d6e621de1fceb3b098ebf435ef7ea91ec4838a1a Mon Sep 17 00:00:00 2001
+From: Phillip Lougher <phillip@squashfs.org.uk>
+Date: Fri, 14 May 2021 17:27:16 -0700
+Subject: squashfs: fix divide error in calculate_skip()
+
+From: Phillip Lougher <phillip@squashfs.org.uk>
+
+commit d6e621de1fceb3b098ebf435ef7ea91ec4838a1a upstream.
+
+Syzbot has reported a "divide error" which has been identified as being
+caused by a corrupted file_size value within the file inode. This value
+has been corrupted to a much larger value than expected.
+
+Calculate_skip() is passed i_size_read(inode) >> msblk->block_log. Due to
+the file_size value corruption this overflows the int argument/variable in
+that function, leading to the divide error.
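+
+For example, with the default 128K block size (block_log == 17), a
+corrupted file_size of 2^60 yields 2^43 blocks, far beyond INT_MAX
+(2^31 - 1), so the value no longer fits the old int parameter.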
+
+This patch changes the function to use u64. This will accommodate any
+unexpectedly large values due to corruption.
+
+The value returned from calculate_skip() is clamped to be never more than
+SQUASHFS_CACHED_BLKS - 1, or 7. So file_size corruption does not lead to
+an unexpectedly large return result here.
+
+Link: https://lkml.kernel.org/r/20210507152618.9447-1-phillip@squashfs.org.uk
+Signed-off-by: Phillip Lougher <phillip@squashfs.org.uk>
+Reported-by: <syzbot+e8f781243ce16ac2f962@syzkaller.appspotmail.com>
+Reported-by: <syzbot+7b98870d4fec9447b951@syzkaller.appspotmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/squashfs/file.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/fs/squashfs/file.c
++++ b/fs/squashfs/file.c
+@@ -211,11 +211,11 @@ failure:
+ * If the skip factor is limited in this way then the file will use multiple
+ * slots.
+ */
+-static inline int calculate_skip(int blocks)
++static inline int calculate_skip(u64 blocks)
+ {
+- int skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
++ u64 skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
+ * SQUASHFS_META_INDEXES);
+- return min(SQUASHFS_CACHED_BLKS - 1, skip + 1);
++ return min((u64) SQUASHFS_CACHED_BLKS - 1, skip + 1);
+ }
+
+
--- /dev/null
+From 7ed9d238c7dbb1fdb63ad96a6184985151b0171c Mon Sep 17 00:00:00 2001
+From: Axel Rasmussen <axelrasmussen@google.com>
+Date: Fri, 14 May 2021 17:27:19 -0700
+Subject: userfaultfd: release page in error path to avoid BUG_ON
+
+From: Axel Rasmussen <axelrasmussen@google.com>
+
+commit 7ed9d238c7dbb1fdb63ad96a6184985151b0171c upstream.
+
+Consider the following sequence of events:
+
+1. Userspace issues a UFFD ioctl, which ends up calling into
+ shmem_mfill_atomic_pte(). We successfully account the blocks, we
+ shmem_alloc_page(), but then the copy_from_user() fails. We return
+ -ENOENT. We don't release the page we allocated.
+2. Our caller detects this error code, tries the copy_from_user() after
+ dropping the mmap_lock, and retries, calling back into
+ shmem_mfill_atomic_pte().
+3. Meanwhile, let's say another process filled up the tmpfs being used.
+4. So shmem_mfill_atomic_pte() fails to account blocks this time, and
+ immediately returns - without releasing the page.
+
+This triggers a BUG_ON in our caller, which asserts that the page
+should always be consumed, unless -ENOENT is returned.
+
+To fix this, detect if we have such a "dangling" page when accounting
+fails, and if so, release it before returning.
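+
+In outline, the caller's contract looks like this (a hypothetical
+condensation of the retry loop, not the literal code):
+
+	err = shmem_mfill_atomic_pte(..., &page);
+	if (err == -ENOENT) {
+		/* fault in the source, then retry with *pagep still set */
+	} else {
+		BUG_ON(page);	/* any other error must consume the page */
+	}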
+
+Link: https://lkml.kernel.org/r/20210428230858.348400-1-axelrasmussen@google.com
+Fixes: cb658a453b93 ("userfaultfd: shmem: avoid leaking blocks and used blocks in UFFDIO_COPY")
+Signed-off-by: Axel Rasmussen <axelrasmussen@google.com>
+Reported-by: Hugh Dickins <hughd@google.com>
+Acked-by: Hugh Dickins <hughd@google.com>
+Reviewed-by: Peter Xu <peterx@redhat.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/shmem.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2327,8 +2327,18 @@ static int shmem_mfill_atomic_pte(struct
+ pgoff_t offset, max_off;
+
+ ret = -ENOMEM;
+- if (!shmem_inode_acct_block(inode, 1))
++ if (!shmem_inode_acct_block(inode, 1)) {
++ /*
++ * We may have got a page, returned -ENOENT triggering a retry,
++ * and now we find ourselves with -ENOMEM. Release the page, to
++ * avoid a BUG_ON in our caller.
++ */
++ if (unlikely(*pagep)) {
++ put_page(*pagep);
++ *pagep = NULL;
++ }
+ goto out;
++ }
+
+ if (!*pagep) {
+ page = shmem_alloc_page(gfp, info, pgoff);