--- /dev/null
+From 8692969e9164c15474b356b9898e5b9b21a85643 Mon Sep 17 00:00:00 2001
+From: Jeff Layton <jlayton@kernel.org>
+Date: Mon, 6 Jun 2022 19:31:42 -0400
+Subject: ceph: wait on async create before checking caps for syncfs
+
+From: Jeff Layton <jlayton@kernel.org>
+
+commit 8692969e9164c15474b356b9898e5b9b21a85643 upstream.
+
+Currently, we'll call ceph_check_caps, but if we're still waiting
+on the reply, we'll end up spinning around on the same inode in
+flush_dirty_session_caps. Wait for the async create reply before
+flushing caps.
+
+Cc: stable@vger.kernel.org
+URL: https://tracker.ceph.com/issues/55823
+Fixes: fbed7045f552 ("ceph: wait for async create reply before sending any cap messages")
+Signed-off-by: Jeff Layton <jlayton@kernel.org>
+Reviewed-by: Xiubo Li <xiubli@redhat.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ceph/caps.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -4358,6 +4358,7 @@ static void flush_dirty_session_caps(str
+ ihold(inode);
+ dout("flush_dirty_caps %llx.%llx\n", ceph_vinop(inode));
+ spin_unlock(&mdsc->cap_dirty_lock);
++ ceph_wait_on_async_create(inode);
+ ceph_check_caps(ci, CHECK_CAPS_FLUSH, NULL);
+ iput(inode);
+ spin_lock(&mdsc->cap_dirty_lock);
--- /dev/null
+From b376471fb47d4905e72fe73e9eeed228f8f2f230 Mon Sep 17 00:00:00 2001
+From: Jinzhou Su <Jinzhou.Su@amd.com>
+Date: Thu, 23 Jun 2022 11:15:09 +0800
+Subject: cpufreq: amd-pstate: Add resume and suspend callbacks
+
+From: Jinzhou Su <Jinzhou.Su@amd.com>
+
+commit b376471fb47d4905e72fe73e9eeed228f8f2f230 upstream.
+
+When system resumes from S3, the CPPC enable register will be
+cleared and reset to 0.
+
+So enable the CPPC interface by writing 1 to this register on
+system resume and disable it during system suspend.
+
+Signed-off-by: Jinzhou Su <Jinzhou.Su@amd.com>
+Signed-off-by: Jinzhou Su <Jinzhou.Su@amd.com>
+Acked-by: Huang Rui <ray.huang@amd.com>
+[ rjw: Subject and changelog edits ]
+Cc: All applicable <stable@vger.kernel.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpufreq/amd-pstate.c | 24 ++++++++++++++++++++++++
+ 1 file changed, 24 insertions(+)
+
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -566,6 +566,28 @@ static int amd_pstate_cpu_exit(struct cp
+ return 0;
+ }
+
++static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
++{
++ int ret;
++
++ ret = amd_pstate_enable(true);
++ if (ret)
++ pr_err("failed to enable amd-pstate during resume, return %d\n", ret);
++
++ return ret;
++}
++
++static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
++{
++ int ret;
++
++ ret = amd_pstate_enable(false);
++ if (ret)
++ pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);
++
++ return ret;
++}
++
+ /* Sysfs attributes */
+
+ /*
+@@ -636,6 +658,8 @@ static struct cpufreq_driver amd_pstate_
+ .target = amd_pstate_target,
+ .init = amd_pstate_cpu_init,
+ .exit = amd_pstate_cpu_exit,
++ .suspend = amd_pstate_cpu_suspend,
++ .resume = amd_pstate_cpu_resume,
+ .set_boost = amd_pstate_set_boost,
+ .name = "amd-pstate",
+ .attr = amd_pstate_attr,
--- /dev/null
+From 332bd0778775d0cf105c4b9e03e460b590749916 Mon Sep 17 00:00:00 2001
+From: Heinz Mauelshagen <heinzm@redhat.com>
+Date: Tue, 28 Jun 2022 00:37:22 +0200
+Subject: dm raid: fix accesses beyond end of raid member array
+
+From: Heinz Mauelshagen <heinzm@redhat.com>
+
+commit 332bd0778775d0cf105c4b9e03e460b590749916 upstream.
+
+On dm-raid table load (using raid_ctr), dm-raid allocates an array
+rs->devs[rs->raid_disks] for the raid device members. rs->raid_disks
+is defined by the number of raid metadata and image tuples passed
+into the target's constructor.
+
+In the case of RAID layout changes being requested, that number can be
+different from the current number of members for existing raid sets as
+defined in their superblocks. Example RAID layout changes include:
+- raid1 legs being added/removed
+- raid4/5/6/10 number of stripes changed (stripe reshaping)
+- takeover to higher raid level (e.g. raid5 -> raid6)
+
+When accessing array members, rs->raid_disks must be used in control
+loops instead of the potentially larger value in rs->md.raid_disks.
+Otherwise it will cause memory access beyond the end of the rs->devs
+array.
+
+Fix this by changing code that is prone to out-of-bounds access.
+Also fix validate_raid_redundancy() to validate all devices that are
+added. Also, use braces to help clean up raid_iterate_devices().
+
+The out-of-bounds memory accesses were discovered using KASAN.
+
+This commit was verified to pass all LVM2 RAID tests (with KASAN
+enabled).
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Heinz Mauelshagen <heinzm@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-raid.c | 34 ++++++++++++++++++----------------
+ 1 file changed, 18 insertions(+), 16 deletions(-)
+
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -1001,12 +1001,13 @@ static int validate_region_size(struct r
+ static int validate_raid_redundancy(struct raid_set *rs)
+ {
+ unsigned int i, rebuild_cnt = 0;
+- unsigned int rebuilds_per_group = 0, copies;
++ unsigned int rebuilds_per_group = 0, copies, raid_disks;
+ unsigned int group_size, last_group_start;
+
+- for (i = 0; i < rs->md.raid_disks; i++)
+- if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
+- !rs->dev[i].rdev.sb_page)
++ for (i = 0; i < rs->raid_disks; i++)
++ if (!test_bit(FirstUse, &rs->dev[i].rdev.flags) &&
++ ((!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
++ !rs->dev[i].rdev.sb_page)))
+ rebuild_cnt++;
+
+ switch (rs->md.level) {
+@@ -1046,8 +1047,9 @@ static int validate_raid_redundancy(stru
+ * A A B B C
+ * C D D E E
+ */
++ raid_disks = min(rs->raid_disks, rs->md.raid_disks);
+ if (__is_raid10_near(rs->md.new_layout)) {
+- for (i = 0; i < rs->md.raid_disks; i++) {
++ for (i = 0; i < raid_disks; i++) {
+ if (!(i % copies))
+ rebuilds_per_group = 0;
+ if ((!rs->dev[i].rdev.sb_page ||
+@@ -1070,10 +1072,10 @@ static int validate_raid_redundancy(stru
+ * results in the need to treat the last (potentially larger)
+ * set differently.
+ */
+- group_size = (rs->md.raid_disks / copies);
+- last_group_start = (rs->md.raid_disks / group_size) - 1;
++ group_size = (raid_disks / copies);
++ last_group_start = (raid_disks / group_size) - 1;
+ last_group_start *= group_size;
+- for (i = 0; i < rs->md.raid_disks; i++) {
++ for (i = 0; i < raid_disks; i++) {
+ if (!(i % copies) && !(i > last_group_start))
+ rebuilds_per_group = 0;
+ if ((!rs->dev[i].rdev.sb_page ||
+@@ -1588,7 +1590,7 @@ static sector_t __rdev_sectors(struct ra
+ {
+ int i;
+
+- for (i = 0; i < rs->md.raid_disks; i++) {
++ for (i = 0; i < rs->raid_disks; i++) {
+ struct md_rdev *rdev = &rs->dev[i].rdev;
+
+ if (!test_bit(Journal, &rdev->flags) &&
+@@ -3771,13 +3773,13 @@ static int raid_iterate_devices(struct d
+ unsigned int i;
+ int r = 0;
+
+- for (i = 0; !r && i < rs->md.raid_disks; i++)
+- if (rs->dev[i].data_dev)
+- r = fn(ti,
+- rs->dev[i].data_dev,
+- 0, /* No offset on data devs */
+- rs->md.dev_sectors,
+- data);
++ for (i = 0; !r && i < rs->raid_disks; i++) {
++ if (rs->dev[i].data_dev) {
++ r = fn(ti, rs->dev[i].data_dev,
++ 0, /* No offset on data devs */
++ rs->md.dev_sectors, data);
++ }
++ }
+
+ return r;
+ }
--- /dev/null
+From 617b365872a247480e9dcd50a32c8d1806b21861 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Wed, 29 Jun 2022 13:40:57 -0400
+Subject: dm raid: fix KASAN warning in raid5_add_disks
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 617b365872a247480e9dcd50a32c8d1806b21861 upstream.
+
+There's a KASAN warning in raid5_add_disk when running the LVM testsuite.
+The warning happens in the test
+lvconvert-raid-reshape-linear_to_raid6-single-type.sh. We fix the warning
+by verifying that rdev->saved_raid_disk is within limits.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/raid5.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -8023,6 +8023,7 @@ static int raid5_add_disk(struct mddev *
+ */
+ if (rdev->saved_raid_disk >= 0 &&
+ rdev->saved_raid_disk >= first &&
++ rdev->saved_raid_disk <= last &&
+ conf->disks[rdev->saved_raid_disk].rdev == NULL)
+ first = rdev->saved_raid_disk;
+
--- /dev/null
+From bbba251577b27422ebe173e1bd006424d6a8cfb3 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 16 Jun 2022 16:52:01 -0400
+Subject: drm/amdgpu: fix adev variable used in amdgpu_device_gpu_recover()
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit bbba251577b27422ebe173e1bd006424d6a8cfb3 upstream.
+
+Use the correct adev variable for the drm_fb_helper in
+amdgpu_device_gpu_recover(). Noticed by inspection.
+
+Fixes: 087451f372bf ("drm/amdgpu: use generic fb helpers instead of setting up AMD own's.")
+Reviewed-by: Guchun Chen <guchun.chen@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -5140,7 +5140,7 @@ int amdgpu_device_gpu_recover_imp(struct
+ */
+ amdgpu_unregister_gpu_instance(tmp_adev);
+
+- drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
++ drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
+
+ /* disable ras on ALL IPs */
+ if (!need_emergency_restart &&
--- /dev/null
+From 5cb0e3fb2c54eabfb3f932a1574bff1774946bc0 Mon Sep 17 00:00:00 2001
+From: Ruili Ji <ruiliji2@amd.com>
+Date: Wed, 22 Jun 2022 14:20:22 +0800
+Subject: drm/amdgpu: To flush tlb for MMHUB of RAVEN series
+
+From: Ruili Ji <ruiliji2@amd.com>
+
+commit 5cb0e3fb2c54eabfb3f932a1574bff1774946bc0 upstream.
+
+amdgpu: [mmhub0] no-retry page fault (src_id:0 ring:40 vmid:8 pasid:32769, for process test_basic pid 3305 thread test_basic pid 3305)
+amdgpu: in page starting at address 0x00007ff990003000 from IH client 0x12 (VMC)
+amdgpu: VM_L2_PROTECTION_FAULT_STATUS:0x00840051
+amdgpu: Faulty UTCL2 client ID: MP1 (0x0)
+amdgpu: MORE_FAULTS: 0x1
+amdgpu: WALKER_ERROR: 0x0
+amdgpu: PERMISSION_FAULTS: 0x5
+amdgpu: MAPPING_ERROR: 0x0
+amdgpu: RW: 0x1
+
+When memory is allocated by kfd, no one triggers the tlb flush for MMHUB0.
+There is page fault from MMHUB0.
+
+v2:fix indentation
+v3:change subject and fix indentation
+
+Signed-off-by: Ruili Ji <ruiliji2@amd.com>
+Reviewed-by: Philip Yang <philip.yang@amd.com>
+Reviewed-by: Aaron Liu <aaron.liu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+@@ -703,7 +703,8 @@ int amdgpu_amdkfd_flush_gpu_tlb_pasid(st
+ {
+ bool all_hub = false;
+
+- if (adev->family == AMDGPU_FAMILY_AI)
++ if (adev->family == AMDGPU_FAMILY_AI ||
++ adev->family == AMDGPU_FAMILY_RV)
+ all_hub = true;
+
+ return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub);
--- /dev/null
+From 3b0dc529f56b5f2328244130683210be98f16f7f Mon Sep 17 00:00:00 2001
+From: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Date: Thu, 23 Jun 2022 14:00:15 +0200
+Subject: ipv6: take care of disable_policy when restoring routes
+
+From: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+
+commit 3b0dc529f56b5f2328244130683210be98f16f7f upstream.
+
+When routes corresponding to addresses are restored by
+fixup_permanent_addr(), the dst_nopolicy parameter was not set.
+The typical use case is a user that configures an address on a down
+interface and then puts this interface up.
+
+Let's take care of this flag in addrconf_f6i_alloc(), so that every caller
+benefits from it.
+
+CC: stable@kernel.org
+CC: David Forster <dforster@brocade.com>
+Fixes: df789fe75206 ("ipv6: Provide ipv6 version of "disable_policy" sysctl")
+Reported-by: Siwar Zitouni <siwar.zitouni@6wind.com>
+Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Link: https://lore.kernel.org/r/20220623120015.32640-1-nicolas.dichtel@6wind.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/addrconf.c | 4 ----
+ net/ipv6/route.c | 9 ++++++++-
+ 2 files changed, 8 insertions(+), 5 deletions(-)
+
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -1109,10 +1109,6 @@ ipv6_add_addr(struct inet6_dev *idev, st
+ goto out;
+ }
+
+- if (net->ipv6.devconf_all->disable_policy ||
+- idev->cnf.disable_policy)
+- f6i->dst_nopolicy = true;
+-
+ neigh_parms_data_state_setall(idev->nd_parms);
+
+ ifa->addr = *cfg->pfx;
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -4565,8 +4565,15 @@ struct fib6_info *addrconf_f6i_alloc(str
+ }
+
+ f6i = ip6_route_info_create(&cfg, gfp_flags, NULL);
+- if (!IS_ERR(f6i))
++ if (!IS_ERR(f6i)) {
+ f6i->dst_nocount = true;
++
++ if (!anycast &&
++ (net->ipv6.devconf_all->disable_policy ||
++ idev->cnf.disable_policy))
++ f6i->dst_nopolicy = true;
++ }
++
+ return f6i;
+ }
+
--- /dev/null
+From b5e5f9dfc915ff05b41dff56181e1dae101712bd Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Sun, 19 Jun 2022 22:37:17 +0900
+Subject: ksmbd: check invalid FileOffset and BeyondFinalZero in FSCTL_ZERO_DATA
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit b5e5f9dfc915ff05b41dff56181e1dae101712bd upstream.
+
+FileOffset should not be greater than BeyondFinalZero in FSCTL_ZERO_DATA.
+And don't call ksmbd_vfs_zero_data() if length is zero.
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Hyunchul Lee <hyc.lee@gmail.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/smb2pdu.c | 29 ++++++++++++++++++-----------
+ 1 file changed, 18 insertions(+), 11 deletions(-)
+
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -7705,7 +7705,7 @@ int smb2_ioctl(struct ksmbd_work *work)
+ {
+ struct file_zero_data_information *zero_data;
+ struct ksmbd_file *fp;
+- loff_t off, len;
++ loff_t off, len, bfz;
+
+ if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+ ksmbd_debug(SMB,
+@@ -7722,19 +7722,26 @@ int smb2_ioctl(struct ksmbd_work *work)
+ zero_data =
+ (struct file_zero_data_information *)&req->Buffer[0];
+
+- fp = ksmbd_lookup_fd_fast(work, id);
+- if (!fp) {
+- ret = -ENOENT;
++ off = le64_to_cpu(zero_data->FileOffset);
++ bfz = le64_to_cpu(zero_data->BeyondFinalZero);
++ if (off > bfz) {
++ ret = -EINVAL;
+ goto out;
+ }
+
+- off = le64_to_cpu(zero_data->FileOffset);
+- len = le64_to_cpu(zero_data->BeyondFinalZero) - off;
+-
+- ret = ksmbd_vfs_zero_data(work, fp, off, len);
+- ksmbd_fd_put(work, fp);
+- if (ret < 0)
+- goto out;
++ len = bfz - off;
++ if (len) {
++ fp = ksmbd_lookup_fd_fast(work, id);
++ if (!fp) {
++ ret = -ENOENT;
++ goto out;
++ }
++
++ ret = ksmbd_vfs_zero_data(work, fp, off, len);
++ ksmbd_fd_put(work, fp);
++ if (ret < 0)
++ goto out;
++ }
+ break;
+ }
+ case FSCTL_QUERY_ALLOCATED_RANGES:
--- /dev/null
+From 18e39fb960e6a908ac5230b57e3d0d6c25232368 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Sun, 19 Jun 2022 22:35:48 +0900
+Subject: ksmbd: set the range of bytes to zero without extending file size in FSCTL_ZERO_DATA
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 18e39fb960e6a908ac5230b57e3d0d6c25232368 upstream.
+
+generic/091, 263 test failed since commit f66f8b94e7f2 ("cifs: when
+extending a file with falloc we should make files not-sparse").
+FSCTL_ZERO_DATA sets the range of bytes to zero without extending file
+size. The VFS_FALLOCATE_FL_KEEP_SIZE flag should be used even on
+non-sparse files.
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Hyunchul Lee <hyc.lee@gmail.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/vfs.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/fs/ksmbd/vfs.c
++++ b/fs/ksmbd/vfs.c
+@@ -1015,7 +1015,9 @@ int ksmbd_vfs_zero_data(struct ksmbd_wor
+ FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+ off, len);
+
+- return vfs_fallocate(fp->filp, FALLOC_FL_ZERO_RANGE, off, len);
++ return vfs_fallocate(fp->filp,
++ FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
++ off, len);
+ }
+
+ int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
--- /dev/null
+From 067baa9a37b32b95fdeabccde4b0cb6a2cf95f96 Mon Sep 17 00:00:00 2001
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Sat, 25 Jun 2022 13:01:08 +0200
+Subject: ksmbd: use vfs_llseek instead of dereferencing NULL
+
+From: Jason A. Donenfeld <Jason@zx2c4.com>
+
+commit 067baa9a37b32b95fdeabccde4b0cb6a2cf95f96 upstream.
+
+By not checking whether llseek is NULL, this might jump to NULL. Also,
+it doesn't check FMODE_LSEEK. Fix this by using vfs_llseek(), which
+always does the right thing.
+
+Fixes: f44158485826 ("cifsd: add file operations")
+Cc: stable@vger.kernel.org
+Cc: linux-cifs@vger.kernel.org
+Cc: Ronnie Sahlberg <lsahlber@redhat.com>
+Cc: Hyunchul Lee <hyc.lee@gmail.com>
+Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Reviewed-by: Namjae Jeon <linkinjeon@kernel.org>
+Acked-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/vfs.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/ksmbd/vfs.c
++++ b/fs/ksmbd/vfs.c
+@@ -1048,7 +1048,7 @@ int ksmbd_vfs_fqar_lseek(struct ksmbd_fi
+ *out_count = 0;
+ end = start + length;
+ while (start < end && *out_count < in_count) {
+- extent_start = f->f_op->llseek(f, start, SEEK_DATA);
++ extent_start = vfs_llseek(f, start, SEEK_DATA);
+ if (extent_start < 0) {
+ if (extent_start != -ENXIO)
+ ret = (int)extent_start;
+@@ -1058,7 +1058,7 @@ int ksmbd_vfs_fqar_lseek(struct ksmbd_fi
+ if (extent_start >= end)
+ break;
+
+- extent_end = f->f_op->llseek(f, extent_start, SEEK_HOLE);
++ extent_end = vfs_llseek(f, extent_start, SEEK_HOLE);
+ if (extent_end < 0) {
+ if (extent_end != -ENXIO)
+ ret = (int)extent_end;
--- /dev/null
+From 1758bde2e4aa5ff188d53e7d9d388bbb7e12eebb Mon Sep 17 00:00:00 2001
+From: Lukas Wunner <lukas@wunner.de>
+Date: Tue, 28 Jun 2022 12:15:08 +0200
+Subject: net: phy: Don't trigger state machine while in suspend
+
+From: Lukas Wunner <lukas@wunner.de>
+
+commit 1758bde2e4aa5ff188d53e7d9d388bbb7e12eebb upstream.
+
+Upon system sleep, mdio_bus_phy_suspend() stops the phy_state_machine(),
+but subsequent interrupts may retrigger it:
+
+They may have been left enabled to facilitate wakeup and are not
+quiesced until the ->suspend_noirq() phase. Unwanted interrupts may
+hence occur between mdio_bus_phy_suspend() and dpm_suspend_noirq(),
+as well as between dpm_resume_noirq() and mdio_bus_phy_resume().
+
+Retriggering the phy_state_machine() through an interrupt is not only
+undesirable for the reason given in mdio_bus_phy_suspend() (freezing it
+midway with phydev->lock held), but also because the PHY may be
+inaccessible after it's suspended: Accesses to USB-attached PHYs are
+blocked once usb_suspend_both() clears the can_submit flag and PHYs on
+PCI network cards may become inaccessible upon suspend as well.
+
+Amend phy_interrupt() to avoid triggering the state machine if the PHY
+is suspended. Signal wakeup instead if the attached net_device or its
+parent has been configured as a wakeup source. (Those conditions are
+identical to mdio_bus_phy_may_suspend().) Postpone handling of the
+interrupt until the PHY has resumed.
+
+Before stopping the phy_state_machine() in mdio_bus_phy_suspend(),
+wait for a concurrent phy_interrupt() to run to completion. That is
+necessary because phy_interrupt() may have checked the PHY's suspend
+status before the system sleep transition commenced and it may thus
+retrigger the state machine after it was stopped.
+
+Likewise, after re-enabling interrupt handling in mdio_bus_phy_resume(),
+wait for a concurrent phy_interrupt() to complete to ensure that
+interrupts which it postponed are properly rerun.
+
+The issue was exposed by commit 1ce8b37241ed ("usbnet: smsc95xx: Forward
+PHY interrupts to PHY driver to avoid polling"), but has existed since
+forever.
+
+Fixes: 541cd3ee00a4 ("phylib: Fix deadlock on resume")
+Link: https://lore.kernel.org/netdev/a5315a8a-32c2-962f-f696-de9a26d30091@samsung.com/
+Reported-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Signed-off-by: Lukas Wunner <lukas@wunner.de>
+Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Cc: stable@vger.kernel.org # v2.6.33+
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://lore.kernel.org/r/b7f386d04e9b5b0e2738f0125743e30676f309ef.1656410895.git.lukas@wunner.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/phy.c | 23 +++++++++++++++++++++++
+ drivers/net/phy/phy_device.c | 23 +++++++++++++++++++++++
+ include/linux/phy.h | 6 ++++++
+ 3 files changed, 52 insertions(+)
+
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -31,6 +31,7 @@
+ #include <linux/io.h>
+ #include <linux/uaccess.h>
+ #include <linux/atomic.h>
++#include <linux/suspend.h>
+ #include <net/netlink.h>
+ #include <net/genetlink.h>
+ #include <net/sock.h>
+@@ -972,6 +973,28 @@ static irqreturn_t phy_interrupt(int irq
+ struct phy_driver *drv = phydev->drv;
+ irqreturn_t ret;
+
++ /* Wakeup interrupts may occur during a system sleep transition.
++ * Postpone handling until the PHY has resumed.
++ */
++ if (IS_ENABLED(CONFIG_PM_SLEEP) && phydev->irq_suspended) {
++ struct net_device *netdev = phydev->attached_dev;
++
++ if (netdev) {
++ struct device *parent = netdev->dev.parent;
++
++ if (netdev->wol_enabled)
++ pm_system_wakeup();
++ else if (device_may_wakeup(&netdev->dev))
++ pm_wakeup_dev_event(&netdev->dev, 0, true);
++ else if (parent && device_may_wakeup(parent))
++ pm_wakeup_dev_event(parent, 0, true);
++ }
++
++ phydev->irq_rerun = 1;
++ disable_irq_nosync(irq);
++ return IRQ_HANDLED;
++ }
++
+ mutex_lock(&phydev->lock);
+ ret = drv->handle_interrupt(phydev);
+ mutex_unlock(&phydev->lock);
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -277,6 +277,15 @@ static __maybe_unused int mdio_bus_phy_s
+ if (phydev->mac_managed_pm)
+ return 0;
+
++ /* Wakeup interrupts may occur during the system sleep transition when
++ * the PHY is inaccessible. Set flag to postpone handling until the PHY
++ * has resumed. Wait for concurrent interrupt handler to complete.
++ */
++ if (phy_interrupt_is_valid(phydev)) {
++ phydev->irq_suspended = 1;
++ synchronize_irq(phydev->irq);
++ }
++
+ /* We must stop the state machine manually, otherwise it stops out of
+ * control, possibly with the phydev->lock held. Upon resume, netdev
+ * may call phy routines that try to grab the same lock, and that may
+@@ -314,6 +323,20 @@ static __maybe_unused int mdio_bus_phy_r
+ if (ret < 0)
+ return ret;
+ no_resume:
++ if (phy_interrupt_is_valid(phydev)) {
++ phydev->irq_suspended = 0;
++ synchronize_irq(phydev->irq);
++
++ /* Rerun interrupts which were postponed by phy_interrupt()
++ * because they occurred during the system sleep transition.
++ */
++ if (phydev->irq_rerun) {
++ phydev->irq_rerun = 0;
++ enable_irq(phydev->irq);
++ irq_wake_thread(phydev->irq, phydev);
++ }
++ }
++
+ if (phydev->attached_dev && phydev->adjust_link)
+ phy_start_machine(phydev);
+
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -571,6 +571,10 @@ struct macsec_ops;
+ * @mdix: Current crossover
+ * @mdix_ctrl: User setting of crossover
+ * @interrupts: Flag interrupts have been enabled
++ * @irq_suspended: Flag indicating PHY is suspended and therefore interrupt
++ * handling shall be postponed until PHY has resumed
++ * @irq_rerun: Flag indicating interrupts occurred while PHY was suspended,
++ * requiring a rerun of the interrupt handler after resume
+ * @interface: enum phy_interface_t value
+ * @skb: Netlink message for cable diagnostics
+ * @nest: Netlink nest used for cable diagnostics
+@@ -625,6 +629,8 @@ struct phy_device {
+
+ /* Interrupts are enabled */
+ unsigned interrupts:1;
++ unsigned irq_suspended:1;
++ unsigned irq_rerun:1;
+
+ enum phy_state state;
+
--- /dev/null
+From ef9102004a87cb3f8b26e000a095a261fc0467d3 Mon Sep 17 00:00:00 2001
+From: Chris Ye <chris.ye@intel.com>
+Date: Tue, 31 May 2022 17:09:54 -0700
+Subject: nvdimm: Fix badblocks clear off-by-one error
+
+From: Chris Ye <chris.ye@intel.com>
+
+commit ef9102004a87cb3f8b26e000a095a261fc0467d3 upstream.
+
+nvdimm_clear_badblocks_region() validates badblock clearing requests
+against the span of the region, however it compares the inclusive
+badblock request range to the exclusive region range. Fix up the
+off-by-one error.
+
+Fixes: 23f498448362 ("libnvdimm: rework region badblocks clearing")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Chris Ye <chris.ye@intel.com>
+Reviewed-by: Vishal Verma <vishal.l.verma@intel.com>
+Link: https://lore.kernel.org/r/165404219489.2445897.9792886413715690399.stgit@dwillia2-xfh
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nvdimm/bus.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/nvdimm/bus.c
++++ b/drivers/nvdimm/bus.c
+@@ -182,8 +182,8 @@ static int nvdimm_clear_badblocks_region
+ ndr_end = nd_region->ndr_start + nd_region->ndr_size - 1;
+
+ /* make sure we are in the region */
+- if (ctx->phys < nd_region->ndr_start
+- || (ctx->phys + ctx->cleared) > ndr_end)
++ if (ctx->phys < nd_region->ndr_start ||
++ (ctx->phys + ctx->cleared - 1) > ndr_end)
+ return 0;
+
+ sector = (ctx->phys - nd_region->ndr_start) / 512;
--- /dev/null
+From e1c70d79346356bb1ede3f79436df80917845ab9 Mon Sep 17 00:00:00 2001
+From: Lamarque Vieira Souza <lamarque@petrosoftdesign.com>
+Date: Wed, 29 Jun 2022 21:30:53 -0300
+Subject: nvme-pci: add NVME_QUIRK_BOGUS_NID for ADATA IM2P33F8ABR1
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Lamarque Vieira Souza <lamarque@petrosoftdesign.com>
+
+commit e1c70d79346356bb1ede3f79436df80917845ab9 upstream.
+
+ADATA IM2P33F8ABR1 reports bogus eui64 values that appear to be the same
+across all drives. Quirk them out so they are not marked as "non globally
+unique" duplicates.
+
+Co-developed-by: Felipe de Jesus Araujo da Conceição <felipe.conceicao@petrosoftdesign.com>
+Signed-off-by: Felipe de Jesus Araujo da Conceição <felipe.conceicao@petrosoftdesign.com>
+Signed-off-by: Lamarque V. Souza <lamarque.souza@petrosoftdesign.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+---
+ drivers/nvme/host/pci.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3436,6 +3436,8 @@ static const struct pci_device_id nvme_i
+ { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */
+ .driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
++ { PCI_DEVICE(0x1cc1, 0x33f8), /* ADATA IM2P33F8ABR1 1 TB */
++ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
+ NVME_QUIRK_BOGUS_NID, },
--- /dev/null
+From 1629de0e0373e04d68e88e6d9d3071fbf70b7ea8 Mon Sep 17 00:00:00 2001
+From: Pablo Greco <pgreco@centosproject.org>
+Date: Sat, 25 Jun 2022 09:15:02 -0300
+Subject: nvme-pci: add NVME_QUIRK_BOGUS_NID for ADATA XPG SX6000LNP (AKA SPECTRIX S40G)
+
+From: Pablo Greco <pgreco@centosproject.org>
+
+commit 1629de0e0373e04d68e88e6d9d3071fbf70b7ea8 upstream.
+
+ADATA XPG SPECTRIX S40G drives report bogus eui64 values that appear to
+be the same across drives in one system. Quirk them out so they are
+not marked as "non globally unique" duplicates.
+
+Before:
+[ 2.258919] nvme nvme1: pci function 0000:06:00.0
+[ 2.264898] nvme nvme2: pci function 0000:05:00.0
+[ 2.323235] nvme nvme1: failed to set APST feature (2)
+[ 2.326153] nvme nvme2: failed to set APST feature (2)
+[ 2.333935] nvme nvme1: allocated 64 MiB host memory buffer.
+[ 2.336492] nvme nvme2: allocated 64 MiB host memory buffer.
+[ 2.339611] nvme nvme1: 7/0/0 default/read/poll queues
+[ 2.341805] nvme nvme2: 7/0/0 default/read/poll queues
+[ 2.346114] nvme1n1: p1
+[ 2.347197] nvme nvme2: globally duplicate IDs for nsid 1
+After:
+[ 2.427715] nvme nvme1: pci function 0000:06:00.0
+[ 2.427771] nvme nvme2: pci function 0000:05:00.0
+[ 2.488154] nvme nvme2: failed to set APST feature (2)
+[ 2.489895] nvme nvme1: failed to set APST feature (2)
+[ 2.498773] nvme nvme2: allocated 64 MiB host memory buffer.
+[ 2.500587] nvme nvme1: allocated 64 MiB host memory buffer.
+[ 2.504113] nvme nvme2: 7/0/0 default/read/poll queues
+[ 2.507026] nvme nvme1: 7/0/0 default/read/poll queues
+[ 2.509467] nvme nvme2: Ignoring bogus Namespace Identifiers
+[ 2.512804] nvme nvme1: Ignoring bogus Namespace Identifiers
+[ 2.513698] nvme1n1: p1
+
+Signed-off-by: Pablo Greco <pgreco@centosproject.org>
+Reviewed-by: Keith Busch <kbusch@kernel.org>
+Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nvme/host/pci.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3437,7 +3437,8 @@ static const struct pci_device_id nvme_i
+ .driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
+- .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
++ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
++ NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
--- /dev/null
+From aa78fa905b4431c432071a878da99c2b37fc0e79 Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Fri, 1 Jul 2022 09:00:41 +0200
+Subject: parisc: Fix vDSO signal breakage on 32-bit kernel
+
+From: Helge Deller <deller@gmx.de>
+
+commit aa78fa905b4431c432071a878da99c2b37fc0e79 upstream.
+
+Addition of vDSO support for parisc in kernel v5.18 suddenly broke glibc
+signal testcases on a 32-bit kernel.
+
+The trampoline code (sigtramp.S) which is mapped into userspace includes
+an offset to the context data on the stack, which is used by gdb and
+glibc to get access to registers.
+
+In a 32-bit kernel we used by mistake the offset into the compat context
+(which is valid on a 64-bit kernel only) instead of the offset into the
+"native" 32-bit context.
+
+Reported-by: John David Anglin <dave.anglin@bell.net>
+Tested-by: John David Anglin <dave.anglin@bell.net>
+Fixes: df24e1783e6e ("parisc: Add vDSO support")
+CC: stable@vger.kernel.org # 5.18
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/parisc/kernel/asm-offsets.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
+index 2673d57eeb00..94652e13c260 100644
+--- a/arch/parisc/kernel/asm-offsets.c
++++ b/arch/parisc/kernel/asm-offsets.c
+@@ -224,8 +224,13 @@ int main(void)
+ BLANK();
+ DEFINE(ASM_SIGFRAME_SIZE, PARISC_RT_SIGFRAME_SIZE);
+ DEFINE(SIGFRAME_CONTEXT_REGS, offsetof(struct rt_sigframe, uc.uc_mcontext) - PARISC_RT_SIGFRAME_SIZE);
++#ifdef CONFIG_64BIT
+ DEFINE(ASM_SIGFRAME_SIZE32, PARISC_RT_SIGFRAME_SIZE32);
+ DEFINE(SIGFRAME_CONTEXT_REGS32, offsetof(struct compat_rt_sigframe, uc.uc_mcontext) - PARISC_RT_SIGFRAME_SIZE32);
++#else
++ DEFINE(ASM_SIGFRAME_SIZE32, PARISC_RT_SIGFRAME_SIZE);
++ DEFINE(SIGFRAME_CONTEXT_REGS32, offsetof(struct rt_sigframe, uc.uc_mcontext) - PARISC_RT_SIGFRAME_SIZE);
++#endif
+ BLANK();
+ DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
+ DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
+--
+2.37.0
+
--- /dev/null
+From 96b80fcd2705fc50ebe1f7f3ce204e861b3099ab Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Mon, 27 Jun 2022 01:39:11 +0200
+Subject: parisc/unaligned: Fix emulate_ldw() breakage
+
+From: Helge Deller <deller@gmx.de>
+
+commit 96b80fcd2705fc50ebe1f7f3ce204e861b3099ab upstream.
+
+The commit e8aa7b17fe41 broke the 32-bit load-word unalignment exception
+handler because it calculated the wrong amount of bits by which the value
+should be shifted. This patch fixes it.
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Fixes: e8aa7b17fe41 ("parisc/unaligned: Rewrite inline assembly of emulate_ldw()")
+Cc: stable@vger.kernel.org # v5.18
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/parisc/kernel/unaligned.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
+index ed1e88a74dc4..bac581b5ecfc 100644
+--- a/arch/parisc/kernel/unaligned.c
++++ b/arch/parisc/kernel/unaligned.c
+@@ -146,7 +146,7 @@ static int emulate_ldw(struct pt_regs *regs, int toreg, int flop)
+ " depw %%r0,31,2,%4\n"
+ "1: ldw 0(%%sr1,%4),%0\n"
+ "2: ldw 4(%%sr1,%4),%3\n"
+-" subi 32,%4,%2\n"
++" subi 32,%2,%2\n"
+ " mtctl %2,11\n"
+ " vshd %0,%3,%0\n"
+ "3: \n"
+--
+2.37.0
+
--- /dev/null
+From 986481618023e18e187646b0fff05a3c337531cb Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+Date: Thu, 23 Jun 2022 10:56:17 +0200
+Subject: powerpc/book3e: Fix PUD allocation size in map_kernel_page()
+
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+
+commit 986481618023e18e187646b0fff05a3c337531cb upstream.
+
+Commit 2fb4706057bc ("powerpc: add support for folded p4d page tables")
+erroneously changed PUD setup to a mix of PMD and PUD. Fix it.
+
+While at it, use PTE_TABLE_SIZE instead of PAGE_SIZE for PTE tables
+in order to avoid any confusion.
+
+Fixes: 2fb4706057bc ("powerpc: add support for folded p4d page tables")
+Cc: stable@vger.kernel.org # v5.8+
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Acked-by: Mike Rapoport <rppt@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/95ddfd6176d53e6c85e13bd1c358359daa56775f.1655974558.git.christophe.leroy@csgroup.eu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/mm/nohash/book3e_pgtable.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/powerpc/mm/nohash/book3e_pgtable.c
++++ b/arch/powerpc/mm/nohash/book3e_pgtable.c
+@@ -96,8 +96,8 @@ int __ref map_kernel_page(unsigned long
+ pgdp = pgd_offset_k(ea);
+ p4dp = p4d_offset(pgdp, ea);
+ if (p4d_none(*p4dp)) {
+- pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
+- p4d_populate(&init_mm, p4dp, pmdp);
++ pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
++ p4d_populate(&init_mm, p4dp, pudp);
+ }
+ pudp = pud_offset(p4dp, ea);
+ if (pud_none(*pudp)) {
+@@ -106,7 +106,7 @@ int __ref map_kernel_page(unsigned long
+ }
+ pmdp = pmd_offset(pudp, ea);
+ if (!pmd_present(*pmdp)) {
+- ptep = early_alloc_pgtable(PAGE_SIZE);
++ ptep = early_alloc_pgtable(PTE_TABLE_SIZE);
+ pmd_populate_kernel(&init_mm, pmdp, ptep);
+ }
+ ptep = pte_offset_kernel(pmdp, ea);
--- /dev/null
+From b21bd5a4b130f8370861478d2880985daace5913 Mon Sep 17 00:00:00 2001
+From: "Naveen N. Rao" <naveen.n.rao@linux.vnet.ibm.com>
+Date: Tue, 28 Jun 2022 00:41:19 +0530
+Subject: powerpc/bpf: Fix use of user_pt_regs in uapi
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+
+commit b21bd5a4b130f8370861478d2880985daace5913 upstream.
+
+Trying to build a .c file that includes <linux/bpf_perf_event.h>:
+ $ cat test_bpf_headers.c
+ #include <linux/bpf_perf_event.h>
+
+throws the below error:
+ /usr/include/linux/bpf_perf_event.h:14:28: error: field ‘regs’ has incomplete type
+ 14 | bpf_user_pt_regs_t regs;
+ | ^~~~
+
+This is because we typedef bpf_user_pt_regs_t to 'struct user_pt_regs'
+in arch/powerpc/include/uapi/asm/bpf_perf_event.h, but 'struct
+user_pt_regs' is not exposed to userspace.
+
+Powerpc has both pt_regs and user_pt_regs structures. However, unlike
+arm64 and s390, we expose user_pt_regs to userspace as just 'pt_regs'.
+As such, we should typedef bpf_user_pt_regs_t to 'struct pt_regs' for
+userspace.
+
+Within the kernel though, we want to typedef bpf_user_pt_regs_t to
+'struct user_pt_regs'.
+
+Remove arch/powerpc/include/uapi/asm/bpf_perf_event.h so that the
+uapi/asm-generic version of the header is exposed to userspace.
+Introduce arch/powerpc/include/asm/bpf_perf_event.h so that we can
+typedef bpf_user_pt_regs_t to 'struct user_pt_regs' for use within the
+kernel.
+
+Note that this was not showing up with the bpf selftest build since
+tools/include/uapi/asm/bpf_perf_event.h didn't include the powerpc
+variant.
+
+Fixes: a6460b03f945ee ("powerpc/bpf: Fix broken uapi for BPF_PROG_TYPE_PERF_EVENT")
+Cc: stable@vger.kernel.org # v4.20+
+Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+[mpe: Use typical naming for header include guard]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20220627191119.142867-1-naveen.n.rao@linux.vnet.ibm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/include/asm/bpf_perf_event.h | 9 +++++++++
+ arch/powerpc/include/uapi/asm/bpf_perf_event.h | 9 ---------
+ 2 files changed, 9 insertions(+), 9 deletions(-)
+ create mode 100644 arch/powerpc/include/asm/bpf_perf_event.h
+ delete mode 100644 arch/powerpc/include/uapi/asm/bpf_perf_event.h
+
+--- /dev/null
++++ b/arch/powerpc/include/asm/bpf_perf_event.h
+@@ -0,0 +1,9 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_POWERPC_BPF_PERF_EVENT_H
++#define _ASM_POWERPC_BPF_PERF_EVENT_H
++
++#include <asm/ptrace.h>
++
++typedef struct user_pt_regs bpf_user_pt_regs_t;
++
++#endif /* _ASM_POWERPC_BPF_PERF_EVENT_H */
+--- a/arch/powerpc/include/uapi/asm/bpf_perf_event.h
++++ /dev/null
+@@ -1,9 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+-#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+-#define _UAPI__ASM_BPF_PERF_EVENT_H__
+-
+-#include <asm/ptrace.h>
+-
+-typedef struct user_pt_regs bpf_user_pt_regs_t;
+-
+-#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
--- /dev/null
+From 6886da5f49e6d86aad76807a93f3eef5e4f01b10 Mon Sep 17 00:00:00 2001
+From: Liam Howlett <liam.howlett@oracle.com>
+Date: Fri, 24 Jun 2022 01:17:58 +0000
+Subject: powerpc/prom_init: Fix kernel config grep
+
+From: Liam Howlett <liam.howlett@oracle.com>
+
+commit 6886da5f49e6d86aad76807a93f3eef5e4f01b10 upstream.
+
+When searching for config options, use the KCONFIG_CONFIG shell variable
+so that builds using non-standard config locations work.
+
+Fixes: 26deb04342e3 ("powerpc: prepare string/mem functions for KASAN")
+Cc: stable@vger.kernel.org # v5.2+
+Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20220624011745.4060795-1-Liam.Howlett@oracle.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/prom_init_check.sh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/prom_init_check.sh
++++ b/arch/powerpc/kernel/prom_init_check.sh
+@@ -13,7 +13,7 @@
+ # If you really need to reference something from prom_init.o add
+ # it to the list below:
+
+-grep "^CONFIG_KASAN=y$" .config >/dev/null
++grep "^CONFIG_KASAN=y$" ${KCONFIG_CONFIG} >/dev/null
+ if [ $? -eq 0 ]
+ then
+ MEM_FUNCS="__memcpy __memset"
--- /dev/null
+From a775e4e4941bf2f326aa36c58f67bd6c96cac717 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Mon, 20 Jun 2022 18:29:39 -0400
+Subject: Revert "drm/amdgpu/display: set vblank_disable_immediate for DC"
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit a775e4e4941bf2f326aa36c58f67bd6c96cac717 upstream.
+
+This reverts commit 92020e81ddbeac351ea4a19bcf01743f32b9c800.
+
+This causes stuttering and timeouts with DMCUB for some users
+so revert it until we understand why and safely enable it
+to save power.
+
+Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1887
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 1 +
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 ---
+ 2 files changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+@@ -333,6 +333,7 @@ int amdgpu_irq_init(struct amdgpu_device
+ if (!amdgpu_device_has_dc_support(adev)) {
+ if (!adev->enable_virtual_display)
+ /* Disable vblank IRQs aggressively for power-saving */
++ /* XXX: can this be enabled for DC? */
+ adev_to_drm(adev)->vblank_disable_immediate = true;
+
+ r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4286,9 +4286,6 @@ static int amdgpu_dm_initialize_drm_devi
+ }
+ #endif
+
+- /* Disable vblank IRQs aggressively for power-saving. */
+- adev_to_drm(adev)->vblank_disable_immediate = true;
+-
+ /* loops over all connectors on the board */
+ for (i = 0; i < link_cnt; i++) {
+ struct dc_link *link = NULL;
--- /dev/null
+From e4f74400308cb8abde5fdc9cad609c2aba32110c Mon Sep 17 00:00:00 2001
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Sat, 11 Jun 2022 00:20:23 +0200
+Subject: s390/archrandom: simplify back to earlier design and initialize earlier
+
+From: Jason A. Donenfeld <Jason@zx2c4.com>
+
+commit e4f74400308cb8abde5fdc9cad609c2aba32110c upstream.
+
+s390x appears to present two RNG interfaces:
+- a "TRNG" that gathers entropy using some hardware function; and
+- a "DRBG" that takes in a seed and expands it.
+
+Previously, the TRNG was wired up to arch_get_random_{long,int}(), but
+it was observed that this was being called really frequently, resulting
+in high overhead. So it was changed to be wired up to arch_get_random_
+seed_{long,int}(), which was a reasonable decision. Later on, the DRBG
+was then wired up to arch_get_random_{long,int}(), with a complicated
+buffer filling thread, to control overhead and rate.
+
+Fortunately, none of the performance issues matter much now. The RNG
+always attempts to use arch_get_random_seed_{long,int}() first, which
+means a complicated implementation of arch_get_random_{long,int}() isn't
+really valuable or useful to have around. And it's only used when
+reseeding, which means it won't hit the high throughput complications
+that were faced before.
+
+So this commit returns to an earlier design of just calling the TRNG in
+arch_get_random_seed_{long,int}(), and returning false in arch_get_
+random_{long,int}().
+
+Part of what makes the simplification possible is that the RNG now seeds
+itself using the TRNG at bootup. But this only works if the TRNG is
+detected early in boot, before random_init() is called. So this commit
+also causes that check to happen in setup_arch().
+
+Cc: stable@vger.kernel.org
+Cc: Harald Freudenberger <freude@linux.ibm.com>
+Cc: Ingo Franzki <ifranzki@linux.ibm.com>
+Cc: Juergen Christ <jchrist@linux.ibm.com>
+Cc: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Link: https://lore.kernel.org/r/20220610222023.378448-1-Jason@zx2c4.com
+Reviewed-by: Harald Freudenberger <freude@linux.ibm.com>
+Acked-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/crypto/arch_random.c | 217 -------------------------------------
+ arch/s390/include/asm/archrandom.h | 14 +-
+ arch/s390/kernel/setup.c | 5
+ 3 files changed, 12 insertions(+), 224 deletions(-)
+
+--- a/arch/s390/crypto/arch_random.c
++++ b/arch/s390/crypto/arch_random.c
+@@ -4,232 +4,15 @@
+ *
+ * Copyright IBM Corp. 2017, 2020
+ * Author(s): Harald Freudenberger
+- *
+- * The s390_arch_random_generate() function may be called from random.c
+- * in interrupt context. So this implementation does the best to be very
+- * fast. There is a buffer of random data which is asynchronously checked
+- * and filled by a workqueue thread.
+- * If there are enough bytes in the buffer the s390_arch_random_generate()
+- * just delivers these bytes. Otherwise false is returned until the
+- * worker thread refills the buffer.
+- * The worker fills the rng buffer by pulling fresh entropy from the
+- * high quality (but slow) true hardware random generator. This entropy
+- * is then spread over the buffer with an pseudo random generator PRNG.
+- * As the arch_get_random_seed_long() fetches 8 bytes and the calling
+- * function add_interrupt_randomness() counts this as 1 bit entropy the
+- * distribution needs to make sure there is in fact 1 bit entropy contained
+- * in 8 bytes of the buffer. The current values pull 32 byte entropy
+- * and scatter this into a 2048 byte buffer. So 8 byte in the buffer
+- * will contain 1 bit of entropy.
+- * The worker thread is rescheduled based on the charge level of the
+- * buffer but at least with 500 ms delay to avoid too much CPU consumption.
+- * So the max. amount of rng data delivered via arch_get_random_seed is
+- * limited to 4k bytes per second.
+ */
+
+ #include <linux/kernel.h>
+ #include <linux/atomic.h>
+ #include <linux/random.h>
+-#include <linux/slab.h>
+ #include <linux/static_key.h>
+-#include <linux/workqueue.h>
+-#include <linux/moduleparam.h>
+ #include <asm/cpacf.h>
+
+ DEFINE_STATIC_KEY_FALSE(s390_arch_random_available);
+
+ atomic64_t s390_arch_random_counter = ATOMIC64_INIT(0);
+ EXPORT_SYMBOL(s390_arch_random_counter);
+-
+-#define ARCH_REFILL_TICKS (HZ/2)
+-#define ARCH_PRNG_SEED_SIZE 32
+-#define ARCH_RNG_BUF_SIZE 2048
+-
+-static DEFINE_SPINLOCK(arch_rng_lock);
+-static u8 *arch_rng_buf;
+-static unsigned int arch_rng_buf_idx;
+-
+-static void arch_rng_refill_buffer(struct work_struct *);
+-static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer);
+-
+-bool s390_arch_random_generate(u8 *buf, unsigned int nbytes)
+-{
+- /* max hunk is ARCH_RNG_BUF_SIZE */
+- if (nbytes > ARCH_RNG_BUF_SIZE)
+- return false;
+-
+- /* lock rng buffer */
+- if (!spin_trylock(&arch_rng_lock))
+- return false;
+-
+- /* try to resolve the requested amount of bytes from the buffer */
+- arch_rng_buf_idx -= nbytes;
+- if (arch_rng_buf_idx < ARCH_RNG_BUF_SIZE) {
+- memcpy(buf, arch_rng_buf + arch_rng_buf_idx, nbytes);
+- atomic64_add(nbytes, &s390_arch_random_counter);
+- spin_unlock(&arch_rng_lock);
+- return true;
+- }
+-
+- /* not enough bytes in rng buffer, refill is done asynchronously */
+- spin_unlock(&arch_rng_lock);
+-
+- return false;
+-}
+-EXPORT_SYMBOL(s390_arch_random_generate);
+-
+-static void arch_rng_refill_buffer(struct work_struct *unused)
+-{
+- unsigned int delay = ARCH_REFILL_TICKS;
+-
+- spin_lock(&arch_rng_lock);
+- if (arch_rng_buf_idx > ARCH_RNG_BUF_SIZE) {
+- /* buffer is exhausted and needs refill */
+- u8 seed[ARCH_PRNG_SEED_SIZE];
+- u8 prng_wa[240];
+- /* fetch ARCH_PRNG_SEED_SIZE bytes of entropy */
+- cpacf_trng(NULL, 0, seed, sizeof(seed));
+- /* blow this entropy up to ARCH_RNG_BUF_SIZE with PRNG */
+- memset(prng_wa, 0, sizeof(prng_wa));
+- cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
+- &prng_wa, NULL, 0, seed, sizeof(seed));
+- cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN,
+- &prng_wa, arch_rng_buf, ARCH_RNG_BUF_SIZE, NULL, 0);
+- arch_rng_buf_idx = ARCH_RNG_BUF_SIZE;
+- }
+- delay += (ARCH_REFILL_TICKS * arch_rng_buf_idx) / ARCH_RNG_BUF_SIZE;
+- spin_unlock(&arch_rng_lock);
+-
+- /* kick next check */
+- queue_delayed_work(system_long_wq, &arch_rng_work, delay);
+-}
+-
+-/*
+- * Here follows the implementation of s390_arch_get_random_long().
+- *
+- * The random longs to be pulled by arch_get_random_long() are
+- * prepared in an 4K buffer which is filled from the NIST 800-90
+- * compliant s390 drbg. By default the random long buffer is refilled
+- * 256 times before the drbg itself needs a reseed. The reseed of the
+- * drbg is done with 32 bytes fetched from the high quality (but slow)
+- * trng which is assumed to deliver 100% entropy. So the 32 * 8 = 256
+- * bits of entropy are spread over 256 * 4KB = 1MB serving 131072
+- * arch_get_random_long() invocations before reseeded.
+- *
+- * How often the 4K random long buffer is refilled with the drbg
+- * before the drbg is reseeded can be adjusted. There is a module
+- * parameter 's390_arch_rnd_long_drbg_reseed' accessible via
+- * /sys/module/arch_random/parameters/rndlong_drbg_reseed
+- * or as kernel command line parameter
+- * arch_random.rndlong_drbg_reseed=<value>
+- * This parameter tells how often the drbg fills the 4K buffer before
+- * it is re-seeded by fresh entropy from the trng.
+- * A value of 16 results in reseeding the drbg at every 16 * 4 KB = 64
+- * KB with 32 bytes of fresh entropy pulled from the trng. So a value
+- * of 16 would result in 256 bits entropy per 64 KB.
+- * A value of 256 results in 1MB of drbg output before a reseed of the
+- * drbg is done. So this would spread the 256 bits of entropy among 1MB.
+- * Setting this parameter to 0 forces the reseed to take place every
+- * time the 4K buffer is depleted, so the entropy rises to 256 bits
+- * entropy per 4K or 0.5 bit entropy per arch_get_random_long(). With
+- * setting this parameter to negative values all this effort is
+- * disabled, arch_get_random long() returns false and thus indicating
+- * that the arch_get_random_long() feature is disabled at all.
+- */
+-
+-static unsigned long rndlong_buf[512];
+-static DEFINE_SPINLOCK(rndlong_lock);
+-static int rndlong_buf_index;
+-
+-static int rndlong_drbg_reseed = 256;
+-module_param_named(rndlong_drbg_reseed, rndlong_drbg_reseed, int, 0600);
+-MODULE_PARM_DESC(rndlong_drbg_reseed, "s390 arch_get_random_long() drbg reseed");
+-
+-static inline void refill_rndlong_buf(void)
+-{
+- static u8 prng_ws[240];
+- static int drbg_counter;
+-
+- if (--drbg_counter < 0) {
+- /* need to re-seed the drbg */
+- u8 seed[32];
+-
+- /* fetch seed from trng */
+- cpacf_trng(NULL, 0, seed, sizeof(seed));
+- /* seed drbg */
+- memset(prng_ws, 0, sizeof(prng_ws));
+- cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
+- &prng_ws, NULL, 0, seed, sizeof(seed));
+- /* re-init counter for drbg */
+- drbg_counter = rndlong_drbg_reseed;
+- }
+-
+- /* fill the arch_get_random_long buffer from drbg */
+- cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prng_ws,
+- (u8 *) rndlong_buf, sizeof(rndlong_buf),
+- NULL, 0);
+-}
+-
+-bool s390_arch_get_random_long(unsigned long *v)
+-{
+- bool rc = false;
+- unsigned long flags;
+-
+- /* arch_get_random_long() disabled ? */
+- if (rndlong_drbg_reseed < 0)
+- return false;
+-
+- /* try to lock the random long lock */
+- if (!spin_trylock_irqsave(&rndlong_lock, flags))
+- return false;
+-
+- if (--rndlong_buf_index >= 0) {
+- /* deliver next long value from the buffer */
+- *v = rndlong_buf[rndlong_buf_index];
+- rc = true;
+- goto out;
+- }
+-
+- /* buffer is depleted and needs refill */
+- if (in_interrupt()) {
+- /* delay refill in interrupt context to next caller */
+- rndlong_buf_index = 0;
+- goto out;
+- }
+-
+- /* refill random long buffer */
+- refill_rndlong_buf();
+- rndlong_buf_index = ARRAY_SIZE(rndlong_buf);
+-
+- /* and provide one random long */
+- *v = rndlong_buf[--rndlong_buf_index];
+- rc = true;
+-
+-out:
+- spin_unlock_irqrestore(&rndlong_lock, flags);
+- return rc;
+-}
+-EXPORT_SYMBOL(s390_arch_get_random_long);
+-
+-static int __init s390_arch_random_init(void)
+-{
+- /* all the needed PRNO subfunctions available ? */
+- if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG) &&
+- cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN)) {
+-
+- /* alloc arch random working buffer */
+- arch_rng_buf = kmalloc(ARCH_RNG_BUF_SIZE, GFP_KERNEL);
+- if (!arch_rng_buf)
+- return -ENOMEM;
+-
+- /* kick worker queue job to fill the random buffer */
+- queue_delayed_work(system_long_wq,
+- &arch_rng_work, ARCH_REFILL_TICKS);
+-
+- /* enable arch random to the outside world */
+- static_branch_enable(&s390_arch_random_available);
+- }
+-
+- return 0;
+-}
+-arch_initcall(s390_arch_random_init);
+--- a/arch/s390/include/asm/archrandom.h
++++ b/arch/s390/include/asm/archrandom.h
+@@ -15,17 +15,13 @@
+
+ #include <linux/static_key.h>
+ #include <linux/atomic.h>
++#include <asm/cpacf.h>
+
+ DECLARE_STATIC_KEY_FALSE(s390_arch_random_available);
+ extern atomic64_t s390_arch_random_counter;
+
+-bool s390_arch_get_random_long(unsigned long *v);
+-bool s390_arch_random_generate(u8 *buf, unsigned int nbytes);
+-
+ static inline bool __must_check arch_get_random_long(unsigned long *v)
+ {
+- if (static_branch_likely(&s390_arch_random_available))
+- return s390_arch_get_random_long(v);
+ return false;
+ }
+
+@@ -37,7 +33,9 @@ static inline bool __must_check arch_get
+ static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
+ {
+ if (static_branch_likely(&s390_arch_random_available)) {
+- return s390_arch_random_generate((u8 *)v, sizeof(*v));
++ cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
++ atomic64_add(sizeof(*v), &s390_arch_random_counter);
++ return true;
+ }
+ return false;
+ }
+@@ -45,7 +43,9 @@ static inline bool __must_check arch_get
+ static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
+ {
+ if (static_branch_likely(&s390_arch_random_available)) {
+- return s390_arch_random_generate((u8 *)v, sizeof(*v));
++ cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
++ atomic64_add(sizeof(*v), &s390_arch_random_counter);
++ return true;
+ }
+ return false;
+ }
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -875,6 +875,11 @@ static void __init setup_randomness(void
+ if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
+ add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
+ memblock_free(vmms, PAGE_SIZE);
++
++#ifdef CONFIG_ARCH_RANDOM
++ if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
++ static_branch_enable(&s390_arch_random_available);
++#endif
+ }
+
+ /*
--- /dev/null
+drm-amdgpu-fix-adev-variable-used-in-amdgpu_device_gpu_recover.patch
+revert-drm-amdgpu-display-set-vblank_disable_immediate-for-dc.patch
+drm-amdgpu-to-flush-tlb-for-mmhub-of-raven-series.patch
+ksmbd-set-the-range-of-bytes-to-zero-without-extending-file-size-in-fsctl_zero_data.patch
+ksmbd-check-invalid-fileoffset-and-beyondfinalzero-in-fsctl_zero_data.patch
+ksmbd-use-vfs_llseek-instead-of-dereferencing-null.patch
+ipv6-take-care-of-disable_policy-when-restoring-routes.patch
+net-phy-don-t-trigger-state-machine-while-in-suspend.patch
+s390-archrandom-simplify-back-to-earlier-design-and-initialize-earlier.patch
+nvme-pci-add-nvme_quirk_bogus_nid-for-adata-xpg-sx6000lnp-aka-spectrix-s40g.patch
+nvme-pci-add-nvme_quirk_bogus_nid-for-adata-im2p33f8abr1.patch
+nvdimm-fix-badblocks-clear-off-by-one-error.patch
+ceph-wait-on-async-create-before-checking-caps-for-syncfs.patch
+parisc-fix-vdso-signal-breakage-on-32-bit-kernel.patch
+parisc-unaligned-fix-emulate_ldw-breakage.patch
+powerpc-prom_init-fix-kernel-config-grep.patch
+powerpc-book3e-fix-pud-allocation-size-in-map_kernel_page.patch
+powerpc-bpf-fix-use-of-user_pt_regs-in-uapi.patch
+cpufreq-amd-pstate-add-resume-and-suspend-callbacks.patch
+dm-raid-fix-accesses-beyond-end-of-raid-member-array.patch
+dm-raid-fix-kasan-warning-in-raid5_add_disks.patch