--- /dev/null
+From foo@baz Mon Feb 28 11:59:17 AM CET 2022
+From: Baokun Li <libaokun1@huawei.com>
+Date: Sat, 26 Feb 2022 14:32:01 +0800
+Subject: fget: clarify and improve __fget_files() implementation
+To: <linux-kernel@vger.kernel.org>
+Cc: <gregkh@linuxfoundation.org>, <stable@vger.kernel.org>, <oliver.sang@intel.com>, <beibei.si@intel.com>, <jannh@google.com>, <mszeredi@redhat.com>, <torvalds@linux-foundation.org>, <libaokun1@huawei.com>, <yukuai3@huawei.com>
+Message-ID: <20220226063201.167183-1-libaokun1@huawei.com>
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit e386dfc56f837da66d00a078e5314bc8382fab83 upstream.
+
+Commit 054aa8d439b9 ("fget: check that the fd still exists after getting
+a ref to it") fixed a race with getting a reference to a file just as it
+was being closed. It was a fairly minimal patch, and I didn't think
+re-checking the file pointer lookup would be a measurable overhead,
+since it was all right there and cached.
+
+But I was wrong, as pointed out by the kernel test robot.
+
+The 'poll2' case of the will-it-scale.per_thread_ops benchmark regressed
+quite noticeably. Admittedly it seems to be a very artificial test:
+doing "poll()" system calls on regular files in a very tight loop in
+multiple threads.
+
+That means that basically all the time is spent just looking up file
+descriptors without ever doing anything useful with them (not that doing
+'poll()' on a regular file is useful to begin with). And as a result it
+shows the extra "re-check fd" cost as a sore thumb.
+
+Happily, the regression is fixable by just writing the code to look up
+the fd to be better and clearer. There's still a cost to verify the
+file pointer, but now it's basically in the noise even for that
+benchmark that does nothing else - and the code is more understandable
+and has better comments too.
+
+[ Side note: this patch is also a classic case of one that looks very
+ messy with the default greedy Myers diff - it's much more legible with
+ either the patience or histogram diff algorithm ]
+
+Link: https://lore.kernel.org/lkml/20211210053743.GA36420@xsang-OptiPlex-9020/
+Link: https://lore.kernel.org/lkml/20211213083154.GA20853@linux.intel.com/
+Reported-by: kernel test robot <oliver.sang@intel.com>
+Tested-by: Carel Si <beibei.si@intel.com>
+Cc: Jann Horn <jannh@google.com>
+Cc: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/file.c | 73 ++++++++++++++++++++++++++++++++++++++++++++++++--------------
+ 1 file changed, 57 insertions(+), 16 deletions(-)
+
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -706,28 +706,69 @@ void do_close_on_exec(struct files_struc
+ spin_unlock(&files->file_lock);
+ }
+
+-static struct file *__fget(unsigned int fd, fmode_t mask, unsigned int refs)
++static inline struct file *__fget_files_rcu(struct files_struct *files,
++ unsigned int fd, fmode_t mask, unsigned int refs)
+ {
+- struct files_struct *files = current->files;
+- struct file *file;
++ for (;;) {
++ struct file *file;
++ struct fdtable *fdt = rcu_dereference_raw(files->fdt);
++ struct file __rcu **fdentry;
+
+- rcu_read_lock();
+-loop:
+- file = fcheck_files(files, fd);
+- if (file) {
+- /* File object ref couldn't be taken.
+- * dup2() atomicity guarantee is the reason
+- * we loop to catch the new file (or NULL pointer)
++ if (unlikely(fd >= fdt->max_fds))
++ return NULL;
++
++ fdentry = fdt->fd + array_index_nospec(fd, fdt->max_fds);
++ file = rcu_dereference_raw(*fdentry);
++ if (unlikely(!file))
++ return NULL;
++
++ if (unlikely(file->f_mode & mask))
++ return NULL;
++
++ /*
++ * Ok, we have a file pointer. However, because we do
++ * this all locklessly under RCU, we may be racing with
++ * that file being closed.
++ *
++ * Such a race can take two forms:
++ *
++ * (a) the file ref already went down to zero,
++ * and get_file_rcu_many() fails. Just try
++ * again:
+ */
+- if (file->f_mode & mask)
+- file = NULL;
+- else if (!get_file_rcu_many(file, refs))
+- goto loop;
+- else if (__fcheck_files(files, fd) != file) {
++ if (unlikely(!get_file_rcu_many(file, refs)))
++ continue;
++
++ /*
++ * (b) the file table entry has changed under us.
++ * Note that we don't need to re-check the 'fdt->fd'
++ * pointer having changed, because it always goes
++ * hand-in-hand with 'fdt'.
++ *
++ * If so, we need to put our refs and try again.
++ */
++ if (unlikely(rcu_dereference_raw(files->fdt) != fdt) ||
++ unlikely(rcu_dereference_raw(*fdentry) != file)) {
+ fput_many(file, refs);
+- goto loop;
++ continue;
+ }
++
++ /*
++ * Ok, we have a ref to the file, and checked that it
++ * still exists.
++ */
++ return file;
+ }
++}
++
++
++static struct file *__fget(unsigned int fd, fmode_t mask, unsigned int refs)
++{
++ struct files_struct *files = current->files;
++ struct file *file;
++
++ rcu_read_lock();
++ file = __fget_files_rcu(files, fd, mask, refs);
+ rcu_read_unlock();
+
+ return file;
--- /dev/null
+From c94afc46cae7ad41b2ad6a99368147879f4b0e56 Mon Sep 17 00:00:00 2001
+From: Miaohe Lin <linmiaohe@huawei.com>
+Date: Thu, 17 Feb 2022 22:53:27 +0800
+Subject: memblock: use kfree() to release kmalloced memblock regions
+
+From: Miaohe Lin <linmiaohe@huawei.com>
+
+commit c94afc46cae7ad41b2ad6a99368147879f4b0e56 upstream.
+
+memblock.{reserved,memory}.regions may be allocated using kmalloc() in
+memblock_double_array(). Use kfree() to release these kmalloced regions
+indicated by memblock_{reserved,memory}_in_slab.
+
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Fixes: 3010f876500f ("mm: discard memblock data later")
+Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memblock.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -348,14 +348,20 @@ void __init memblock_discard(void)
+ addr = __pa(memblock.reserved.regions);
+ size = PAGE_ALIGN(sizeof(struct memblock_region) *
+ memblock.reserved.max);
+- __memblock_free_late(addr, size);
++ if (memblock_reserved_in_slab)
++ kfree(memblock.reserved.regions);
++ else
++ __memblock_free_late(addr, size);
+ }
+
+ if (memblock.memory.regions != memblock_memory_init_regions) {
+ addr = __pa(memblock.memory.regions);
+ size = PAGE_ALIGN(sizeof(struct memblock_region) *
+ memblock.memory.max);
+- __memblock_free_late(addr, size);
++ if (memblock_memory_in_slab)
++ kfree(memblock.memory.regions);
++ else
++ __memblock_free_late(addr, size);
+ }
+ }
+ #endif
--- /dev/null
+From kherbst@redhat.com Mon Feb 28 11:53:27 2022
+From: Karol Herbst <kherbst@redhat.com>
+Date: Mon, 28 Feb 2022 10:12:59 +0100
+Subject: Revert "drm/nouveau/pmu/gm200-: avoid touching PMU outside of DEVINIT/PREOS/ACR"
+To: stable@vger.kernel.org
+Cc: Karol Herbst <kherbst@redhat.com>, Ben Skeggs <bskeggs@redhat.com>, Lyude Paul <lyude@redhat.com>
+Message-ID: <20220228091259.996188-1-kherbst@redhat.com>
+
+From: Karol Herbst <kherbst@redhat.com>
+
+This reverts commit c9ec3d85c0eef7c71cdc68db758e0f0e378132c0.
+
+This commit causes a regression if 4cdd2450bf739bada353e82d27b00db9af8c3001
+is not applied as well. This was fixed for 5.16, 5.15 and 5.10.
+
+On older stable branches backporting this commit is complicated as relevant
+code changed quite a bit. Furthermore most of the affected hardware barely
+works on those and users would want to use the newer kernels anyway.
+
+Cc: stable@vger.kernel.org # 5.4 4.19 and 4.14
+Cc: Ben Skeggs <bskeggs@redhat.com>
+Cc: Lyude Paul <lyude@redhat.com>
+Link: https://gitlab.freedesktop.org/drm/nouveau/-/issues/149
+Signed-off-by: Karol Herbst <kherbst@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c | 37 ++++++++++---------------
+ 1 file changed, 16 insertions(+), 21 deletions(-)
+
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+@@ -88,13 +88,20 @@ nvkm_pmu_fini(struct nvkm_subdev *subdev
+ return 0;
+ }
+
+-static void
++static int
+ nvkm_pmu_reset(struct nvkm_pmu *pmu)
+ {
+ struct nvkm_device *device = pmu->subdev.device;
+
+ if (!pmu->func->enabled(pmu))
+- return;
++ return 0;
++
++ /* Inhibit interrupts, and wait for idle. */
++ nvkm_wr32(device, 0x10a014, 0x0000ffff);
++ nvkm_msec(device, 2000,
++ if (!nvkm_rd32(device, 0x10a04c))
++ break;
++ );
+
+ /* Reset. */
+ if (pmu->func->reset)
+@@ -105,37 +112,25 @@ nvkm_pmu_reset(struct nvkm_pmu *pmu)
+ if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
+ break;
+ );
++
++ return 0;
+ }
+
+ static int
+ nvkm_pmu_preinit(struct nvkm_subdev *subdev)
+ {
+ struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+- nvkm_pmu_reset(pmu);
+- return 0;
++ return nvkm_pmu_reset(pmu);
+ }
+
+ static int
+ nvkm_pmu_init(struct nvkm_subdev *subdev)
+ {
+ struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+- struct nvkm_device *device = pmu->subdev.device;
+-
+- if (!pmu->func->init)
+- return 0;
+-
+- if (pmu->func->enabled(pmu)) {
+- /* Inhibit interrupts, and wait for idle. */
+- nvkm_wr32(device, 0x10a014, 0x0000ffff);
+- nvkm_msec(device, 2000,
+- if (!nvkm_rd32(device, 0x10a04c))
+- break;
+- );
+-
+- nvkm_pmu_reset(pmu);
+- }
+-
+- return pmu->func->init(pmu);
++ int ret = nvkm_pmu_reset(pmu);
++ if (ret == 0 && pmu->func->init)
++ ret = pmu->func->init(pmu);
++ return ret;
+ }
+
+ static int