--- /dev/null
+From 5e1e087457c94ad7fafbe1cf6f774c6999ee29d4 Mon Sep 17 00:00:00 2001
+From: Zenghui Yu <yuzenghui@huawei.com>
+Date: Tue, 9 Aug 2022 12:38:48 +0800
+Subject: arm64: Fix match_list for erratum 1286807 on Arm Cortex-A76
+
+From: Zenghui Yu <yuzenghui@huawei.com>
+
+commit 5e1e087457c94ad7fafbe1cf6f774c6999ee29d4 upstream.
+
+Since commit 51f559d66527 ("arm64: Enable repeat tlbi workaround on KRYO4XX
+gold CPUs"), we failed to detect erratum 1286807 on Cortex-A76 because its
+entry in arm64_repeat_tlbi_list[] was accidentally corrupted by this commit.
+
+Fix this issue by creating a separate entry for Kryo4xx Gold.
+
+Fixes: 51f559d66527 ("arm64: Enable repeat tlbi workaround on KRYO4XX gold CPUs")
+Cc: Shreyas K K <quic_shrekk@quicinc.com>
+Signed-off-by: Zenghui Yu <yuzenghui@huawei.com>
+Acked-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20220809043848.969-1-yuzenghui@huawei.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/cpu_errata.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -208,6 +208,8 @@ static const struct arm64_cpu_capabiliti
+ #ifdef CONFIG_ARM64_ERRATUM_1286807
+ {
+ ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
++ },
++ {
+ /* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
+ ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
+ },
--- /dev/null
+From 44e602b4e52f70f04620bbbf4fe46ecb40170bde Mon Sep 17 00:00:00 2001
+From: Liam Howlett <liam.howlett@oracle.com>
+Date: Wed, 10 Aug 2022 16:02:25 +0000
+Subject: binder_alloc: add missing mmap_lock calls when using the VMA
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Liam Howlett <liam.howlett@oracle.com>
+
+commit 44e602b4e52f70f04620bbbf4fe46ecb40170bde upstream.
+
+Take the mmap_read_lock() when using the VMA in binder_alloc_print_pages()
+and when checking for a VMA in binder_alloc_new_buf_locked().
+
+It is worth noting binder_alloc_new_buf_locked() drops the VMA read lock
+after it verifies a VMA exists, but may be taken again deeper in the call
+stack, if necessary.
+
+Link: https://lkml.kernel.org/r/20220810160209.1630707-1-Liam.Howlett@oracle.com
+Fixes: a43cfc87caaf (android: binder: stop saving a pointer to the VMA)
+Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
+Reported-by: Ondrej Mosnacek <omosnace@redhat.com>
+Reported-by: <syzbot+a7b60a176ec13cafb793@syzkaller.appspotmail.com>
+Acked-by: Carlos Llamas <cmllamas@google.com>
+Tested-by: Ondrej Mosnacek <omosnace@redhat.com>
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: Christian Brauner (Microsoft) <brauner@kernel.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Hridya Valsaraju <hridya@google.com>
+Cc: Joel Fernandes <joel@joelfernandes.org>
+Cc: Martijn Coenen <maco@android.com>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Cc: Todd Kjos <tkjos@android.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: "Arve Hjønnevåg" <arve@android.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/android/binder_alloc.c | 31 +++++++++++++++++++++----------
+ 1 file changed, 21 insertions(+), 10 deletions(-)
+
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -395,12 +395,15 @@ static struct binder_buffer *binder_allo
+ size_t size, data_offsets_size;
+ int ret;
+
++ mmap_read_lock(alloc->vma_vm_mm);
+ if (!binder_alloc_get_vma(alloc)) {
++ mmap_read_unlock(alloc->vma_vm_mm);
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "%d: binder_alloc_buf, no vma\n",
+ alloc->pid);
+ return ERR_PTR(-ESRCH);
+ }
++ mmap_read_unlock(alloc->vma_vm_mm);
+
+ data_offsets_size = ALIGN(data_size, sizeof(void *)) +
+ ALIGN(offsets_size, sizeof(void *));
+@@ -922,17 +925,25 @@ void binder_alloc_print_pages(struct seq
+ * Make sure the binder_alloc is fully initialized, otherwise we might
+ * read inconsistent state.
+ */
+- if (binder_alloc_get_vma(alloc) != NULL) {
+- for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+- page = &alloc->pages[i];
+- if (!page->page_ptr)
+- free++;
+- else if (list_empty(&page->lru))
+- active++;
+- else
+- lru++;
+- }
++
++ mmap_read_lock(alloc->vma_vm_mm);
++ if (binder_alloc_get_vma(alloc) == NULL) {
++ mmap_read_unlock(alloc->vma_vm_mm);
++ goto uninitialized;
++ }
++
++ mmap_read_unlock(alloc->vma_vm_mm);
++ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
++ page = &alloc->pages[i];
++ if (!page->page_ptr)
++ free++;
++ else if (list_empty(&page->lru))
++ active++;
++ else
++ lru++;
+ }
++
++uninitialized:
+ mutex_unlock(&alloc->mutex);
+ seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
+ seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
--- /dev/null
+From 65fac0d54f374625b43a9d6ad1f2c212bd41f518 Mon Sep 17 00:00:00 2001
+From: Yu Kuai <yukuai3@huawei.com>
+Date: Tue, 26 Jul 2022 20:22:24 +0800
+Subject: blk-mq: fix io hung due to missing commit_rqs
+
+From: Yu Kuai <yukuai3@huawei.com>
+
+commit 65fac0d54f374625b43a9d6ad1f2c212bd41f518 upstream.
+
+Currently, in virtio_scsi, if 'bd->last' is not set to true while
+dispatching request, such io will stay in driver's queue, and driver
+will wait for block layer to dispatch more rqs. However, if block
+layer failed to dispatch more rq, it should trigger commit_rqs to
+inform driver.
+
+There is a problem in blk_mq_try_issue_list_directly() that commit_rqs
+won't be called:
+
+// assume that queue_depth is set to 1, list contains two rq
+blk_mq_try_issue_list_directly
+ blk_mq_request_issue_directly
+ // dispatch first rq
+ // last is false
+ __blk_mq_try_issue_directly
+ blk_mq_get_dispatch_budget
+ // succeed to get first budget
+ __blk_mq_issue_directly
+ scsi_queue_rq
+ cmd->flags |= SCMD_LAST
+ virtscsi_queuecommand
+ kick = (sc->flags & SCMD_LAST) != 0
+ // kick is false, first rq won't issue to disk
+ queued++
+
+ blk_mq_request_issue_directly
+ // dispatch second rq
+ __blk_mq_try_issue_directly
+ blk_mq_get_dispatch_budget
+ // failed to get second budget
+ ret == BLK_STS_RESOURCE
+ blk_mq_request_bypass_insert
+ // errors is still 0
+
+ if (!list_empty(list) || errors && ...)
+ // won't pass, commit_rqs won't be called
+
+In this situation, first rq relied on second rq to dispatch, while
+second rq relied on first rq to complete, thus they will both hang.
+
+Fix the problem by also treating 'BLK_STS_*RESOURCE' as 'errors' since
+it means that the request is not queued successfully.
+
+Same problem exists in blk_mq_dispatch_rq_list(), 'BLK_STS_*RESOURCE'
+can't be treated as 'errors' here, fix the problem by calling
+commit_rqs if queue_rq return 'BLK_STS_*RESOURCE'.
+
+Fixes: d666ba98f849 ("blk-mq: add mq_ops->commit_rqs()")
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Link: https://lore.kernel.org/r/20220726122224.1790882-1-yukuai1@huaweicloud.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-mq.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1400,7 +1400,8 @@ out:
+ /* If we didn't flush the entire list, we could have told the driver
+ * there was more coming, but that turned out to be a lie.
+ */
+- if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued)
++ if ((!list_empty(list) || errors || needs_resource ||
++ ret == BLK_STS_DEV_RESOURCE) && q->mq_ops->commit_rqs && queued)
+ q->mq_ops->commit_rqs(hctx);
+ /*
+ * Any items that need requeuing? Stuff them into hctx->dispatch,
+@@ -2111,6 +2112,7 @@ void blk_mq_try_issue_list_directly(stru
+ list_del_init(&rq->queuelist);
+ ret = blk_mq_request_issue_directly(rq, list_empty(list));
+ if (ret != BLK_STS_OK) {
++ errors++;
+ if (ret == BLK_STS_RESOURCE ||
+ ret == BLK_STS_DEV_RESOURCE) {
+ blk_mq_request_bypass_insert(rq, false,
+@@ -2118,7 +2120,6 @@ void blk_mq_try_issue_list_directly(stru
+ break;
+ }
+ blk_mq_end_request(rq, ret);
+- errors++;
+ } else
+ queued++;
+ }
--- /dev/null
+From 00da0cb385d05a89226e150a102eb49d8abb0359 Mon Sep 17 00:00:00 2001
+From: Salvatore Bonaccorso <carnil@debian.org>
+Date: Mon, 1 Aug 2022 11:15:30 +0200
+Subject: Documentation/ABI: Mention retbleed vulnerability info file for sysfs
+
+From: Salvatore Bonaccorso <carnil@debian.org>
+
+commit 00da0cb385d05a89226e150a102eb49d8abb0359 upstream.
+
+While reporting for the AMD retbleed vulnerability was added in
+
+ 6b80b59b3555 ("x86/bugs: Report AMD retbleed vulnerability")
+
+the new sysfs file was not mentioned so far in the ABI documentation for
+sysfs-devices-system-cpu. Fix that.
+
+Fixes: 6b80b59b3555 ("x86/bugs: Report AMD retbleed vulnerability")
+Signed-off-by: Salvatore Bonaccorso <carnil@debian.org>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Link: https://lore.kernel.org/r/20220801091529.325327-1-carnil@debian.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/ABI/testing/sysfs-devices-system-cpu | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -521,6 +521,7 @@ What: /sys/devices/system/cpu/vulnerabi
+ /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+ /sys/devices/system/cpu/vulnerabilities/itlb_multihit
+ /sys/devices/system/cpu/vulnerabilities/mmio_stale_data
++ /sys/devices/system/cpu/vulnerabilities/retbleed
+ Date: January 2018
+ Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+ Description: Information about CPU vulnerabilities
--- /dev/null
+From bc9e7fe313d5e56d4d5f34bcc04d1165f94f86fb Mon Sep 17 00:00:00 2001
+From: James Clark <james.clark@arm.com>
+Date: Thu, 28 Jul 2022 10:39:46 +0100
+Subject: perf python: Fix build when PYTHON_CONFIG is user supplied
+
+From: James Clark <james.clark@arm.com>
+
+commit bc9e7fe313d5e56d4d5f34bcc04d1165f94f86fb upstream.
+
+The previous change to Python autodetection had a small mistake where
+the auto value was used to determine the Python binary, rather than the
+user supplied value. The Python binary is only used for one part of the
+build process, rather than the final linking, so it was producing
+correct builds in most scenarios, especially when the auto detected
+value matched what the user wanted, or the system only had a valid set
+of Pythons.
+
+Change it so that the Python binary path is derived from either the
+PYTHON_CONFIG value or PYTHON value, depending on what is specified by
+the user. This was the original intention.
+
+This error was spotted in a build failure in an odd cross compilation
+environment after commit 4c41cb46a732fe82 ("perf python: Prefer
+python3") was merged.
+
+Fixes: 630af16eee495f58 ("perf tools: Use Python devtools for version autodetection rather than runtime")
+Signed-off-by: James Clark <james.clark@arm.com>
+Acked-by: Ian Rogers <irogers@google.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: James Clark <james.clark@arm.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20220728093946.1337642-1-james.clark@arm.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/perf/Makefile.config | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/tools/perf/Makefile.config
++++ b/tools/perf/Makefile.config
+@@ -263,7 +263,7 @@ endif
+ # defined. get-executable-or-default fails with an error if the first argument is supplied but
+ # doesn't exist.
+ override PYTHON_CONFIG := $(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON_AUTO))
+-override PYTHON := $(call get-executable-or-default,PYTHON,$(subst -config,,$(PYTHON_AUTO)))
++override PYTHON := $(call get-executable-or-default,PYTHON,$(subst -config,,$(PYTHON_CONFIG)))
+
+ grep-libs = $(filter -l%,$(1))
+ strip-libs = $(filter-out -l%,$(1))
--- /dev/null
+From bf515f024e4c0ca46a1b08c4f31860c01781d8a5 Mon Sep 17 00:00:00 2001
+From: Ian Rogers <irogers@google.com>
+Date: Mon, 22 Aug 2022 14:33:51 -0700
+Subject: perf stat: Clear evsel->reset_group for each stat run
+
+From: Ian Rogers <irogers@google.com>
+
+commit bf515f024e4c0ca46a1b08c4f31860c01781d8a5 upstream.
+
+If a weak group is broken then the reset_group flag remains set for
+the next run. Having reset_group set means the counter isn't created
+and ultimately a segfault.
+
+A simple reproduction of this is:
+
+  # perf stat -r2 -e '{cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles}:W'
+
+which will be added as a test in the next patch.
+
+Fixes: 4804e0111662d7d8 ("perf stat: Use affinity for opening events")
+Reviewed-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Ian Rogers <irogers@google.com>
+Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Tested-by: Xing Zhengjun <zhengjun.xing@linux.intel.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Kan Liang <kan.liang@linux.intel.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Link: https://lore.kernel.org/r/20220822213352.75721-1-irogers@google.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/perf/builtin-stat.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -807,6 +807,7 @@ static int __run_perf_stat(int argc, con
+ return -1;
+
+ evlist__for_each_entry(evsel_list, counter) {
++ counter->reset_group = false;
+ if (bpf_counter__load(counter, &target))
+ return -1;
+ if (!evsel__is_bpf(counter))
--- /dev/null
+From d4bdb0bebc5ba3299d74f123c782d99cd4e25c49 Mon Sep 17 00:00:00 2001
+From: Stephane Eranian <eranian@google.com>
+Date: Wed, 17 Aug 2022 22:46:13 -0700
+Subject: perf/x86/intel/ds: Fix precise store latency handling
+
+From: Stephane Eranian <eranian@google.com>
+
+commit d4bdb0bebc5ba3299d74f123c782d99cd4e25c49 upstream.
+
+With the existing code in store_latency_data(), the memory operation (mem_op)
+returned to the user is always OP_LOAD where in fact, it should be OP_STORE.
+This comes from the fact that the function is simply grabbing the information
+from a data source map which covers only load accesses. Intel 12th gen CPU
+offers precise store sampling that captures both the data source and latency.
+Therefore it can use the data source mapping table but must override the
+memory operation to reflect stores instead of loads.
+
+Fixes: 61b985e3e775 ("perf/x86/intel: Add perf core PMU support for Sapphire Rapids")
+Signed-off-by: Stephane Eranian <eranian@google.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20220818054613.1548130-1-eranian@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/intel/ds.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -236,6 +236,7 @@ static u64 load_latency_data(u64 status)
+ static u64 store_latency_data(u64 status)
+ {
+ union intel_x86_pebs_dse dse;
++ union perf_mem_data_src src;
+ u64 val;
+
+ dse.val = status;
+@@ -263,7 +264,14 @@ static u64 store_latency_data(u64 status
+
+ val |= P(BLK, NA);
+
+- return val;
++ /*
++ * the pebs_data_source table is only for loads
++ * so override the mem_op to say STORE instead
++ */
++ src.val = val;
++ src.mem_op = P(OP,STORE);
++
++ return src.val;
+ }
+
+ struct pebs_record_core {
--- /dev/null
+From 11745ecfe8fea4b4a4c322967a7605d2ecbd5080 Mon Sep 17 00:00:00 2001
+From: Stephane Eranian <eranian@google.com>
+Date: Wed, 3 Aug 2022 09:00:31 -0700
+Subject: perf/x86/intel/uncore: Fix broken read_counter() for SNB IMC PMU
+
+From: Stephane Eranian <eranian@google.com>
+
+commit 11745ecfe8fea4b4a4c322967a7605d2ecbd5080 upstream.
+
+Existing code was generating bogus counts for the SNB IMC bandwidth counters:
+
+$ perf stat -a -I 1000 -e uncore_imc/data_reads/,uncore_imc/data_writes/
+ 1.000327813 1,024.03 MiB uncore_imc/data_reads/
+ 1.000327813 20.73 MiB uncore_imc/data_writes/
+ 2.000580153 261,120.00 MiB uncore_imc/data_reads/
+ 2.000580153 23.28 MiB uncore_imc/data_writes/
+
+The problem was introduced by commit:
+ 07ce734dd8ad ("perf/x86/intel/uncore: Clean up client IMC")
+
+Where the read_counter callback was replaced to point to the generic
+uncore_mmio_read_counter() function.
+
+The SNB IMC counters are free-running 32-bit counters laid out contiguously in
+MMIO. But uncore_mmio_read_counter() is using a readq() call to read from
+MMIO therefore reading 64-bit from MMIO. Although this is okay for the
+uncore_perf_event_update() function because it is shifting the value based
+on the actual counter width to compute a delta, it is not okay for the
+uncore_pmu_event_start() which is simply reading the counter and therefore
+priming the event->prev_count with a bogus value which is responsible for
+causing bogus deltas in the perf stat command above.
+
+The fix is to reintroduce the custom callback for read_counter for the SNB
+IMC PMU and use readl() instead of readq(). With the change the output of
+perf stat is back to normal:
+$ perf stat -a -I 1000 -e uncore_imc/data_reads/,uncore_imc/data_writes/
+ 1.000120987 296.94 MiB uncore_imc/data_reads/
+ 1.000120987 138.42 MiB uncore_imc/data_writes/
+ 2.000403144 175.91 MiB uncore_imc/data_reads/
+ 2.000403144 68.50 MiB uncore_imc/data_writes/
+
+Fixes: 07ce734dd8ad ("perf/x86/intel/uncore: Clean up client IMC")
+Signed-off-by: Stephane Eranian <eranian@google.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
+Link: https://lore.kernel.org/r/20220803160031.1379788-1-eranian@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/intel/uncore_snb.c | 18 +++++++++++++++++-
+ 1 file changed, 17 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/events/intel/uncore_snb.c
++++ b/arch/x86/events/intel/uncore_snb.c
+@@ -788,6 +788,22 @@ int snb_pci2phy_map_init(int devid)
+ return 0;
+ }
+
++static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
++{
++ struct hw_perf_event *hwc = &event->hw;
++
++ /*
++ * SNB IMC counters are 32-bit and are laid out back to back
++ * in MMIO space. Therefore we must use a 32-bit accessor function
++ * using readq() from uncore_mmio_read_counter() causes problems
++ * because it is reading 64-bit at a time. This is okay for the
++ * uncore_perf_event_update() function because it drops the upper
++ * 32-bits but not okay for plain uncore_read_counter() as invoked
++ * in uncore_pmu_event_start().
++ */
++ return (u64)readl(box->io_addr + hwc->event_base);
++}
++
+ static struct pmu snb_uncore_imc_pmu = {
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = snb_uncore_imc_event_init,
+@@ -807,7 +823,7 @@ static struct intel_uncore_ops snb_uncor
+ .disable_event = snb_uncore_imc_disable_event,
+ .enable_event = snb_uncore_imc_enable_event,
+ .hw_config = snb_uncore_imc_hw_config,
+- .read_counter = uncore_mmio_read_counter,
++ .read_counter = snb_uncore_imc_read_counter,
+ };
+
+ static struct intel_uncore_type snb_uncore_imc = {
--- /dev/null
+From d957e7ffb2c72410bcc1a514153a46719255a5da Mon Sep 17 00:00:00 2001
+From: Saurabh Sengar <ssengar@linux.microsoft.com>
+Date: Thu, 4 Aug 2022 08:55:34 -0700
+Subject: scsi: storvsc: Remove WQ_MEM_RECLAIM from storvsc_error_wq
+
+From: Saurabh Sengar <ssengar@linux.microsoft.com>
+
+commit d957e7ffb2c72410bcc1a514153a46719255a5da upstream.
+
+storvsc_error_wq workqueue should not be marked as WQ_MEM_RECLAIM as it
+doesn't need to make forward progress under memory pressure. Marking this
+workqueue as WQ_MEM_RECLAIM may cause deadlock while flushing a
+non-WQ_MEM_RECLAIM workqueue. In the current state it causes the following
+warning:
+
+[ 14.506347] ------------[ cut here ]------------
+[ 14.506354] workqueue: WQ_MEM_RECLAIM storvsc_error_wq_0:storvsc_remove_lun is flushing !WQ_MEM_RECLAIM events_freezable_power_:disk_events_workfn
+[ 14.506360] WARNING: CPU: 0 PID: 8 at <-snip->kernel/workqueue.c:2623 check_flush_dependency+0xb5/0x130
+[ 14.506390] CPU: 0 PID: 8 Comm: kworker/u4:0 Not tainted 5.4.0-1086-azure #91~18.04.1-Ubuntu
+[ 14.506391] Hardware name: Microsoft Corporation Virtual Machine/Virtual Machine, BIOS Hyper-V UEFI Release v4.1 05/09/2022
+[ 14.506393] Workqueue: storvsc_error_wq_0 storvsc_remove_lun
+[ 14.506395] RIP: 0010:check_flush_dependency+0xb5/0x130
+ <-snip->
+[ 14.506408] Call Trace:
+[ 14.506412] __flush_work+0xf1/0x1c0
+[ 14.506414] __cancel_work_timer+0x12f/0x1b0
+[ 14.506417] ? kernfs_put+0xf0/0x190
+[ 14.506418] cancel_delayed_work_sync+0x13/0x20
+[ 14.506420] disk_block_events+0x78/0x80
+[ 14.506421] del_gendisk+0x3d/0x2f0
+[ 14.506423] sr_remove+0x28/0x70
+[ 14.506427] device_release_driver_internal+0xef/0x1c0
+[ 14.506428] device_release_driver+0x12/0x20
+[ 14.506429] bus_remove_device+0xe1/0x150
+[ 14.506431] device_del+0x167/0x380
+[ 14.506432] __scsi_remove_device+0x11d/0x150
+[ 14.506433] scsi_remove_device+0x26/0x40
+[ 14.506434] storvsc_remove_lun+0x40/0x60
+[ 14.506436] process_one_work+0x209/0x400
+[ 14.506437] worker_thread+0x34/0x400
+[ 14.506439] kthread+0x121/0x140
+[ 14.506440] ? process_one_work+0x400/0x400
+[ 14.506441] ? kthread_park+0x90/0x90
+[ 14.506443] ret_from_fork+0x35/0x40
+[ 14.506445] ---[ end trace 2d9633159fdc6ee7 ]---
+
+Link: https://lore.kernel.org/r/1659628534-17539-1-git-send-email-ssengar@linux.microsoft.com
+Fixes: 436ad9413353 ("scsi: storvsc: Allow only one remove lun work item to be issued per lun")
+Reviewed-by: Michael Kelley <mikelley@microsoft.com>
+Signed-off-by: Saurabh Sengar <ssengar@linux.microsoft.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/storvsc_drv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -2093,7 +2093,7 @@ static int storvsc_probe(struct hv_devic
+ */
+ host_dev->handle_error_wq =
+ alloc_ordered_workqueue("storvsc_error_wq_%d",
+- WQ_MEM_RECLAIM,
++ 0,
+ host->host_no);
+ if (!host_dev->handle_error_wq) {
+ ret = -ENOMEM;
--- /dev/null
+From 6d17a112e9a63ff6a5edffd1676b99e0ffbcd269 Mon Sep 17 00:00:00 2001
+From: Kiwoong Kim <kwmad.kim@samsung.com>
+Date: Tue, 2 Aug 2022 10:42:31 +0900
+Subject: scsi: ufs: core: Enable link lost interrupt
+
+From: Kiwoong Kim <kwmad.kim@samsung.com>
+
+commit 6d17a112e9a63ff6a5edffd1676b99e0ffbcd269 upstream.
+
+Link lost is treated as fatal error with commit c99b9b230149 ("scsi: ufs:
+Treat link loss as fatal error"), but the event isn't registered as
+interrupt source. Enable it.
+
+Link: https://lore.kernel.org/r/1659404551-160958-1-git-send-email-kwmad.kim@samsung.com
+Fixes: c99b9b230149 ("scsi: ufs: Treat link loss as fatal error")
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Kiwoong Kim <kwmad.kim@samsung.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/ufs/ufshci.h | 6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+--- a/drivers/scsi/ufs/ufshci.h
++++ b/drivers/scsi/ufs/ufshci.h
+@@ -133,11 +133,7 @@ static inline u32 ufshci_version(u32 maj
+
+ #define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL | UFSHCD_UIC_PWR_MASK)
+
+-#define UFSHCD_ERROR_MASK (UIC_ERROR |\
+- DEVICE_FATAL_ERROR |\
+- CONTROLLER_FATAL_ERROR |\
+- SYSTEM_BUS_FATAL_ERROR |\
+- CRYPTO_ENGINE_FATAL_ERROR)
++#define UFSHCD_ERROR_MASK (UIC_ERROR | INT_FATAL_ERRORS)
+
+ #define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\
+ CONTROLLER_FATAL_ERROR |\
revert-md-raid-destroy-the-bitmap-after-destroying-the-thread.patch
md-call-__md_stop_writes-in-md_stop.patch
mptcp-fix-crash-due-to-tcp_tsorted_anchor-was-initialized-before-release-skb.patch
+arm64-fix-match_list-for-erratum-1286807-on-arm-cortex-a76.patch
+binder_alloc-add-missing-mmap_lock-calls-when-using-the-vma.patch
+x86-nospec-fix-i386-rsb-stuffing.patch
+documentation-abi-mention-retbleed-vulnerability-info-file-for-sysfs.patch
+blk-mq-fix-io-hung-due-to-missing-commit_rqs.patch
+perf-python-fix-build-when-python_config-is-user-supplied.patch
+perf-x86-intel-uncore-fix-broken-read_counter-for-snb-imc-pmu.patch
+perf-x86-intel-ds-fix-precise-store-latency-handling.patch
+perf-stat-clear-evsel-reset_group-for-each-stat-run.patch
+scsi-ufs-core-enable-link-lost-interrupt.patch
+scsi-storvsc-remove-wq_mem_reclaim-from-storvsc_error_wq.patch
--- /dev/null
+From 332924973725e8cdcc783c175f68cf7e162cb9e5 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 19 Aug 2022 13:01:35 +0200
+Subject: x86/nospec: Fix i386 RSB stuffing
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 332924973725e8cdcc783c175f68cf7e162cb9e5 upstream.
+
+Turns out that i386 doesn't unconditionally have LFENCE, as such the
+loop in __FILL_RETURN_BUFFER isn't actually speculation safe on such
+chips.
+
+Fixes: ba6e31af2be9 ("x86/speculation: Add LFENCE to RSB fill sequence")
+Reported-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/Yv9tj9vbQ9nNlXoY@worktop.programming.kicks-ass.net
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -50,6 +50,7 @@
+ * the optimal version - two calls, each with their own speculation
+ * trap should their return address end up getting used, in a loop.
+ */
++#ifdef CONFIG_X86_64
+ #define __FILL_RETURN_BUFFER(reg, nr) \
+ mov $(nr/2), reg; \
+ 771: \
+@@ -60,6 +61,17 @@
+ jnz 771b; \
+ /* barrier for jnz misprediction */ \
+ lfence;
++#else
++/*
++ * i386 doesn't unconditionally have LFENCE, as such it can't
++ * do a loop.
++ */
++#define __FILL_RETURN_BUFFER(reg, nr) \
++ .rept nr; \
++ __FILL_RETURN_SLOT; \
++ .endr; \
++ add $(BITS_PER_LONG/8) * nr, %_ASM_SP;
++#endif
+
+ /*
+ * Stuff a single RSB slot.