--- /dev/null
+From b7ba6d8dc3569e49800ef0136799f26f43e237e8 Mon Sep 17 00:00:00 2001
+From: Steven Price <steven.price@arm.com>
+Date: Mon, 11 Apr 2022 16:22:32 +0100
+Subject: cpu/hotplug: Remove the 'cpu' member of cpuhp_cpu_state
+
+From: Steven Price <steven.price@arm.com>
+
+commit b7ba6d8dc3569e49800ef0136799f26f43e237e8 upstream.
+
+Currently the setting of the 'cpu' member of struct cpuhp_cpu_state in
+cpuhp_create() is too late as it is used earlier in _cpu_up().
+
+If kzalloc_node() in __smpboot_create_thread() fails then the rollback will
+be done with st->cpu==0 causing CPU0 to be erroneously set to be dying,
+causing the scheduler to get mightily confused and throw its toys out of
+the pram.
+
+However the cpu number is actually available directly, so simply remove
+the 'cpu' member and avoid the problem in the first place.
+
+Fixes: 2ea46c6fc945 ("cpumask/hotplug: Fix cpu_dying() state tracking")
+Signed-off-by: Steven Price <steven.price@arm.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/20220411152233.474129-2-steven.price@arm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/cpu.c | 36 ++++++++++++++++++------------------
+ 1 file changed, 18 insertions(+), 18 deletions(-)
+
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -70,7 +70,6 @@ struct cpuhp_cpu_state {
+ bool rollback;
+ bool single;
+ bool bringup;
+- int cpu;
+ struct hlist_node *node;
+ struct hlist_node *last;
+ enum cpuhp_state cb_state;
+@@ -474,7 +473,7 @@ static inline bool cpu_smt_allowed(unsig
+ #endif
+
+ static inline enum cpuhp_state
+-cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
++cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
+ {
+ enum cpuhp_state prev_state = st->state;
+ bool bringup = st->state < target;
+@@ -485,14 +484,15 @@ cpuhp_set_state(struct cpuhp_cpu_state *
+ st->target = target;
+ st->single = false;
+ st->bringup = bringup;
+- if (cpu_dying(st->cpu) != !bringup)
+- set_cpu_dying(st->cpu, !bringup);
++ if (cpu_dying(cpu) != !bringup)
++ set_cpu_dying(cpu, !bringup);
+
+ return prev_state;
+ }
+
+ static inline void
+-cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
++cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,
++ enum cpuhp_state prev_state)
+ {
+ bool bringup = !st->bringup;
+
+@@ -519,8 +519,8 @@ cpuhp_reset_state(struct cpuhp_cpu_state
+ }
+
+ st->bringup = bringup;
+- if (cpu_dying(st->cpu) != !bringup)
+- set_cpu_dying(st->cpu, !bringup);
++ if (cpu_dying(cpu) != !bringup)
++ set_cpu_dying(cpu, !bringup);
+ }
+
+ /* Regular hotplug invocation of the AP hotplug thread */
+@@ -540,15 +540,16 @@ static void __cpuhp_kick_ap(struct cpuhp
+ wait_for_ap_thread(st, st->bringup);
+ }
+
+-static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
++static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
++ enum cpuhp_state target)
+ {
+ enum cpuhp_state prev_state;
+ int ret;
+
+- prev_state = cpuhp_set_state(st, target);
++ prev_state = cpuhp_set_state(cpu, st, target);
+ __cpuhp_kick_ap(st);
+ if ((ret = st->result)) {
+- cpuhp_reset_state(st, prev_state);
++ cpuhp_reset_state(cpu, st, prev_state);
+ __cpuhp_kick_ap(st);
+ }
+
+@@ -580,7 +581,7 @@ static int bringup_wait_for_ap(unsigned
+ if (st->target <= CPUHP_AP_ONLINE_IDLE)
+ return 0;
+
+- return cpuhp_kick_ap(st, st->target);
++ return cpuhp_kick_ap(cpu, st, st->target);
+ }
+
+ static int bringup_cpu(unsigned int cpu)
+@@ -703,7 +704,7 @@ static int cpuhp_up_callbacks(unsigned i
+ ret, cpu, cpuhp_get_step(st->state)->name,
+ st->state);
+
+- cpuhp_reset_state(st, prev_state);
++ cpuhp_reset_state(cpu, st, prev_state);
+ if (can_rollback_cpu(st))
+ WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
+ prev_state));
+@@ -720,7 +721,6 @@ static void cpuhp_create(unsigned int cp
+
+ init_completion(&st->done_up);
+ init_completion(&st->done_down);
+- st->cpu = cpu;
+ }
+
+ static int cpuhp_should_run(unsigned int cpu)
+@@ -874,7 +874,7 @@ static int cpuhp_kick_ap_work(unsigned i
+ cpuhp_lock_release(true);
+
+ trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
+- ret = cpuhp_kick_ap(st, st->target);
++ ret = cpuhp_kick_ap(cpu, st, st->target);
+ trace_cpuhp_exit(cpu, st->state, prev_state, ret);
+
+ return ret;
+@@ -1106,7 +1106,7 @@ static int cpuhp_down_callbacks(unsigned
+ ret, cpu, cpuhp_get_step(st->state)->name,
+ st->state);
+
+- cpuhp_reset_state(st, prev_state);
++ cpuhp_reset_state(cpu, st, prev_state);
+
+ if (st->state < prev_state)
+ WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
+@@ -1133,7 +1133,7 @@ static int __ref _cpu_down(unsigned int
+
+ cpuhp_tasks_frozen = tasks_frozen;
+
+- prev_state = cpuhp_set_state(st, target);
++ prev_state = cpuhp_set_state(cpu, st, target);
+ /*
+ * If the current CPU state is in the range of the AP hotplug thread,
+ * then we need to kick the thread.
+@@ -1164,7 +1164,7 @@ static int __ref _cpu_down(unsigned int
+ ret = cpuhp_down_callbacks(cpu, st, target);
+ if (ret && st->state < prev_state) {
+ if (st->state == CPUHP_TEARDOWN_CPU) {
+- cpuhp_reset_state(st, prev_state);
++ cpuhp_reset_state(cpu, st, prev_state);
+ __cpuhp_kick_ap(st);
+ } else {
+ WARN(1, "DEAD callback error for CPU%d", cpu);
+@@ -1351,7 +1351,7 @@ static int _cpu_up(unsigned int cpu, int
+
+ cpuhp_tasks_frozen = tasks_frozen;
+
+- cpuhp_set_state(st, target);
++ cpuhp_set_state(cpu, st, target);
+ /*
+ * If the current CPU state is in the range of the AP hotplug thread,
+ * then we need to kick the thread once more.
--- /dev/null
+From 9e02977bfad006af328add9434c8bffa40e053bb Mon Sep 17 00:00:00 2001
+From: Chao Gao <chao.gao@intel.com>
+Date: Wed, 13 Apr 2022 08:32:22 +0200
+Subject: dma-direct: avoid redundant memory sync for swiotlb
+
+From: Chao Gao <chao.gao@intel.com>
+
+commit 9e02977bfad006af328add9434c8bffa40e053bb upstream.
+
+When we looked into FIO performance with swiotlb enabled in VM, we found
+swiotlb_bounce() is always called one more time than expected for each DMA
+read request.
+
+It turns out that the bounce buffer is copied to original DMA buffer twice
+after the completion of a DMA request (one is done in
+dma_direct_sync_single_for_cpu(), the other by swiotlb_tbl_unmap_single()).
+But the content in bounce buffer actually doesn't change between the two
+rounds of copy. So, one round of copy is redundant.
+
+Pass DMA_ATTR_SKIP_CPU_SYNC flag to swiotlb_tbl_unmap_single() to
+skip the memory copy in it.
+
+This fix increases FIO 64KB sequential read throughput in a guest with
+swiotlb=force by 5.6%.
+
+Fixes: 55897af63091 ("dma-direct: merge swiotlb_dma_ops into the dma_direct code")
+Reported-by: Wang Zhaoyang1 <zhaoyang1.wang@intel.com>
+Reported-by: Gao Liang <liang.gao@intel.com>
+Signed-off-by: Chao Gao <chao.gao@intel.com>
+Reviewed-by: Kevin Tian <kevin.tian@intel.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/dma/direct.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/kernel/dma/direct.h
++++ b/kernel/dma/direct.h
+@@ -114,6 +114,7 @@ static inline void dma_direct_unmap_page
+ dma_direct_sync_single_for_cpu(dev, addr, size, dir);
+
+ if (unlikely(is_swiotlb_buffer(dev, phys)))
+- swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
++ swiotlb_tbl_unmap_single(dev, phys, size, dir,
++ attrs | DMA_ATTR_SKIP_CPU_SYNC);
+ }
+ #endif /* _KERNEL_DMA_DIRECT_H */
--- /dev/null
+From 1acb34e7dd7720a1fff00cbd4d000ec3219dc9d6 Mon Sep 17 00:00:00 2001
+From: Matt Roper <matthew.d.roper@intel.com>
+Date: Thu, 7 Apr 2022 09:18:39 -0700
+Subject: drm/i915: Sunset igpu legacy mmap support based on GRAPHICS_VER_FULL
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Matt Roper <matthew.d.roper@intel.com>
+
+commit 1acb34e7dd7720a1fff00cbd4d000ec3219dc9d6 upstream.
+
+The intent of the version check in the mmap ioctl was to maintain
+support for existing platforms (i.e., ADL/RPL and earlier), but drop
+support on all future igpu platforms. As we've seen on the dgpu side,
+the hardware teams are using a more fine-grained numbering system for IP
+version numbers these days, so it's possible the version number
+associated with our next igpu could be some form of "12.xx" rather than
+13 or higher. Comparing against the full ver.release number will ensure
+the intent of the check is maintained no matter what numbering the
+hardware teams settle on.
+
+Fixes: d3f3baa3562a ("drm/i915: Reinstate the mmap ioctl for some platforms")
+Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Cc: Lucas De Marchi <lucas.demarchi@intel.com>
+Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
+Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220407161839.1073443-1-matthew.d.roper@intel.com
+(cherry picked from commit 8e7e5c077cd57ee9a36d58c65f07257dc49a88d5)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/gem/i915_gem_mman.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+@@ -67,7 +67,7 @@ i915_gem_mmap_ioctl(struct drm_device *d
+ * mmap ioctl is disallowed for all discrete platforms,
+ * and for all platforms with GRAPHICS_VER > 12.
+ */
+- if (IS_DGFX(i915) || GRAPHICS_VER(i915) > 12)
++ if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) > IP_VER(12, 0))
+ return -EOPNOTSUPP;
+
+ if (args->flags & ~(I915_MMAP_WC))
--- /dev/null
+From 4f9f45d0eb0e7d449bc9294459df79b9c66edfac Mon Sep 17 00:00:00 2001
+From: Sherry Sun <sherry.sun@nxp.com>
+Date: Mon, 21 Mar 2022 15:51:30 +0800
+Subject: dt-bindings: memory: snps,ddrc-3.80a compatible also need interrupts
+
+From: Sherry Sun <sherry.sun@nxp.com>
+
+commit 4f9f45d0eb0e7d449bc9294459df79b9c66edfac upstream.
+
+For the snps,ddrc-3.80a compatible, the interrupts property is also
+required, also order the compatibles by name (s goes before x).
+
+Signed-off-by: Sherry Sun <sherry.sun@nxp.com>
+Fixes: a9e6b3819b36 ("dt-bindings: memory: Add entry for version 3.80a")
+Link: https://lore.kernel.org/r/20220321075131.17811-2-sherry.sun@nxp.com
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/devicetree/bindings/memory-controllers/synopsys,ddrc-ecc.yaml | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/Documentation/devicetree/bindings/memory-controllers/synopsys,ddrc-ecc.yaml
++++ b/Documentation/devicetree/bindings/memory-controllers/synopsys,ddrc-ecc.yaml
+@@ -24,9 +24,9 @@ description: |
+ properties:
+ compatible:
+ enum:
++ - snps,ddrc-3.80a
+ - xlnx,zynq-ddrc-a05
+ - xlnx,zynqmp-ddrc-2.40a
+- - snps,ddrc-3.80a
+
+ interrupts:
+ maxItems: 1
+@@ -43,7 +43,9 @@ allOf:
+ properties:
+ compatible:
+ contains:
+- const: xlnx,zynqmp-ddrc-2.40a
++ enum:
++ - snps,ddrc-3.80a
++ - xlnx,zynqmp-ddrc-2.40a
+ then:
+ required:
+ - interrupts
--- /dev/null
+From ce8b3ad1071b764e963d9b08ac34ffddddf12da6 Mon Sep 17 00:00:00 2001
+From: Dongjin Yang <dj76.yang@samsung.com>
+Date: Mon, 4 Apr 2022 11:28:57 +0900
+Subject: dt-bindings: net: snps: remove duplicate name
+
+From: Dongjin Yang <dj76.yang@samsung.com>
+
+commit ce8b3ad1071b764e963d9b08ac34ffddddf12da6 upstream.
+
+snps,dwmac has duplicated name for loongson,ls2k-dwmac and
+loongson,ls7a-dwmac.
+
+Signed-off-by: Dongjin Yang <dj76.yang@samsung.com>
+Fixes: 68277749a013 ("dt-bindings: dwmac: Add bindings for new Loongson SoC and bridge chip")
+Reviewed-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Signed-off-by: Rob Herring <robh@kernel.org>
+Link: https://lore.kernel.org/r/20220404022857epcms1p6e6af1a6a86569f339e50c318abde7d3c@epcms1p6
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/devicetree/bindings/net/snps,dwmac.yaml | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml
++++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+@@ -53,20 +53,18 @@ properties:
+ - allwinner,sun8i-r40-gmac
+ - allwinner,sun8i-v3s-emac
+ - allwinner,sun50i-a64-emac
+- - loongson,ls2k-dwmac
+- - loongson,ls7a-dwmac
+ - amlogic,meson6-dwmac
+ - amlogic,meson8b-dwmac
+ - amlogic,meson8m2-dwmac
+ - amlogic,meson-gxbb-dwmac
+ - amlogic,meson-axg-dwmac
+- - loongson,ls2k-dwmac
+- - loongson,ls7a-dwmac
+ - ingenic,jz4775-mac
+ - ingenic,x1000-mac
+ - ingenic,x1600-mac
+ - ingenic,x1830-mac
+ - ingenic,x2000-mac
++ - loongson,ls2k-dwmac
++ - loongson,ls7a-dwmac
+ - rockchip,px30-gmac
+ - rockchip,rk3128-gmac
+ - rockchip,rk3228-gmac
--- /dev/null
+From 993eb48fa199b5f476df8204e652eff63dd19361 Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Mon, 11 Apr 2022 21:07:51 +0300
+Subject: i2c: dev: check return value when calling dev_set_name()
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit 993eb48fa199b5f476df8204e652eff63dd19361 upstream.
+
+If dev_set_name() fails, the dev_name() is null, check the return
+value of dev_set_name() to avoid the null-ptr-deref.
+
+Fixes: 1413ef638aba ("i2c: dev: Fix the race between the release of i2c_dev and cdev")
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Wolfram Sang <wsa@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i2c/i2c-dev.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/drivers/i2c/i2c-dev.c
++++ b/drivers/i2c/i2c-dev.c
+@@ -668,16 +668,21 @@ static int i2cdev_attach_adapter(struct
+ i2c_dev->dev.class = i2c_dev_class;
+ i2c_dev->dev.parent = &adap->dev;
+ i2c_dev->dev.release = i2cdev_dev_release;
+- dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr);
++
++ res = dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr);
++ if (res)
++ goto err_put_i2c_dev;
+
+ res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev);
+- if (res) {
+- put_i2c_dev(i2c_dev, false);
+- return res;
+- }
++ if (res)
++ goto err_put_i2c_dev;
+
+ pr_debug("adapter [%s] registered as minor %d\n", adap->name, adap->nr);
+ return 0;
++
++err_put_i2c_dev:
++ put_i2c_dev(i2c_dev, false);
++ return res;
+ }
+
+ static int i2cdev_detach_adapter(struct device *dev, void *dummy)
--- /dev/null
+From bd8963e602c77adc76dbbbfc3417c3cf14fed76b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Martin=20Povi=C5=A1er?= <povik+lin@cutebit.org>
+Date: Tue, 29 Mar 2022 20:38:17 +0200
+Subject: i2c: pasemi: Wait for write xfers to finish
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Martin Povišer <povik+lin@cutebit.org>
+
+commit bd8963e602c77adc76dbbbfc3417c3cf14fed76b upstream.
+
+Wait for completion of write transfers before returning from the driver.
+At first sight it may seem advantageous to leave write transfers queued
+for the controller to carry out on its own time, but there's a couple of
+issues with it:
+
+ * Driver doesn't check for FIFO space.
+
+ * The queued writes can complete while the driver is in its I2C read
+ transfer path which means it will get confused by the raising of
+ XEN (the 'transaction ended' signal). This can cause a spurious
+ ENODATA error due to premature reading of the MRXFIFO register.
+
+Adding the wait fixes some unreliability issues with the driver. There's
+some efficiency cost to it (especially with pasemi_smb_waitready doing
+its polling), but that will be alleviated once the driver receives
+interrupt support.
+
+Fixes: beb58aa39e6e ("i2c: PA Semi SMBus driver")
+Signed-off-by: Martin Povišer <povik+lin@cutebit.org>
+Reviewed-by: Sven Peter <sven@svenpeter.dev>
+Signed-off-by: Wolfram Sang <wsa@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i2c/busses/i2c-pasemi-core.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/i2c/busses/i2c-pasemi-core.c
++++ b/drivers/i2c/busses/i2c-pasemi-core.c
+@@ -137,6 +137,12 @@ static int pasemi_i2c_xfer_msg(struct i2
+
+ TXFIFO_WR(smbus, msg->buf[msg->len-1] |
+ (stop ? MTXFIFO_STOP : 0));
++
++ if (stop) {
++ err = pasemi_smb_waitready(smbus);
++ if (err)
++ goto reset_out;
++ }
+ }
+
+ return 0;
--- /dev/null
+From 2dfe63e61cc31ee59ce951672b0850b5229cd5b0 Mon Sep 17 00:00:00 2001
+From: Marco Elver <elver@google.com>
+Date: Thu, 14 Apr 2022 19:13:40 -0700
+Subject: mm, kfence: support kmem_dump_obj() for KFENCE objects
+
+From: Marco Elver <elver@google.com>
+
+commit 2dfe63e61cc31ee59ce951672b0850b5229cd5b0 upstream.
+
+Calling kmem_obj_info() via kmem_dump_obj() on KFENCE objects has been
+producing garbage data due to the object not actually being maintained
+by SLAB or SLUB.
+
+Fix this by implementing __kfence_obj_info() that copies relevant
+information to struct kmem_obj_info when the object was allocated by
+KFENCE; this is called by a common kmem_obj_info(), which also calls the
+slab/slub/slob specific variant now called __kmem_obj_info().
+
+For completeness, kmem_dump_obj() now displays if the object was
+allocated by KFENCE.
+
+Link: https://lore.kernel.org/all/20220323090520.GG16885@xsang-OptiPlex-9020/
+Link: https://lkml.kernel.org/r/20220406131558.3558585-1-elver@google.com
+Fixes: b89fb5ef0ce6 ("mm, kfence: insert KFENCE hooks for SLUB")
+Fixes: d3fb45f370d9 ("mm, kfence: insert KFENCE hooks for SLAB")
+Signed-off-by: Marco Elver <elver@google.com>
+Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
+Reported-by: kernel test robot <oliver.sang@intel.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz> [slab]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/kfence.h | 24 ++++++++++++++++++++++++
+ mm/kfence/core.c | 21 ---------------------
+ mm/kfence/kfence.h | 21 +++++++++++++++++++++
+ mm/kfence/report.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++
+ mm/slab.c | 2 +-
+ mm/slab.h | 2 +-
+ mm/slab_common.c | 9 +++++++++
+ mm/slob.c | 2 +-
+ mm/slub.c | 2 +-
+ 9 files changed, 105 insertions(+), 25 deletions(-)
+
+--- a/include/linux/kfence.h
++++ b/include/linux/kfence.h
+@@ -204,6 +204,22 @@ static __always_inline __must_check bool
+ */
+ bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs);
+
++#ifdef CONFIG_PRINTK
++struct kmem_obj_info;
++/**
++ * __kfence_obj_info() - fill kmem_obj_info struct
++ * @kpp: kmem_obj_info to be filled
++ * @object: the object
++ *
++ * Return:
++ * * false - not a KFENCE object
++ * * true - a KFENCE object, filled @kpp
++ *
++ * Copies information to @kpp for KFENCE objects.
++ */
++bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
++#endif
++
+ #else /* CONFIG_KFENCE */
+
+ static inline bool is_kfence_address(const void *addr) { return false; }
+@@ -221,6 +237,14 @@ static inline bool __must_check kfence_h
+ return false;
+ }
+
++#ifdef CONFIG_PRINTK
++struct kmem_obj_info;
++static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
++{
++ return false;
++}
++#endif
++
+ #endif
+
+ #endif /* _LINUX_KFENCE_H */
+--- a/mm/kfence/core.c
++++ b/mm/kfence/core.c
+@@ -222,27 +222,6 @@ static bool kfence_unprotect(unsigned lo
+ return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
+ }
+
+-static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
+-{
+- long index;
+-
+- /* The checks do not affect performance; only called from slow-paths. */
+-
+- if (!is_kfence_address((void *)addr))
+- return NULL;
+-
+- /*
+- * May be an invalid index if called with an address at the edge of
+- * __kfence_pool, in which case we would report an "invalid access"
+- * error.
+- */
+- index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
+- if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
+- return NULL;
+-
+- return &kfence_metadata[index];
+-}
+-
+ static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
+ {
+ unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
+--- a/mm/kfence/kfence.h
++++ b/mm/kfence/kfence.h
+@@ -96,6 +96,27 @@ struct kfence_metadata {
+
+ extern struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];
+
++static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
++{
++ long index;
++
++ /* The checks do not affect performance; only called from slow-paths. */
++
++ if (!is_kfence_address((void *)addr))
++ return NULL;
++
++ /*
++ * May be an invalid index if called with an address at the edge of
++ * __kfence_pool, in which case we would report an "invalid access"
++ * error.
++ */
++ index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
++ if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
++ return NULL;
++
++ return &kfence_metadata[index];
++}
++
+ /* KFENCE error types for report generation. */
+ enum kfence_error_type {
+ KFENCE_ERROR_OOB, /* Detected a out-of-bounds access. */
+--- a/mm/kfence/report.c
++++ b/mm/kfence/report.c
+@@ -273,3 +273,50 @@ void kfence_report_error(unsigned long a
+ /* We encountered a memory safety error, taint the kernel! */
+ add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK);
+ }
++
++#ifdef CONFIG_PRINTK
++static void kfence_to_kp_stack(const struct kfence_track *track, void **kp_stack)
++{
++ int i, j;
++
++ i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);
++ for (j = 0; i < track->num_stack_entries && j < KS_ADDRS_COUNT; ++i, ++j)
++ kp_stack[j] = (void *)track->stack_entries[i];
++ if (j < KS_ADDRS_COUNT)
++ kp_stack[j] = NULL;
++}
++
++bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
++{
++ struct kfence_metadata *meta = addr_to_metadata((unsigned long)object);
++ unsigned long flags;
++
++ if (!meta)
++ return false;
++
++ /*
++ * If state is UNUSED at least show the pointer requested; the rest
++ * would be garbage data.
++ */
++ kpp->kp_ptr = object;
++
++ /* Requesting info an a never-used object is almost certainly a bug. */
++ if (WARN_ON(meta->state == KFENCE_OBJECT_UNUSED))
++ return true;
++
++ raw_spin_lock_irqsave(&meta->lock, flags);
++
++ kpp->kp_slab = slab;
++ kpp->kp_slab_cache = meta->cache;
++ kpp->kp_objp = (void *)meta->addr;
++ kfence_to_kp_stack(&meta->alloc_track, kpp->kp_stack);
++ if (meta->state == KFENCE_OBJECT_FREED)
++ kfence_to_kp_stack(&meta->free_track, kpp->kp_free_stack);
++ /* get_stack_skipnr() ensures the first entry is outside allocator. */
++ kpp->kp_ret = kpp->kp_stack[0];
++
++ raw_spin_unlock_irqrestore(&meta->lock, flags);
++
++ return true;
++}
++#endif
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -3650,7 +3650,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_calle
+ #endif /* CONFIG_NUMA */
+
+ #ifdef CONFIG_PRINTK
+-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
++void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+ {
+ struct kmem_cache *cachep;
+ unsigned int objnr;
+--- a/mm/slab.h
++++ b/mm/slab.h
+@@ -851,7 +851,7 @@ struct kmem_obj_info {
+ void *kp_stack[KS_ADDRS_COUNT];
+ void *kp_free_stack[KS_ADDRS_COUNT];
+ };
+-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
++void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
+ #endif
+
+ #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -555,6 +555,13 @@ bool kmem_valid_obj(void *object)
+ }
+ EXPORT_SYMBOL_GPL(kmem_valid_obj);
+
++static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
++{
++ if (__kfence_obj_info(kpp, object, slab))
++ return;
++ __kmem_obj_info(kpp, object, slab);
++}
++
+ /**
+ * kmem_dump_obj - Print available slab provenance information
+ * @object: slab object for which to find provenance information.
+@@ -590,6 +597,8 @@ void kmem_dump_obj(void *object)
+ pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
+ else
+ pr_cont(" slab%s", cp);
++ if (is_kfence_address(object))
++ pr_cont(" (kfence)");
+ if (kp.kp_objp)
+ pr_cont(" start %px", kp.kp_objp);
+ if (kp.kp_data_offset)
+--- a/mm/slob.c
++++ b/mm/slob.c
+@@ -463,7 +463,7 @@ out:
+ }
+
+ #ifdef CONFIG_PRINTK
+-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
++void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+ {
+ kpp->kp_ptr = object;
+ kpp->kp_slab = slab;
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -4322,7 +4322,7 @@ int __kmem_cache_shutdown(struct kmem_ca
+ }
+
+ #ifdef CONFIG_PRINTK
+-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
++void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+ {
+ void *base;
+ int __maybe_unused i;
--- /dev/null
+From 762c2998c9625f642f0d23da7d3f7e4f90665fdf Mon Sep 17 00:00:00 2001
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+Date: Tue, 12 Apr 2022 12:44:26 +0300
+Subject: Revert "net: dsa: setup master before ports"
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+commit 762c2998c9625f642f0d23da7d3f7e4f90665fdf upstream.
+
+This reverts commit 11fd667dac315ea3f2469961f6d2869271a46cae.
+
+dsa_slave_change_mtu() updates the MTU of the DSA master and of the
+associated CPU port, but only if it detects a change to the master MTU.
+
+The blamed commit in the Fixes: tag below addressed a regression where
+dsa_slave_change_mtu() would return early and not do anything due to
+ds->ops->port_change_mtu() not being implemented.
+
+However, that commit also had the effect that the master MTU got set up
+to the correct value by dsa_master_setup(), but the associated CPU port's
+MTU did not get updated. This causes breakage for drivers that rely on
+the ->port_change_mtu() DSA call to account for the tagging overhead on
+the CPU port, and don't set up the initial MTU during the setup phase.
+
+Things actually worked before because they were in a fragile equilibrium
+where dsa_slave_change_mtu() was called before dsa_master_setup() was.
+So dsa_slave_change_mtu() could actually detect a change and update the
+CPU port MTU too.
+
+Restore the code to the way things used to work by reverting the reorder
+of dsa_tree_setup_master() and dsa_tree_setup_ports(). That change did
+not have a concrete motivation going for it anyway, it just looked
+better.
+
+Fixes: 066dfc429040 ("Revert "net: dsa: stop updating master MTU from master.c"")
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/dsa/dsa2.c | 23 ++++++++++-------------
+ 1 file changed, 10 insertions(+), 13 deletions(-)
+
+--- a/net/dsa/dsa2.c
++++ b/net/dsa/dsa2.c
+@@ -561,7 +561,6 @@ static void dsa_port_teardown(struct dsa
+ struct devlink_port *dlp = &dp->devlink_port;
+ struct dsa_switch *ds = dp->ds;
+ struct dsa_mac_addr *a, *tmp;
+- struct net_device *slave;
+
+ if (!dp->setup)
+ return;
+@@ -583,11 +582,9 @@ static void dsa_port_teardown(struct dsa
+ dsa_port_link_unregister_of(dp);
+ break;
+ case DSA_PORT_TYPE_USER:
+- slave = dp->slave;
+-
+- if (slave) {
++ if (dp->slave) {
++ dsa_slave_destroy(dp->slave);
+ dp->slave = NULL;
+- dsa_slave_destroy(slave);
+ }
+ break;
+ }
+@@ -1137,17 +1134,17 @@ static int dsa_tree_setup(struct dsa_swi
+ if (err)
+ goto teardown_cpu_ports;
+
+- err = dsa_tree_setup_master(dst);
++ err = dsa_tree_setup_ports(dst);
+ if (err)
+ goto teardown_switches;
+
+- err = dsa_tree_setup_ports(dst);
++ err = dsa_tree_setup_master(dst);
+ if (err)
+- goto teardown_master;
++ goto teardown_ports;
+
+ err = dsa_tree_setup_lags(dst);
+ if (err)
+- goto teardown_ports;
++ goto teardown_master;
+
+ dst->setup = true;
+
+@@ -1155,10 +1152,10 @@ static int dsa_tree_setup(struct dsa_swi
+
+ return 0;
+
+-teardown_ports:
+- dsa_tree_teardown_ports(dst);
+ teardown_master:
+ dsa_tree_teardown_master(dst);
++teardown_ports:
++ dsa_tree_teardown_ports(dst);
+ teardown_switches:
+ dsa_tree_teardown_switches(dst);
+ teardown_cpu_ports:
+@@ -1176,10 +1173,10 @@ static void dsa_tree_teardown(struct dsa
+
+ dsa_tree_teardown_lags(dst);
+
+- dsa_tree_teardown_ports(dst);
+-
+ dsa_tree_teardown_master(dst);
+
++ dsa_tree_teardown_ports(dst);
++
+ dsa_tree_teardown_switches(dst);
+
+ dsa_tree_teardown_cpu_ports(dst);
arm-davinci-da850-evm-avoid-null-pointer-dereference.patch
ep93xx-clock-fix-uaf-in-ep93xx_clk_register_gate.patch
dm-integrity-fix-memory-corruption-when-tag_size-is-less-than-digest-size.patch
+i2c-dev-check-return-value-when-calling-dev_set_name.patch
+revert-net-dsa-setup-master-before-ports.patch
+smp-fix-offline-cpu-check-in-flush_smp_call_function_queue.patch
+dt-bindings-memory-snps-ddrc-3.80a-compatible-also-need-interrupts.patch
+i2c-pasemi-wait-for-write-xfers-to-finish.patch
+dt-bindings-net-snps-remove-duplicate-name.patch
+timers-fix-warning-condition-in-__run_timers.patch
+dma-direct-avoid-redundant-memory-sync-for-swiotlb.patch
+mm-kfence-support-kmem_dump_obj-for-kfence-objects.patch
+drm-i915-sunset-igpu-legacy-mmap-support-based-on-graphics_ver_full.patch
+cpu-hotplug-remove-the-cpu-member-of-cpuhp_cpu_state.patch
--- /dev/null
+From 9e949a3886356fe9112c6f6f34a6e23d1d35407f Mon Sep 17 00:00:00 2001
+From: Nadav Amit <namit@vmware.com>
+Date: Sat, 19 Mar 2022 00:20:15 -0700
+Subject: smp: Fix offline cpu check in flush_smp_call_function_queue()
+
+From: Nadav Amit <namit@vmware.com>
+
+commit 9e949a3886356fe9112c6f6f34a6e23d1d35407f upstream.
+
+The check in flush_smp_call_function_queue() for callbacks that are sent
+to offline CPUs currently checks whether the queue is empty.
+
+However, flush_smp_call_function_queue() has just deleted all the
+callbacks from the queue and moved all the entries into a local list.
+This check would only be positive if some callbacks were added in the
+short time after llist_del_all() was called. This does not seem to be
+the intention of this check.
+
+Change the check to look at the local list to which the entries were
+moved instead of the queue from which all the callbacks were just
+removed.
+
+Fixes: 8d056c48e4862 ("CPU hotplug, smp: flush any pending IPI callbacks before CPU offline")
+Signed-off-by: Nadav Amit <namit@vmware.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/20220319072015.1495036-1-namit@vmware.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/smp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -579,7 +579,7 @@ static void flush_smp_call_function_queu
+
+ /* There shouldn't be any pending callbacks on an offline CPU. */
+ if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
+- !warned && !llist_empty(head))) {
++ !warned && entry != NULL)) {
+ warned = true;
+ WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
+
--- /dev/null
+From c54bc0fc84214b203f7a0ebfd1bd308ce2abe920 Mon Sep 17 00:00:00 2001
+From: Anna-Maria Behnsen <anna-maria@linutronix.de>
+Date: Tue, 5 Apr 2022 21:17:32 +0200
+Subject: timers: Fix warning condition in __run_timers()
+
+From: Anna-Maria Behnsen <anna-maria@linutronix.de>
+
+commit c54bc0fc84214b203f7a0ebfd1bd308ce2abe920 upstream.
+
+When the timer base is empty, base::next_expiry is set to base::clk +
+NEXT_TIMER_MAX_DELTA and base::next_expiry_recalc is false. When no timer
+is queued until jiffies reaches base::next_expiry value, the warning for
+not finding any expired timer and base::next_expiry_recalc is false in
+__run_timers() triggers.
+
+To prevent triggering the warning in this valid scenario
+base::timers_pending needs to be added to the warning condition.
+
+Fixes: 31cd0e119d50 ("timers: Recalculate next timer interrupt only when necessary")
+Reported-by: Johannes Berg <johannes@sipsolutions.net>
+Signed-off-by: Anna-Maria Behnsen <anna-maria@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Link: https://lore.kernel.org/r/20220405191732.7438-3-anna-maria@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/time/timer.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1722,11 +1722,14 @@ static inline void __run_timers(struct t
+ time_after_eq(jiffies, base->next_expiry)) {
+ levels = collect_expired_timers(base, heads);
+ /*
+- * The only possible reason for not finding any expired
+- * timer at this clk is that all matching timers have been
+- * dequeued.
++ * The two possible reasons for not finding any expired
++ * timer at this clk are that all matching timers have been
++ * dequeued or no timer has been queued since
++ * base::next_expiry was set to base::clk +
++ * NEXT_TIMER_MAX_DELTA.
+ */
+- WARN_ON_ONCE(!levels && !base->next_expiry_recalc);
++ WARN_ON_ONCE(!levels && !base->next_expiry_recalc
++ && base->timers_pending);
+ base->clk++;
+ base->next_expiry = __next_timer_interrupt(base);
+