--- /dev/null
+From d4955c0ad77dbc684fc716387070ac24801b8bca Mon Sep 17 00:00:00 2001
+From: Sergey Shtylyov <s.shtylyov@omp.ru>
+Date: Fri, 16 Sep 2022 23:17:07 +0300
+Subject: arm64: topology: fix possible overflow in amu_fie_setup()
+
+From: Sergey Shtylyov <s.shtylyov@omp.ru>
+
+commit d4955c0ad77dbc684fc716387070ac24801b8bca upstream.
+
+cpufreq_get_hw_max_freq() returns max frequency in kHz as *unsigned int*,
+while freq_inv_set_max_ratio() gets passed this frequency in Hz as 'u64'.
+Multiplying max frequency by 1000 can potentially result in overflow --
+multiplying by 1000ULL instead should avoid that...
+
+Found by Linux Verification Center (linuxtesting.org) with the SVACE static
+analysis tool.
+
+Fixes: cd0ed03a8903 ("arm64: use activity monitors for frequency invariance")
+Signed-off-by: Sergey Shtylyov <s.shtylyov@omp.ru>
+Link: https://lore.kernel.org/r/01493d64-2bce-d968-86dc-11a122a9c07d@omp.ru
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/topology.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/topology.c
++++ b/arch/arm64/kernel/topology.c
+@@ -251,7 +251,7 @@ static void amu_fie_setup(const struct c
+ for_each_cpu(cpu, cpus) {
+ if (!freq_counters_valid(cpu) ||
+ freq_inv_set_max_ratio(cpu,
+- cpufreq_get_hw_max_freq(cpu) * 1000,
++ cpufreq_get_hw_max_freq(cpu) * 1000ULL,
+ arch_timer_get_rate()))
+ return;
+ }
--- /dev/null
+From a09721dd47c8468b3f2fdd73f40422699ffe26dd Mon Sep 17 00:00:00 2001
+From: Marc Kleine-Budde <mkl@pengutronix.de>
+Date: Thu, 11 Aug 2022 10:25:44 +0200
+Subject: can: flexcan: flexcan_mailbox_read() fix return value for drop = true
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Marc Kleine-Budde <mkl@pengutronix.de>
+
+commit a09721dd47c8468b3f2fdd73f40422699ffe26dd upstream.
+
+The following happened on an i.MX25 using flexcan with many packets on
+the bus:
+
+The rx-offload queue reached a length more than skb_queue_len_max. In
+can_rx_offload_offload_one() the drop variable was set to true which
+made the call to .mailbox_read() (here: flexcan_mailbox_read()) to
+_always_ return ERR_PTR(-ENOBUFS) and drop the rx'ed CAN frame. So
+can_rx_offload_offload_one() returned ERR_PTR(-ENOBUFS), too.
+
+can_rx_offload_irq_offload_fifo() looks as follows:
+
+| while (1) {
+| skb = can_rx_offload_offload_one(offload, 0);
+| if (IS_ERR(skb))
+| continue;
+| if (!skb)
+| break;
+| ...
+| }
+
+The flexcan driver wrongly always returns ERR_PTR(-ENOBUFS) if drop is
+requested, even if there is no CAN frame pending. As the i.MX25 is a
+single core CPU, while the rx-offload processing is active, there is
+no thread to process packets from the offload queue. So the queue
+doesn't get any shorter and this results in a tight loop.
+
+Instead of always returning ERR_PTR(-ENOBUFS) if drop is requested,
+return NULL if no CAN frame is pending.
+
+Changes since v1: https://lore.kernel.org/all/20220810144536.389237-1-u.kleine-koenig@pengutronix.de
+- don't break in can_rx_offload_irq_offload_fifo() in case of an error,
+ return NULL in flexcan_mailbox_read() in case of no pending CAN frame
+ instead
+
+Fixes: 4e9c9484b085 ("can: rx-offload: Prepare for CAN FD support")
+Link: https://lore.kernel.org/all/20220811094254.1864367-1-mkl@pengutronix.de
+Cc: stable@vger.kernel.org # v5.5
+Suggested-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Reviewed-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Tested-by: Thorsten Scherer <t.scherer@eckelmann.de>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/can/flexcan/flexcan-core.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/can/flexcan/flexcan-core.c
++++ b/drivers/net/can/flexcan/flexcan-core.c
+@@ -941,11 +941,6 @@ static struct sk_buff *flexcan_mailbox_r
+ u32 reg_ctrl, reg_id, reg_iflag1;
+ int i;
+
+- if (unlikely(drop)) {
+- skb = ERR_PTR(-ENOBUFS);
+- goto mark_as_read;
+- }
+-
+ mb = flexcan_get_mb(priv, n);
+
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
+@@ -974,6 +969,11 @@ static struct sk_buff *flexcan_mailbox_r
+ reg_ctrl = priv->read(&mb->can_ctrl);
+ }
+
++ if (unlikely(drop)) {
++ skb = ERR_PTR(-ENOBUFS);
++ goto mark_as_read;
++ }
++
+ if (reg_ctrl & FLEXCAN_MB_CNT_EDL)
+ skb = alloc_canfd_skb(offload->dev, &cfd);
+ else
--- /dev/null
+From 5ce8f7444f8fbb5adee644590c0e4e1890ab004c Mon Sep 17 00:00:00 2001
+From: Janusz Krzysztofik <janusz.krzysztofik@linux.intel.com>
+Date: Fri, 16 Sep 2022 11:24:02 +0200
+Subject: drm/i915/gem: Flush contexts on driver release
+
+From: Janusz Krzysztofik <janusz.krzysztofik@linux.intel.com>
+
+commit 5ce8f7444f8fbb5adee644590c0e4e1890ab004c upstream.
+
+Due to i915_perf assuming that it can use the i915_gem_context reference
+to protect its i915->gem.contexts.list iteration, we need to defer removal
+of the context from the list until last reference to the context is put.
+However, there is a risk of triggering kernel warning on contexts list not
+empty at driver release time if we delegate that task to a worker for
+i915_gem_context_release_work(), unless that work is flushed first.
+Unfortunately, it is not flushed on driver release. Fix it.
+
+Instead of additionally calling flush_workqueue(), either directly or via
+a new dedicated wrapper around it, replace last call to
+i915_gem_drain_freed_objects() with existing i915_gem_drain_workqueue()
+that performs both tasks.
+
+Fixes: 75eefd82581f ("drm/i915: Release i915_gem_context from a worker")
+Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
+Signed-off-by: Janusz Krzysztofik <janusz.krzysztofik@linux.intel.com>
+Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
+Cc: stable@kernel.org # v5.16+
+Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220916092403.201355-2-janusz.krzysztofik@linux.intel.com
+(cherry picked from commit 1cec34442408a77ba5396b19725fed2c398005c3)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_gem.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1191,7 +1191,8 @@ void i915_gem_driver_release(struct drm_
+
+ intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);
+
+- i915_gem_drain_freed_objects(dev_priv);
++ /* Flush any outstanding work, including i915_gem_context.release_work. */
++ i915_gem_drain_workqueue(dev_priv);
+
+ drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
+ }
--- /dev/null
+From d119888b09bd567e07c6b93a07f175df88857e02 Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Fri, 16 Sep 2022 11:24:03 +0200
+Subject: drm/i915/gem: Really move i915_gem_context.link under ref protection
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit d119888b09bd567e07c6b93a07f175df88857e02 upstream.
+
+i915_perf assumes that it can use the i915_gem_context reference to
+protect its i915->gem.contexts.list iteration. However, this requires
+that we do not remove the context from the list until after we drop the
+final reference and release the struct. If, as currently, we remove the
+context from the list during context_close(), the link.next pointer may
+be poisoned while we are holding the context reference and cause a GPF:
+
+[ 4070.573157] i915 0000:00:02.0: [drm:i915_perf_open_ioctl [i915]] filtering on ctx_id=0x1fffff ctx_id_mask=0x1fffff
+[ 4070.574881] general protection fault, probably for non-canonical address 0xdead000000000100: 0000 [#1] PREEMPT SMP
+[ 4070.574897] CPU: 1 PID: 284392 Comm: amd_performance Tainted: G E 5.17.9 #180
+[ 4070.574903] Hardware name: Intel Corporation NUC7i5BNK/NUC7i5BNB, BIOS BNKBL357.86A.0052.2017.0918.1346 09/18/2017
+[ 4070.574907] RIP: 0010:oa_configure_all_contexts.isra.0+0x222/0x350 [i915]
+[ 4070.574982] Code: 08 e8 32 6e 10 e1 4d 8b 6d 50 b8 ff ff ff ff 49 83 ed 50 f0 41 0f c1 04 24 83 f8 01 0f 84 e3 00 00 00 85 c0 0f 8e fa 00 00 00 <49> 8b 45 50 48 8d 70 b0 49 8d 45 50 48 39 44 24 10 0f 85 34 fe ff
+[ 4070.574990] RSP: 0018:ffffc90002077b78 EFLAGS: 00010202
+[ 4070.574995] RAX: 0000000000000002 RBX: 0000000000000002 RCX: 0000000000000000
+[ 4070.575000] RDX: 0000000000000001 RSI: ffffc90002077b20 RDI: ffff88810ddc7c68
+[ 4070.575004] RBP: 0000000000000001 R08: ffff888103242648 R09: fffffffffffffffc
+[ 4070.575008] R10: ffffffff82c50bc0 R11: 0000000000025c80 R12: ffff888101bf1860
+[ 4070.575012] R13: dead0000000000b0 R14: ffffc90002077c04 R15: ffff88810be5cabc
+[ 4070.575016] FS: 00007f1ed50c0780(0000) GS:ffff88885ec80000(0000) knlGS:0000000000000000
+[ 4070.575021] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 4070.575025] CR2: 00007f1ed5590280 CR3: 000000010ef6f005 CR4: 00000000003706e0
+[ 4070.575029] Call Trace:
+[ 4070.575033] <TASK>
+[ 4070.575037] lrc_configure_all_contexts+0x13e/0x150 [i915]
+[ 4070.575103] gen8_enable_metric_set+0x4d/0x90 [i915]
+[ 4070.575164] i915_perf_open_ioctl+0xbc0/0x1500 [i915]
+[ 4070.575224] ? asm_common_interrupt+0x1e/0x40
+[ 4070.575232] ? i915_oa_init_reg_state+0x110/0x110 [i915]
+[ 4070.575290] drm_ioctl_kernel+0x85/0x110
+[ 4070.575296] ? update_load_avg+0x5f/0x5e0
+[ 4070.575302] drm_ioctl+0x1d3/0x370
+[ 4070.575307] ? i915_oa_init_reg_state+0x110/0x110 [i915]
+[ 4070.575382] ? gen8_gt_irq_handler+0x46/0x130 [i915]
+[ 4070.575445] __x64_sys_ioctl+0x3c4/0x8d0
+[ 4070.575451] ? __do_softirq+0xaa/0x1d2
+[ 4070.575456] do_syscall_64+0x35/0x80
+[ 4070.575461] entry_SYSCALL_64_after_hwframe+0x44/0xae
+[ 4070.575467] RIP: 0033:0x7f1ed5c10397
+[ 4070.575471] Code: 3c 1c e8 1c ff ff ff 85 c0 79 87 49 c7 c4 ff ff ff ff 5b 5d 4c 89 e0 41 5c c3 66 0f 1f 84 00 00 00 00 00 b8 10 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d a9 da 0d 00 f7 d8 64 89 01 48
+[ 4070.575478] RSP: 002b:00007ffd65c8d7a8 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+[ 4070.575484] RAX: ffffffffffffffda RBX: 0000000000000006 RCX: 00007f1ed5c10397
+[ 4070.575488] RDX: 00007ffd65c8d7c0 RSI: 0000000040106476 RDI: 0000000000000006
+[ 4070.575492] RBP: 00005620972f9c60 R08: 000000000000000a R09: 0000000000000005
+[ 4070.575496] R10: 000000000000000d R11: 0000000000000246 R12: 000000000000000a
+[ 4070.575500] R13: 000000000000000d R14: 0000000000000000 R15: 00007ffd65c8d7c0
+[ 4070.575505] </TASK>
+[ 4070.575507] Modules linked in: nls_ascii(E) nls_cp437(E) vfat(E) fat(E) i915(E) x86_pkg_temp_thermal(E) intel_powerclamp(E) crct10dif_pclmul(E) crc32_pclmul(E) crc32c_intel(E) aesni_intel(E) crypto_simd(E) intel_gtt(E) cryptd(E) ttm(E) rapl(E) intel_cstate(E) drm_kms_helper(E) cfbfillrect(E) syscopyarea(E) cfbimgblt(E) intel_uncore(E) sysfillrect(E) mei_me(E) sysimgblt(E) i2c_i801(E) fb_sys_fops(E) mei(E) intel_pch_thermal(E) i2c_smbus(E) cfbcopyarea(E) video(E) button(E) efivarfs(E) autofs4(E)
+[ 4070.575549] ---[ end trace 0000000000000000 ]---
+
+v3: fix incorrect syntax of spin_lock() replacing spin_lock_irqsave()
+
+v2: irqsave not required in a worker, neither conversion to irq safe
+ elsewhere (Tvrtko),
+ - perf: it's safe to call gen8_configure_context() even if context has
+ been closed, no need to check,
+ - drop unrelated cleanup (Andi, Tvrtko)
+
+Reported-by: Mark Janes <mark.janes@intel.com>
+Closes: https://gitlab.freedesktop.org/drm/intel/issues/6222
+References: a4e7ccdac38e ("drm/i915: Move context management under GEM")
+Fixes: f8246cf4d9a9 ("drm/i915/gem: Drop free_work for GEM contexts")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
+Signed-off-by: Janusz Krzysztofik <janusz.krzysztofik@linux.intel.com>
+Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Cc: <stable@vger.kernel.org> # v5.12+
+Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220916092403.201355-3-janusz.krzysztofik@linux.intel.com
+(cherry picked from commit ad3aa7c31efa5a09b0dba42e66cfdf77e0db7dc2)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/gem/i915_gem_context.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+@@ -1269,6 +1269,10 @@ static void i915_gem_context_release_wor
+ trace_i915_context_free(ctx);
+ GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+
++ spin_lock(&ctx->i915->gem.contexts.lock);
++ list_del(&ctx->link);
++ spin_unlock(&ctx->i915->gem.contexts.lock);
++
+ if (ctx->syncobj)
+ drm_syncobj_put(ctx->syncobj);
+
+@@ -1514,10 +1518,6 @@ static void context_close(struct i915_ge
+
+ ctx->file_priv = ERR_PTR(-EBADF);
+
+- spin_lock(&ctx->i915->gem.contexts.lock);
+- list_del(&ctx->link);
+- spin_unlock(&ctx->i915->gem.contexts.lock);
+-
+ client = ctx->client;
+ if (client) {
+ spin_lock(&client->ctx_lock);
--- /dev/null
+From b7df41a6f79dfb18ba2203f8c5f0e9c0b9b57f68 Mon Sep 17 00:00:00 2001
+From: Bartosz Golaszewski <brgl@bgdev.pl>
+Date: Tue, 20 Sep 2022 09:18:41 +0200
+Subject: gpio: mockup: fix NULL pointer dereference when removing debugfs
+
+From: Bartosz Golaszewski <brgl@bgdev.pl>
+
+commit b7df41a6f79dfb18ba2203f8c5f0e9c0b9b57f68 upstream.
+
+We now remove the device's debugfs entries when unbinding the driver.
+This now causes a NULL-pointer dereference on module exit because the
+platform devices are unregistered *after* the global debugfs directory
+has been recursively removed. Fix it by unregistering the devices first.
+
+Fixes: 303e6da99429 ("gpio: mockup: remove gpio debugfs when remove device")
+Cc: Wei Yongjun <weiyongjun1@huawei.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Bartosz Golaszewski <brgl@bgdev.pl>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/gpio-mockup.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpio/gpio-mockup.c
++++ b/drivers/gpio/gpio-mockup.c
+@@ -597,9 +597,9 @@ static int __init gpio_mockup_init(void)
+
+ static void __exit gpio_mockup_exit(void)
+ {
++ gpio_mockup_unregister_pdevs();
+ debugfs_remove_recursive(gpio_mockup_dbg_dir);
+ platform_driver_unregister(&gpio_mockup_driver);
+- gpio_mockup_unregister_pdevs();
+ }
+
+ module_init(gpio_mockup_init);
--- /dev/null
+From 02743c4091ccfb246f5cdbbe3f44b152d5d12933 Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Tue, 20 Sep 2022 16:30:31 +0300
+Subject: gpio: mockup: Fix potential resource leakage when register a chip
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit 02743c4091ccfb246f5cdbbe3f44b152d5d12933 upstream.
+
+If creation of software node fails, the locally allocated string
+array is left unfreed. Free it on error path.
+
+Fixes: 6fda593f3082 ("gpio: mockup: Convert to use software nodes")
+Cc: stable@vger.kernel.org
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Bartosz Golaszewski <brgl@bgdev.pl>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/gpio-mockup.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpio/gpio-mockup.c
++++ b/drivers/gpio/gpio-mockup.c
+@@ -533,8 +533,10 @@ static int __init gpio_mockup_register_c
+ }
+
+ fwnode = fwnode_create_software_node(properties, NULL);
+- if (IS_ERR(fwnode))
++ if (IS_ERR(fwnode)) {
++ kfree_strarray(line_names, ngpio);
+ return PTR_ERR(fwnode);
++ }
+
+ pdevinfo.name = "gpio-mockup";
+ pdevinfo.id = idx;
--- /dev/null
+From 69bef19d6b9700e96285f4b4e28691cda3dcd0d1 Mon Sep 17 00:00:00 2001
+From: Meng Li <Meng.Li@windriver.com>
+Date: Wed, 21 Sep 2022 11:20:20 +0800
+Subject: gpiolib: cdev: Set lineevent_state::irq after IRQ register successfully
+
+From: Meng Li <Meng.Li@windriver.com>
+
+commit 69bef19d6b9700e96285f4b4e28691cda3dcd0d1 upstream.
+
+When running gpio test on nxp-ls1028 platform with below command
+gpiomon --num-events=3 --rising-edge gpiochip1 25
+There will be a warning trace as below:
+Call trace:
+free_irq+0x204/0x360
+lineevent_free+0x64/0x70
+gpio_ioctl+0x598/0x6a0
+__arm64_sys_ioctl+0xb4/0x100
+invoke_syscall+0x5c/0x130
+......
+el0t_64_sync+0x1a0/0x1a4
+The reason of this issue is that calling request_threaded_irq()
+function failed, and then lineevent_free() is invoked to release
+the resource. Since the lineevent_state::irq was already set, so
+the subsequent invocation of free_irq() would trigger the above
+warning call trace. To fix this issue, set the lineevent_state::irq
+after the IRQ register successfully.
+
+Fixes: 468242724143 ("gpiolib: cdev: refactor lineevent cleanup into lineevent_free")
+Cc: stable@vger.kernel.org
+Signed-off-by: Meng Li <Meng.Li@windriver.com>
+Reviewed-by: Kent Gibson <warthog618@gmail.com>
+Signed-off-by: Bartosz Golaszewski <brgl@bgdev.pl>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/gpiolib-cdev.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpio/gpiolib-cdev.c
++++ b/drivers/gpio/gpiolib-cdev.c
+@@ -1975,7 +1975,6 @@ static int lineevent_create(struct gpio_
+ ret = -ENODEV;
+ goto out_free_le;
+ }
+- le->irq = irq;
+
+ if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
+ irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+@@ -1989,7 +1988,7 @@ static int lineevent_create(struct gpio_
+ init_waitqueue_head(&le->wait);
+
+ /* Request a thread to read the events */
+- ret = request_threaded_irq(le->irq,
++ ret = request_threaded_irq(irq,
+ lineevent_irq_handler,
+ lineevent_irq_thread,
+ irqflags,
+@@ -1998,6 +1997,8 @@ static int lineevent_create(struct gpio_
+ if (ret)
+ goto out_free_le;
+
++ le->irq = irq;
++
+ fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ ret = fd;
--- /dev/null
+From 5373b8a09d6e037ee0587cb5d9fe4cc09077deeb Mon Sep 17 00:00:00 2001
+From: Peter Collingbourne <pcc@google.com>
+Date: Tue, 13 Sep 2022 19:00:01 -0700
+Subject: kasan: call kasan_malloc() from __kmalloc_*track_caller()
+
+From: Peter Collingbourne <pcc@google.com>
+
+commit 5373b8a09d6e037ee0587cb5d9fe4cc09077deeb upstream.
+
+We were failing to call kasan_malloc() from __kmalloc_*track_caller()
+which was causing us to sometimes fail to produce KASAN error reports
+for allocations made using e.g. devm_kcalloc(), as the KASAN poison was
+not being initialized. Fix it.
+
+Signed-off-by: Peter Collingbourne <pcc@google.com>
+Cc: <stable@vger.kernel.org> # 5.15
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slub.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -4950,6 +4950,8 @@ void *__kmalloc_track_caller(size_t size
+ /* Honor the call site pointer we received. */
+ trace_kmalloc(caller, ret, size, s->size, gfpflags);
+
++ ret = kasan_kmalloc(s, ret, size, gfpflags);
++
+ return ret;
+ }
+ EXPORT_SYMBOL(__kmalloc_track_caller);
+@@ -4981,6 +4983,8 @@ void *__kmalloc_node_track_caller(size_t
+ /* Honor the call site pointer we received. */
+ trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
+
++ ret = kasan_kmalloc(s, ret, size, gfpflags);
++
+ return ret;
+ }
+ EXPORT_SYMBOL(__kmalloc_node_track_caller);
--- /dev/null
+From a1020a25e69755a8a1a37735d674b91d6f02939f Mon Sep 17 00:00:00 2001
+From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
+Date: Wed, 24 Aug 2022 03:30:56 +0000
+Subject: KVM: x86: Always enable legacy FP/SSE in allowed user XFEATURES
+
+From: Dr. David Alan Gilbert <dgilbert@redhat.com>
+
+commit a1020a25e69755a8a1a37735d674b91d6f02939f upstream.
+
+Allow FP and SSE state to be saved and restored via KVM_{G,SET}_XSAVE on
+XSAVE-capable hosts even if their bits are not exposed to the guest via
+XCR0.
+
+Failing to allow FP+SSE first showed up as a QEMU live migration failure,
+where migrating a VM from a pre-XSAVE host, e.g. Nehalem, to an XSAVE
+host failed due to KVM rejecting KVM_SET_XSAVE. However, the bug also
+causes problems even when migrating between XSAVE-capable hosts as
+KVM_GET_XSAVE won't set any bits in user_xfeatures if XSAVE isn't exposed
+to the guest, i.e. KVM will fail to actually migrate FP+SSE.
+
+Because KVM_{G,S}ET_XSAVE are designed to allow migrating between
+hosts with and without XSAVE, KVM_GET_XSAVE on a non-XSAVE (by way of
+fpu_copy_guest_fpstate_to_uabi()) always sets the FP+SSE bits in the
+header so that KVM_SET_XSAVE will work even if the new host supports
+XSAVE.
+
+Fixes: ad856280ddea ("x86/kvm/fpu: Limit guest user_xfeatures to supported bits of XCR0")
+bz: https://bugzilla.redhat.com/show_bug.cgi?id=2079311
+Cc: stable@vger.kernel.org
+Cc: Leonardo Bras <leobras@redhat.com>
+Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
+[sean: add comment, massage changelog]
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20220824033057.3576315-3-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/cpuid.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -297,7 +297,13 @@ static void kvm_vcpu_after_set_cpuid(str
+ vcpu->arch.guest_supported_xcr0 =
+ cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
+
+- vcpu->arch.guest_fpu.fpstate->user_xfeatures = vcpu->arch.guest_supported_xcr0;
++ /*
++ * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
++ * XSAVE/XCRO are not exposed to the guest, and even if XSAVE isn't
++ * supported by the host.
++ */
++ vcpu->arch.guest_fpu.fpstate->user_xfeatures = vcpu->arch.guest_supported_xcr0 |
++ XFEATURE_MASK_FPSSE;
+
+ kvm_update_pv_runtime(vcpu);
+
--- /dev/null
+From 50b2d49bafa16e6311ab2da82f5aafc5f9ada99b Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Wed, 24 Aug 2022 03:30:57 +0000
+Subject: KVM: x86: Inject #UD on emulated XSETBV if XSAVES isn't enabled
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 50b2d49bafa16e6311ab2da82f5aafc5f9ada99b upstream.
+
+Inject #UD when emulating XSETBV if CR4.OSXSAVE is not set. This also
+covers the "XSAVE not supported" check, as setting CR4.OSXSAVE=1 #GPs if
+XSAVE is not supported (and userspace gets to keep the pieces if it
+forces incoherent vCPU state).
+
+Add a comment to kvm_emulate_xsetbv() to call out that the CPU checks
+CR4.OSXSAVE before checking for intercepts. AMD'S APM implies that #UD
+has priority (says that intercepts are checked before #GP exceptions),
+while Intel's SDM says nothing about interception priority. However,
+testing on hardware shows that both AMD and Intel CPUs prioritize the #UD
+over interception.
+
+Fixes: 02d4160fbd76 ("x86: KVM: add xsetbv to the emulator")
+Cc: stable@vger.kernel.org
+Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20220824033057.3576315-4-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/emulate.c | 3 +++
+ arch/x86/kvm/x86.c | 1 +
+ 2 files changed, 4 insertions(+)
+
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -4134,6 +4134,9 @@ static int em_xsetbv(struct x86_emulate_
+ {
+ u32 eax, ecx, edx;
+
++ if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSXSAVE))
++ return emulate_ud(ctxt);
++
+ eax = reg_read(ctxt, VCPU_REGS_RAX);
+ edx = reg_read(ctxt, VCPU_REGS_RDX);
+ ecx = reg_read(ctxt, VCPU_REGS_RCX);
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1079,6 +1079,7 @@ static int __kvm_set_xcr(struct kvm_vcpu
+
+ int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
+ {
++ /* Note, #UD due to CR4.OSXSAVE=0 has priority over the intercept. */
+ if (static_call(kvm_x86_get_cpl)(vcpu) != 0 ||
+ __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) {
+ kvm_inject_gp(vcpu, 0);
--- /dev/null
+From ee519b3a2ae3027c341bce829ee8c51f4f494f5b Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Wed, 24 Aug 2022 03:30:55 +0000
+Subject: KVM: x86: Reinstate kvm_vcpu_arch.guest_supported_xcr0
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit ee519b3a2ae3027c341bce829ee8c51f4f494f5b upstream.
+
+Reinstate the per-vCPU guest_supported_xcr0 by partially reverting
+commit 988896bb6182; the implicit assessment that guest_supported_xcr0 is
+always the same as guest_fpu.fpstate->user_xfeatures was incorrect.
+
+kvm_vcpu_after_set_cpuid() isn't the only place that sets user_xfeatures,
+as user_xfeatures is set to fpu_user_cfg.default_features when guest_fpu
+is allocated via fpu_alloc_guest_fpstate() => __fpstate_reset().
+guest_supported_xcr0 on the other hand is zero-allocated. If userspace
+never invokes KVM_SET_CPUID2, supported XCR0 will be '0', whereas the
+allowed user XFEATURES will be non-zero.
+
+Practically speaking, the edge case likely doesn't matter as no sane
+userspace will live migrate a VM without ever doing KVM_SET_CPUID2. The
+primary motivation is to prepare for KVM intentionally and explicitly
+setting bits in user_xfeatures that are not set in guest_supported_xcr0.
+
+Because KVM_{G,S}ET_XSAVE can be used to save/restore FP+SSE state even
+if the host doesn't support XSAVE, KVM needs to set the FP+SSE bits in
+user_xfeatures even if they're not allowed in XCR0, e.g. because XCR0
+isn't exposed to the guest. At that point, the simplest fix is to track
+the two things separately (allowed save/restore vs. allowed XCR0).
+
+Fixes: 988896bb6182 ("x86/kvm/fpu: Remove kvm_vcpu_arch.guest_supported_xcr0")
+Cc: stable@vger.kernel.org
+Cc: Leonardo Bras <leobras@redhat.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20220824033057.3576315-2-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/kvm_host.h | 1 +
+ arch/x86/kvm/cpuid.c | 5 ++---
+ arch/x86/kvm/x86.c | 9 ++-------
+ 3 files changed, 5 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -713,6 +713,7 @@ struct kvm_vcpu_arch {
+ struct fpu_guest guest_fpu;
+
+ u64 xcr0;
++ u64 guest_supported_xcr0;
+
+ struct kvm_pio_request pio;
+ void *pio_data;
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -283,7 +283,6 @@ static void kvm_vcpu_after_set_cpuid(str
+ {
+ struct kvm_lapic *apic = vcpu->arch.apic;
+ struct kvm_cpuid_entry2 *best;
+- u64 guest_supported_xcr0;
+
+ best = kvm_find_cpuid_entry(vcpu, 1, 0);
+ if (best && apic) {
+@@ -295,10 +294,10 @@ static void kvm_vcpu_after_set_cpuid(str
+ kvm_apic_set_version(vcpu);
+ }
+
+- guest_supported_xcr0 =
++ vcpu->arch.guest_supported_xcr0 =
+ cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
+
+- vcpu->arch.guest_fpu.fpstate->user_xfeatures = guest_supported_xcr0;
++ vcpu->arch.guest_fpu.fpstate->user_xfeatures = vcpu->arch.guest_supported_xcr0;
+
+ kvm_update_pv_runtime(vcpu);
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1025,15 +1025,10 @@ void kvm_load_host_xsave_state(struct kv
+ }
+ EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);
+
+-static inline u64 kvm_guest_supported_xcr0(struct kvm_vcpu *vcpu)
+-{
+- return vcpu->arch.guest_fpu.fpstate->user_xfeatures;
+-}
+-
+ #ifdef CONFIG_X86_64
+ static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
+ {
+- return kvm_guest_supported_xcr0(vcpu) & XFEATURE_MASK_USER_DYNAMIC;
++ return vcpu->arch.guest_supported_xcr0 & XFEATURE_MASK_USER_DYNAMIC;
+ }
+ #endif
+
+@@ -1056,7 +1051,7 @@ static int __kvm_set_xcr(struct kvm_vcpu
+ * saving. However, xcr0 bit 0 is always set, even if the
+ * emulated CPU does not support XSAVE (see kvm_vcpu_reset()).
+ */
+- valid_bits = kvm_guest_supported_xcr0(vcpu) | XFEATURE_MASK_FP;
++ valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
+ if (xcr0 & ~valid_bits)
+ return 1;
+
--- /dev/null
+From e45cc288724f0cfd497bb5920bcfa60caa335729 Mon Sep 17 00:00:00 2001
+From: Maurizio Lombardi <mlombard@redhat.com>
+Date: Mon, 19 Sep 2022 18:39:29 +0200
+Subject: mm: slub: fix flush_cpu_slab()/__free_slab() invocations in task context.
+
+From: Maurizio Lombardi <mlombard@redhat.com>
+
+commit e45cc288724f0cfd497bb5920bcfa60caa335729 upstream.
+
+Commit 5a836bf6b09f ("mm: slub: move flush_cpu_slab() invocations
+__free_slab() invocations out of IRQ context") moved all flush_cpu_slab()
+invocations to the global workqueue to avoid a problem related
+with deactivate_slab()/__free_slab() being called from an IRQ context
+on PREEMPT_RT kernels.
+
+When the flush_all_cpu_locked() function is called from a task context
+it may happen that a workqueue with WQ_MEM_RECLAIM bit set ends up
+flushing the global workqueue, which will cause a dependency issue.
+
+ workqueue: WQ_MEM_RECLAIM nvme-delete-wq:nvme_delete_ctrl_work [nvme_core]
+ is flushing !WQ_MEM_RECLAIM events:flush_cpu_slab
+ WARNING: CPU: 37 PID: 410 at kernel/workqueue.c:2637
+ check_flush_dependency+0x10a/0x120
+ Workqueue: nvme-delete-wq nvme_delete_ctrl_work [nvme_core]
+ RIP: 0010:check_flush_dependency+0x10a/0x120[ 453.262125] Call Trace:
+ __flush_work.isra.0+0xbf/0x220
+ ? __queue_work+0x1dc/0x420
+ flush_all_cpus_locked+0xfb/0x120
+ __kmem_cache_shutdown+0x2b/0x320
+ kmem_cache_destroy+0x49/0x100
+ bioset_exit+0x143/0x190
+ blk_release_queue+0xb9/0x100
+ kobject_cleanup+0x37/0x130
+ nvme_fc_ctrl_free+0xc6/0x150 [nvme_fc]
+ nvme_free_ctrl+0x1ac/0x2b0 [nvme_core]
+
+Fix this bug by creating a workqueue for the flush operation with
+the WQ_MEM_RECLAIM bit set.
+
+Fixes: 5a836bf6b09f ("mm: slub: move flush_cpu_slab() invocations __free_slab() invocations out of IRQ context")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Maurizio Lombardi <mlombard@redhat.com>
+Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slub.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -310,6 +310,11 @@ static inline void stat(const struct kme
+ */
+ static nodemask_t slab_nodes;
+
++/*
++ * Workqueue used for flush_cpu_slab().
++ */
++static struct workqueue_struct *flushwq;
++
+ /********************************************************************
+ * Core slab cache functions
+ *******************************************************************/
+@@ -2730,7 +2735,7 @@ static void flush_all_cpus_locked(struct
+ INIT_WORK(&sfw->work, flush_cpu_slab);
+ sfw->skip = false;
+ sfw->s = s;
+- schedule_work_on(cpu, &sfw->work);
++ queue_work_on(cpu, flushwq, &sfw->work);
+ }
+
+ for_each_online_cpu(cpu) {
+@@ -4880,6 +4885,8 @@ void __init kmem_cache_init(void)
+
+ void __init kmem_cache_init_late(void)
+ {
++ flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0);
++ WARN_ON(!flushwq);
+ }
+
+ struct kmem_cache *
--- /dev/null
+From 7e9c323c52b379d261a72dc7bd38120a761a93cd Mon Sep 17 00:00:00 2001
+From: Chao Yu <chao.yu@oppo.com>
+Date: Wed, 31 Aug 2022 22:54:54 +0800
+Subject: mm/slub: fix to return errno if kmalloc() fails
+
+From: Chao Yu <chao.yu@oppo.com>
+
+commit 7e9c323c52b379d261a72dc7bd38120a761a93cd upstream.
+
+In create_unique_id(), kmalloc(, GFP_KERNEL) can fail due to
+out-of-memory, if it fails, return errno correctly rather than
+triggering panic via BUG_ON();
+
+kernel BUG at mm/slub.c:5893!
+Internal error: Oops - BUG: 0 [#1] PREEMPT SMP
+
+Call trace:
+ sysfs_slab_add+0x258/0x260 mm/slub.c:5973
+ __kmem_cache_create+0x60/0x118 mm/slub.c:4899
+ create_cache mm/slab_common.c:229 [inline]
+ kmem_cache_create_usercopy+0x19c/0x31c mm/slab_common.c:335
+ kmem_cache_create+0x1c/0x28 mm/slab_common.c:390
+ f2fs_kmem_cache_create fs/f2fs/f2fs.h:2766 [inline]
+ f2fs_init_xattr_caches+0x78/0xb4 fs/f2fs/xattr.c:808
+ f2fs_fill_super+0x1050/0x1e0c fs/f2fs/super.c:4149
+ mount_bdev+0x1b8/0x210 fs/super.c:1400
+ f2fs_mount+0x44/0x58 fs/f2fs/super.c:4512
+ legacy_get_tree+0x30/0x74 fs/fs_context.c:610
+ vfs_get_tree+0x40/0x140 fs/super.c:1530
+ do_new_mount+0x1dc/0x4e4 fs/namespace.c:3040
+ path_mount+0x358/0x914 fs/namespace.c:3370
+ do_mount fs/namespace.c:3383 [inline]
+ __do_sys_mount fs/namespace.c:3591 [inline]
+ __se_sys_mount fs/namespace.c:3568 [inline]
+ __arm64_sys_mount+0x2f8/0x408 fs/namespace.c:3568
+
+Cc: <stable@kernel.org>
+Fixes: 81819f0fc8285 ("SLUB core")
+Reported-by: syzbot+81684812ea68216e08c5@syzkaller.appspotmail.com
+Reviewed-by: Muchun Song <songmuchun@bytedance.com>
+Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
+Signed-off-by: Chao Yu <chao.yu@oppo.com>
+Acked-by: David Rientjes <rientjes@google.com>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slub.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -5918,7 +5918,8 @@ static char *create_unique_id(struct kme
+ char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
+ char *p = name;
+
+- BUG_ON(!name);
++ if (!name)
++ return ERR_PTR(-ENOMEM);
+
+ *p++ = ':';
+ /*
+@@ -5976,6 +5977,8 @@ static int sysfs_slab_add(struct kmem_ca
+ * for the symlinks.
+ */
+ name = create_unique_id(s);
++ if (IS_ERR(name))
++ return PTR_ERR(name);
+ }
+
+ s->kobj.kset = kset;
--- /dev/null
+From 6fd2c68da55c552f86e401ebe40c4a619025ef69 Mon Sep 17 00:00:00 2001
+From: Haiyang Zhang <haiyangz@microsoft.com>
+Date: Sun, 11 Sep 2022 13:40:05 -0700
+Subject: net: mana: Add rmb after checking owner bits
+
+From: Haiyang Zhang <haiyangz@microsoft.com>
+
+commit 6fd2c68da55c552f86e401ebe40c4a619025ef69 upstream.
+
+Per GDMA spec, rmb is necessary after checking owner_bits, before
+reading EQ or CQ entries.
+
+Add rmb in these two places to comply with the specs.
+
+Cc: stable@vger.kernel.org
+Fixes: ca9c54d2d6a5 ("net: mana: Add a driver for Microsoft Azure Network Adapter (MANA)")
+Reported-by: Sinan Kaya <Sinan.Kaya@microsoft.com>
+Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
+Reviewed-by: Dexuan Cui <decui@microsoft.com>
+Link: https://lore.kernel.org/r/1662928805-15861-1-git-send-email-haiyangz@microsoft.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/microsoft/mana/gdma_main.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
++++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
+@@ -370,6 +370,11 @@ static void mana_gd_process_eq_events(vo
+ break;
+ }
+
++ /* Per GDMA spec, rmb is necessary after checking owner_bits, before
++ * reading eqe.
++ */
++ rmb();
++
+ mana_gd_process_eqe(eq);
+
+ eq->head++;
+@@ -1107,6 +1112,11 @@ static int mana_gd_read_cqe(struct gdma_
+ if (WARN_ON_ONCE(owner_bits != new_bits))
+ return -1;
+
++ /* Per GDMA spec, rmb is necessary after checking owner_bits, before
++ * reading completion info
++ */
++ rmb();
++
+ comp->wq_num = cqe->cqe_info.wq_num;
+ comp->is_sq = cqe->cqe_info.is_sq;
+ memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);
--- /dev/null
+From 05d6f6d346fea2fa4580a0c2b6be207456bebb08 Mon Sep 17 00:00:00 2001
+From: Ilkka Koskinen <ilkka@os.amperecomputing.com>
+Date: Mon, 8 Aug 2022 12:54:55 -0700
+Subject: perf/arm-cmn: Add more bits to child node address offset field
+
+From: Ilkka Koskinen <ilkka@os.amperecomputing.com>
+
+commit 05d6f6d346fea2fa4580a0c2b6be207456bebb08 upstream.
+
+CMN-600 uses bits [27:0] for child node address offset while bits [30:28]
+are required to be zero.
+
+For CMN-650, the child node address offset field has been increased
+to include bits [29:0] while leaving only bit 30 set to zero.
+
+Let's include the missing two bits and assume older implementations
+comply with the spec and set bits [29:28] to 0.
+
+Signed-off-by: Ilkka Koskinen <ilkka@os.amperecomputing.com>
+Fixes: 60d1504070c2 ("perf/arm-cmn: Support new IP features")
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Link: https://lore.kernel.org/r/20220808195455.79277-1-ilkka@os.amperecomputing.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/perf/arm-cmn.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
+index 80d8309652a4..b80a9b74662b 100644
+--- a/drivers/perf/arm-cmn.c
++++ b/drivers/perf/arm-cmn.c
+@@ -36,7 +36,7 @@
+ #define CMN_CI_CHILD_COUNT GENMASK_ULL(15, 0)
+ #define CMN_CI_CHILD_PTR_OFFSET GENMASK_ULL(31, 16)
+
+-#define CMN_CHILD_NODE_ADDR GENMASK(27, 0)
++#define CMN_CHILD_NODE_ADDR GENMASK(29, 0)
+ #define CMN_CHILD_NODE_EXTERNAL BIT(31)
+
+ #define CMN_MAX_DIMENSION 12
+--
+2.37.3
+
--- /dev/null
+From 762df359aa5849e010ef04c3ed79d57588ce17d9 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Fri, 24 Sep 2021 01:55:27 +0000
+Subject: riscv: fix a nasty sigreturn bug...
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit 762df359aa5849e010ef04c3ed79d57588ce17d9 upstream.
+
+riscv has an equivalent of arm bug fixed by 653d48b22166 ("arm: fix
+really nasty sigreturn bug"); if signal gets caught by an interrupt that
+hits when we have the right value in a0 (-513), *and* another signal
+gets delivered upon sigreturn() (e.g. included into the blocked mask for
+the first signal and posted while the handler had been running), the
+syscall restart logics will see regs->cause equal to EXC_SYSCALL (we are
+in a syscall, after all) and a0 already restored to its original value
+(-513, which happens to be -ERESTARTNOINTR) and assume that we need to
+apply the usual syscall restart logics.
+
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Fixes: e2c0cdfba7f6 ("RISC-V: User-facing API")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/YxJEiSq%2FCGaL6Gm9@ZenIV/
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/riscv/kernel/signal.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/riscv/kernel/signal.c
++++ b/arch/riscv/kernel/signal.c
+@@ -124,6 +124,8 @@ SYSCALL_DEFINE0(rt_sigreturn)
+ if (restore_altstack(&frame->uc.uc_stack))
+ goto badframe;
+
++ regs->cause = -1UL;
++
+ return regs->a0;
+
+ badframe:
--- /dev/null
+From 225e47ea20ea4f37031131f4fa7a6c281fac6657 Mon Sep 17 00:00:00 2001
+From: Randy Dunlap <rdunlap@infradead.org>
+Date: Fri, 8 Jul 2022 18:49:29 -0700
+Subject: riscv: fix RISCV_ISA_SVPBMT kconfig dependency warning
+
+From: Randy Dunlap <rdunlap@infradead.org>
+
+commit 225e47ea20ea4f37031131f4fa7a6c281fac6657 upstream.
+
+RISCV_ISA_SVPBMT selects RISCV_ALTERNATIVE which depends on !XIP_KERNEL.
+Therefore RISCV_ISA_SVPBMT should also depend on !XIP_KERNEL so
+quieten this kconfig warning:
+
+WARNING: unmet direct dependencies detected for RISCV_ALTERNATIVE
+ Depends on [n]: !XIP_KERNEL [=y]
+ Selected by [y]:
+ - RISCV_ISA_SVPBMT [=y] && 64BIT [=y] && MMU [=y]
+
+Fixes: ff689fd21cb1 ("riscv: add RISC-V Svpbmt extension support")
+Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
+Cc: stable@vger.kernel.org
+Reviewed-by: Heiko Stuebner <heiko@sntech.de>
+Link: https://lore.kernel.org/r/20220709014929.14221-1-rdunlap@infradead.org/
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/riscv/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -361,6 +361,7 @@ config RISCV_ISA_C
+ config RISCV_ISA_SVPBMT
+ bool "SVPBMT extension support"
+ depends on 64BIT && MMU
++ depends on !XIP_KERNEL
+ select RISCV_ALTERNATIVE
+ default y
+ help
thunderbolt-add-support-for-intel-maple-ridge-single-port-controller.patch
efi-x86-wipe-setup_data-on-pure-efi-boot.patch
efi-libstub-check-shim-mode-using-moksbstatert.patch
+wifi-mt76-fix-reading-current-per-tid-starting-sequence-number-for-aggregation.patch
+gpio-mockup-fix-null-pointer-dereference-when-removing-debugfs.patch
+gpio-mockup-fix-potential-resource-leakage-when-register-a-chip.patch
+gpiolib-cdev-set-lineevent_state-irq-after-irq-register-successfully.patch
+riscv-fix-a-nasty-sigreturn-bug.patch
+riscv-fix-riscv_isa_svpbmt-kconfig-dependency-warning.patch
+drm-i915-gem-flush-contexts-on-driver-release.patch
+drm-i915-gem-really-move-i915_gem_context.link-under-ref-protection.patch
+xen-xenbus-fix-xenbus_setup_ring.patch
+kasan-call-kasan_malloc-from-__kmalloc_-track_caller.patch
+can-flexcan-flexcan_mailbox_read-fix-return-value-for-drop-true.patch
+net-mana-add-rmb-after-checking-owner-bits.patch
+mm-slub-fix-to-return-errno-if-kmalloc-fails.patch
+mm-slub-fix-flush_cpu_slab-__free_slab-invocations-in-task-context.patch
+kvm-x86-reinstate-kvm_vcpu_arch.guest_supported_xcr0.patch
+kvm-x86-always-enable-legacy-fp-sse-in-allowed-user-xfeatures.patch
+kvm-x86-inject-ud-on-emulated-xsetbv-if-xsaves-isn-t-enabled.patch
+perf-arm-cmn-add-more-bits-to-child-node-address-offset-field.patch
+arm64-topology-fix-possible-overflow-in-amu_fie_setup.patch
+vmlinux.lds.h-cfi-reduce-alignment-of-jump-table-to-function-alignment.patch
--- /dev/null
+From 13b0566962914e167cb3238fbe29ced618f07a27 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will@kernel.org>
+Date: Thu, 22 Sep 2022 22:57:15 +0100
+Subject: vmlinux.lds.h: CFI: Reduce alignment of jump-table to function alignment
+
+From: Will Deacon <will@kernel.org>
+
+commit 13b0566962914e167cb3238fbe29ced618f07a27 upstream.
+
+Due to undocumented, hysterical raisins on x86, the CFI jump-table
+sections in .text are needlessly aligned to PMD_SIZE in the vmlinux
+linker script. When compiling a CFI-enabled arm64 kernel with a 64KiB
+page-size, a PMD maps 512MiB of virtual memory and so the .text section
+increases to a whopping 940MiB and blows the final Image up to 960MiB.
+Others report a link failure.
+
+Since the CFI jump-table requires only instruction alignment, reduce the
+alignment directives to function alignment for parity with other parts
+of the .text section. This reduces the size of the .text section for the
+aforementioned 64KiB page size arm64 kernel to 19MiB for a much more
+reasonable total Image size of 39MiB.
+
+Cc: Sami Tolvanen <samitolvanen@google.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: "Mohan Rao .vanimina" <mailtoc.mohanrao@gmail.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Nathan Chancellor <nathan@kernel.org>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/all/CAL_GTzigiNOMYkOPX1KDnagPhJtFNqSK=1USNbS0wUL4PW6-Uw@mail.gmail.com/
+Fixes: cf68fffb66d6 ("add support for Clang CFI")
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Sami Tolvanen <samitolvanen@google.com>
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Link: https://lore.kernel.org/r/20220922215715.13345-1-will@kernel.org
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/asm-generic/vmlinux.lds.h | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -543,10 +543,9 @@
+ */
+ #ifdef CONFIG_CFI_CLANG
+ #define TEXT_CFI_JT \
+- . = ALIGN(PMD_SIZE); \
++ ALIGN_FUNCTION(); \
+ __cfi_jt_start = .; \
+ *(.text..L.cfi.jumptable .text..L.cfi.jumptable.*) \
+- . = ALIGN(PMD_SIZE); \
+ __cfi_jt_end = .;
+ #else
+ #define TEXT_CFI_JT
--- /dev/null
+From c3a510e2b53785df31d882a773c4c0780b4c825f Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <nbd@nbd.name>
+Date: Fri, 26 Aug 2022 20:23:29 +0200
+Subject: wifi: mt76: fix reading current per-tid starting sequence number for aggregation
+
+From: Felix Fietkau <nbd@nbd.name>
+
+commit c3a510e2b53785df31d882a773c4c0780b4c825f upstream.
+
+The code was accidentally shifting register values down by tid % 32 instead of
+(tid * field_size) % 32.
+
+Cc: stable@vger.kernel.org
+Fixes: a28bef561a5c ("mt76: mt7615: re-enable offloading of sequence number assignment")
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Signed-off-by: Kalle Valo <kvalo@kernel.org>
+Link: https://lore.kernel.org/r/20220826182329.18155-1-nbd@nbd.name
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt7615/mac.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+@@ -1138,7 +1138,7 @@ u32 mt7615_mac_get_sta_tid_sn(struct mt7
+ offset %= 32;
+
+ val = mt76_rr(dev, addr);
+- val >>= (tid % 32);
++ val >>= offset;
+
+ if (offset > 20) {
+ addr += 4;
--- /dev/null
+From ce6b8ccdef959ba86b2711e090e84a987a000bf7 Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Thu, 15 Sep 2022 15:05:45 +0200
+Subject: xen/xenbus: fix xenbus_setup_ring()
+
+From: Juergen Gross <jgross@suse.com>
+
+commit ce6b8ccdef959ba86b2711e090e84a987a000bf7 upstream.
+
+Commit 4573240f0764 ("xen/xenbus: eliminate xenbus_grant_ring()")
+introduced an error for initialization of multi-page rings.
+
+Cc: stable@vger.kernel.org
+Fixes: 4573240f0764 ("xen/xenbus: eliminate xenbus_grant_ring()")
+Reported-by: Sander Eikelenboom <linux@eikelenboom.it>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/xen/xenbus/xenbus_client.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
+index d5f3f763717e..d4b251925796 100644
+--- a/drivers/xen/xenbus/xenbus_client.c
++++ b/drivers/xen/xenbus/xenbus_client.c
+@@ -382,9 +382,10 @@ int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
+ unsigned long ring_size = nr_pages * XEN_PAGE_SIZE;
+ grant_ref_t gref_head;
+ unsigned int i;
++ void *addr;
+ int ret;
+
+- *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
++ addr = *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
+ if (!*vaddr) {
+ ret = -ENOMEM;
+ goto err;
+@@ -401,13 +402,15 @@ int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
+ unsigned long gfn;
+
+ if (is_vmalloc_addr(*vaddr))
+- gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr[i]));
++ gfn = pfn_to_gfn(vmalloc_to_pfn(addr));
+ else
+- gfn = virt_to_gfn(vaddr[i]);
++ gfn = virt_to_gfn(addr);
+
+ grefs[i] = gnttab_claim_grant_reference(&gref_head);
+ gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
+ gfn, 0);
++
++ addr += XEN_PAGE_SIZE;
+ }
+
+ return 0;
+--
+2.37.3
+