--- /dev/null
+From b9c826916fdce6419b94eb0cd8810fdac18c2386 Mon Sep 17 00:00:00 2001
+From: Berk Cem Goksel <berkcgoksel@gmail.com>
+Date: Fri, 10 Apr 2026 08:13:41 +0300
+Subject: ALSA: 6fire: fix use-after-free on disconnect
+
+From: Berk Cem Goksel <berkcgoksel@gmail.com>
+
+commit b9c826916fdce6419b94eb0cd8810fdac18c2386 upstream.
+
+In usb6fire_chip_abort(), the chip struct is allocated as the card's
+private data (via snd_card_new with sizeof(struct sfire_chip)). When
+snd_card_free_when_closed() is called and no file handles are open, the
+card and embedded chip are freed synchronously. The subsequent
+chip->card = NULL write then hits freed slab memory.
+
+Call trace:
+ usb6fire_chip_abort sound/usb/6fire/chip.c:59 [inline]
+ usb6fire_chip_disconnect+0x348/0x358 sound/usb/6fire/chip.c:182
+ usb_unbind_interface+0x1a8/0x88c drivers/usb/core/driver.c:458
+ ...
+ hub_event+0x1a04/0x4518 drivers/usb/core/hub.c:5953
+
+Fix by moving the card lifecycle out of usb6fire_chip_abort() and into
+usb6fire_chip_disconnect(). The card pointer is saved in a local
+before any teardown, snd_card_disconnect() is called first to prevent
+new opens, URBs are aborted while chip is still valid, and
+snd_card_free_when_closed() is called last so chip is never accessed
+after the card may be freed.
+
+Fixes: a0810c3d6dd2 ("ALSA: 6fire: Release resources at card release")
+Cc: stable@vger.kernel.org
+Cc: Andrey Konovalov <andreyknvl@gmail.com>
+Signed-off-by: Berk Cem Goksel <berkcgoksel@gmail.com>
+Link: https://patch.msgid.link/20260410051341.1069716-1-berkcgoksel@gmail.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/usb/6fire/chip.c | 17 ++++++++++++-----
+ 1 file changed, 12 insertions(+), 5 deletions(-)
+
+--- a/sound/usb/6fire/chip.c
++++ b/sound/usb/6fire/chip.c
+@@ -53,11 +53,6 @@ static void usb6fire_chip_abort(struct s
+ usb6fire_comm_abort(chip);
+ if (chip->control)
+ usb6fire_control_abort(chip);
+- if (chip->card) {
+- snd_card_disconnect(chip->card);
+- snd_card_free_when_closed(chip->card);
+- chip->card = NULL;
+- }
+ }
+ }
+
+@@ -168,6 +163,7 @@ destroy_chip:
+ static void usb6fire_chip_disconnect(struct usb_interface *intf)
+ {
+ struct sfire_chip *chip;
++ struct snd_card *card;
+
+ chip = usb_get_intfdata(intf);
+ if (chip) { /* if !chip, fw upload has been performed */
+@@ -178,8 +174,19 @@ static void usb6fire_chip_disconnect(str
+ chips[chip->regidx] = NULL;
+ }
+
++ /*
++ * Save card pointer before teardown.
++ * snd_card_free_when_closed() may free card (and
++ * the embedded chip) immediately, so it must be
++ * called last and chip must not be accessed after.
++ */
++ card = chip->card;
+ chip->shutdown = true;
++ if (card)
++ snd_card_disconnect(card);
+ usb6fire_chip_abort(chip);
++ if (card)
++ snd_card_free_when_closed(card);
+ }
+ }
+ }
--- /dev/null
+From 6ec1235fc941dac6c011b30ee01d9220ff87e0cd Mon Sep 17 00:00:00 2001
+From: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
+Date: Thu, 2 Apr 2026 08:11:06 +0000
+Subject: ASoC: qcom: q6apm: move component registration to unmanaged version
+
+From: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
+
+commit 6ec1235fc941dac6c011b30ee01d9220ff87e0cd upstream.
+
+q6apm component registers dais dynamically from ASoC topology, which
+are allocated using device managed version apis. Allocating both
+component and dynamic dais using managed version could lead to incorrect
+free ordering, dai will be freed while component still holding references
+to it.
+
+Fix this issue by moving the component to the unmanaged version so
+that the dai pointers are only freed after the component is removed.
+
+==================================================================
+BUG: KASAN: slab-use-after-free in snd_soc_del_component_unlocked+0x3d4/0x400 [snd_soc_core]
+Read of size 8 at addr ffff00084493a6e8 by task kworker/u48:0/3426
+Tainted: [W]=WARN
+Hardware name: LENOVO 21N2ZC5PUS/21N2ZC5PUS, BIOS N42ET57W (1.31 ) 08/08/2024
+Workqueue: pdr_notifier_wq pdr_notifier_work [pdr_interface]
+Call trace:
+ show_stack+0x28/0x7c (C)
+ dump_stack_lvl+0x60/0x80
+ print_report+0x160/0x4b4
+ kasan_report+0xac/0xfc
+ __asan_report_load8_noabort+0x20/0x34
+ snd_soc_del_component_unlocked+0x3d4/0x400 [snd_soc_core]
+ snd_soc_unregister_component_by_driver+0x50/0x88 [snd_soc_core]
+ devm_component_release+0x30/0x5c [snd_soc_core]
+ devres_release_all+0x13c/0x210
+ device_unbind_cleanup+0x20/0x190
+ device_release_driver_internal+0x350/0x468
+ device_release_driver+0x18/0x30
+ bus_remove_device+0x1a0/0x35c
+ device_del+0x314/0x7f0
+ device_unregister+0x20/0xbc
+ apr_remove_device+0x5c/0x7c [apr]
+ device_for_each_child+0xd8/0x160
+ apr_pd_status+0x7c/0xa8 [apr]
+ pdr_notifier_work+0x114/0x240 [pdr_interface]
+ process_one_work+0x500/0xb70
+ worker_thread+0x630/0xfb0
+ kthread+0x370/0x6c0
+ ret_from_fork+0x10/0x20
+
+Allocated by task 77:
+ kasan_save_stack+0x40/0x68
+ kasan_save_track+0x20/0x40
+ kasan_save_alloc_info+0x44/0x58
+ __kasan_kmalloc+0xbc/0xdc
+ __kmalloc_node_track_caller_noprof+0x1f4/0x620
+ devm_kmalloc+0x7c/0x1c8
+ snd_soc_register_dai+0x50/0x4f0 [snd_soc_core]
+ soc_tplg_pcm_elems_load+0x55c/0x1eb8 [snd_soc_core]
+ snd_soc_tplg_component_load+0x4f8/0xb60 [snd_soc_core]
+ audioreach_tplg_init+0x124/0x1fc [snd_q6apm]
+ q6apm_audio_probe+0x10/0x1c [snd_q6apm]
+ snd_soc_component_probe+0x5c/0x118 [snd_soc_core]
+ soc_probe_component+0x44c/0xaf0 [snd_soc_core]
+ snd_soc_bind_card+0xad0/0x2370 [snd_soc_core]
+ snd_soc_register_card+0x3b0/0x4c0 [snd_soc_core]
+ devm_snd_soc_register_card+0x50/0xc8 [snd_soc_core]
+ x1e80100_platform_probe+0x208/0x368 [snd_soc_x1e80100]
+ platform_probe+0xc0/0x188
+ really_probe+0x188/0x804
+ __driver_probe_device+0x158/0x358
+ driver_probe_device+0x60/0x190
+ __device_attach_driver+0x16c/0x2a8
+ bus_for_each_drv+0x100/0x194
+ __device_attach+0x174/0x380
+ device_initial_probe+0x14/0x20
+ bus_probe_device+0x124/0x154
+ deferred_probe_work_func+0x140/0x220
+ process_one_work+0x500/0xb70
+ worker_thread+0x630/0xfb0
+ kthread+0x370/0x6c0
+ ret_from_fork+0x10/0x20
+
+Freed by task 3426:
+ kasan_save_stack+0x40/0x68
+ kasan_save_track+0x20/0x40
+ __kasan_save_free_info+0x4c/0x80
+ __kasan_slab_free+0x78/0xa0
+ kfree+0x100/0x4a4
+ devres_release_all+0x144/0x210
+ device_unbind_cleanup+0x20/0x190
+ device_release_driver_internal+0x350/0x468
+ device_release_driver+0x18/0x30
+ bus_remove_device+0x1a0/0x35c
+ device_del+0x314/0x7f0
+ device_unregister+0x20/0xbc
+ apr_remove_device+0x5c/0x7c [apr]
+ device_for_each_child+0xd8/0x160
+ apr_pd_status+0x7c/0xa8 [apr]
+ pdr_notifier_work+0x114/0x240 [pdr_interface]
+ process_one_work+0x500/0xb70
+ worker_thread+0x630/0xfb0
+ kthread+0x370/0x6c0
+ ret_from_fork+0x10/0x20
+
+Fixes: 5477518b8a0e ("ASoC: qdsp6: audioreach: add q6apm support")
+Cc: Stable@vger.kernel.org
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@oss.qualcomm.com>
+Link: https://patch.msgid.link/20260402081118.348071-2-srinivas.kandagatla@oss.qualcomm.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/qcom/qdsp6/q6apm.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/sound/soc/qcom/qdsp6/q6apm.c
++++ b/sound/soc/qcom/qdsp6/q6apm.c
+@@ -767,13 +767,22 @@ static int apm_probe(gpr_device_t *gdev)
+
+ q6apm_get_apm_state(apm);
+
+- ret = devm_snd_soc_register_component(dev, &q6apm_audio_component, NULL, 0);
++ ret = snd_soc_register_component(dev, &q6apm_audio_component, NULL, 0);
+ if (ret < 0) {
+ dev_err(dev, "failed to register q6apm: %d\n", ret);
+ return ret;
+ }
+
+- return of_platform_populate(dev->of_node, NULL, NULL, dev);
++ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
++ if (ret)
++ snd_soc_unregister_component(dev);
++
++ return ret;
++}
++
++static void apm_remove(gpr_device_t *gdev)
++{
++ snd_soc_unregister_component(&gdev->dev);
+ }
+
+ struct audioreach_module *q6apm_find_module_by_mid(struct q6apm_graph *graph, uint32_t mid)
+@@ -840,6 +849,7 @@ MODULE_DEVICE_TABLE(of, apm_device_id);
+
+ static gpr_driver_t apm_driver = {
+ .probe = apm_probe,
++ .remove = apm_remove,
+ .gpr_callback = apm_callback,
+ .driver = {
+ .name = "qcom-apm",
--- /dev/null
+From fec114a98b8735ee89c75216c45a78e28be0f128 Mon Sep 17 00:00:00 2001
+From: Mingzhe Zou <mingzhe.zou@easystack.cn>
+Date: Sun, 22 Mar 2026 21:41:02 +0800
+Subject: bcache: fix cached_dev.sb_bio use-after-free and crash
+
+From: Mingzhe Zou <mingzhe.zou@easystack.cn>
+
+commit fec114a98b8735ee89c75216c45a78e28be0f128 upstream.
+
+In our production environment, we have received multiple crash reports
+regarding libceph, which have caught our attention:
+
+```
+[6888366.280350] Call Trace:
+[6888366.280452] blk_update_request+0x14e/0x370
+[6888366.280561] blk_mq_end_request+0x1a/0x130
+[6888366.280671] rbd_img_handle_request+0x1a0/0x1b0 [rbd]
+[6888366.280792] rbd_obj_handle_request+0x32/0x40 [rbd]
+[6888366.280903] __complete_request+0x22/0x70 [libceph]
+[6888366.281032] osd_dispatch+0x15e/0xb40 [libceph]
+[6888366.281164] ? inet_recvmsg+0x5b/0xd0
+[6888366.281272] ? ceph_tcp_recvmsg+0x6f/0xa0 [libceph]
+[6888366.281405] ceph_con_process_message+0x79/0x140 [libceph]
+[6888366.281534] ceph_con_v1_try_read+0x5d7/0xf30 [libceph]
+[6888366.281661] ceph_con_workfn+0x329/0x680 [libceph]
+```
+
+After analyzing the coredump file, we found that the address of
+dc->sb_bio has been freed. We know that cached_dev is only freed when it
+is stopped.
+
+Since sb_bio is embedded in struct cached_dev rather than allocated on
+each write, if the device is stopped while writing to the superblock,
+the freed address will be accessed at endio.
+
+This patch hopes to wait for sb_write to complete in cached_dev_free.
+
+It should be noted that we analyzed the cause of the problem, then tell
+all details to the QWEN and adopted the modifications it made.
+
+Signed-off-by: Mingzhe Zou <mingzhe.zou@easystack.cn>
+Fixes: cafe563591446 ("bcache: A block layer cache")
+Cc: stable@vger.kernel.org # 3.10+
+Signed-off-by: Coly Li <colyli@fnnas.com>
+Link: https://patch.msgid.link/20260322134102.480107-1-colyli@fnnas.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/bcache/super.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1373,6 +1373,13 @@ static CLOSURE_CALLBACK(cached_dev_free)
+
+ mutex_unlock(&bch_register_lock);
+
++ /*
++ * Wait for any pending sb_write to complete before free.
++ * The sb_bio is embedded in struct cached_dev, so we must
++ * ensure no I/O is in progress.
++ */
++ closure_sync(&dc->sb_write);
++
+ if (dc->sb_disk)
+ folio_put(virt_to_folio(dc->sb_disk));
+
--- /dev/null
+From 08e57f5e1a9067d5fbf33993aa7f51d60b3d13a4 Mon Sep 17 00:00:00 2001
+From: Sanman Pradhan <psanman@juniper.net>
+Date: Fri, 10 Apr 2026 00:25:35 +0000
+Subject: hwmon: (powerz) Fix use-after-free on USB disconnect
+
+From: Sanman Pradhan <psanman@juniper.net>
+
+commit 08e57f5e1a9067d5fbf33993aa7f51d60b3d13a4 upstream.
+
+After powerz_disconnect() frees the URB and releases the mutex, a
+subsequent powerz_read() call can acquire the mutex and call
+powerz_read_data(), which dereferences the freed URB pointer.
+
+Fix by:
+ - Setting priv->urb to NULL in powerz_disconnect() so that
+ powerz_read_data() can detect the disconnected state.
+ - Adding a !priv->urb check at the start of powerz_read_data()
+ to return -ENODEV on a disconnected device.
+ - Moving usb_set_intfdata() before hwmon registration so the
+ disconnect handler can always find the priv pointer.
+
+Fixes: 4381a36abdf1c ("hwmon: add POWER-Z driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sanman Pradhan <psanman@juniper.net>
+Link: https://lore.kernel.org/r/20260410002521.422645-2-sanman.pradhan@hpe.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hwmon/powerz.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/hwmon/powerz.c
++++ b/drivers/hwmon/powerz.c
+@@ -108,6 +108,9 @@ static int powerz_read_data(struct usb_d
+ {
+ int ret;
+
++ if (!priv->urb)
++ return -ENODEV;
++
+ priv->status = -ETIMEDOUT;
+ reinit_completion(&priv->completion);
+
+@@ -224,6 +227,8 @@ static int powerz_probe(struct usb_inter
+ mutex_init(&priv->mutex);
+ init_completion(&priv->completion);
+
++ usb_set_intfdata(intf, priv);
++
+ hwmon_dev =
+ devm_hwmon_device_register_with_info(parent, DRIVER_NAME, priv,
+ &powerz_chip_info, NULL);
+@@ -232,8 +237,6 @@ static int powerz_probe(struct usb_inter
+ return PTR_ERR(hwmon_dev);
+ }
+
+- usb_set_intfdata(intf, priv);
+-
+ return 0;
+ }
+
+@@ -244,6 +247,7 @@ static void powerz_disconnect(struct usb
+ mutex_lock(&priv->mutex);
+ usb_kill_urb(priv->urb);
+ usb_free_urb(priv->urb);
++ priv->urb = NULL;
+ mutex_unlock(&priv->mutex);
+ }
+
--- /dev/null
+From 0b16e69d17d8c35c5c9d5918bf596c75a44655d3 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Tue, 24 Feb 2026 17:20:36 -0800
+Subject: KVM: x86: Use scratch field in MMIO fragment to hold small write values
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 0b16e69d17d8c35c5c9d5918bf596c75a44655d3 upstream.
+
+When exiting to userspace to service an emulated MMIO write, copy the
+to-be-written value to a scratch field in the MMIO fragment if the size
+of the data payload is 8 bytes or less, i.e. can fit in a single chunk,
+instead of pointing the fragment directly at the source value.
+
+This fixes a class of use-after-free bugs that occur when the emulator
+initiates a write using an on-stack, local variable as the source, the
+write splits a page boundary, *and* both pages are MMIO pages. Because
+KVM's ABI only allows for physically contiguous MMIO requests, accesses
+that split MMIO pages are separated into two fragments, and are sent to
+userspace one at a time. When KVM attempts to complete userspace MMIO in
+response to KVM_RUN after the first fragment, KVM will detect the second
+fragment and generate a second userspace exit, and reference the on-stack
+variable.
+
+The issue is most visible if the second KVM_RUN is performed by a separate
+task, in which case the stack of the initiating task can show up as truly
+freed data.
+
+ ==================================================================
+ BUG: KASAN: use-after-free in complete_emulated_mmio+0x305/0x420
+ Read of size 1 at addr ffff888009c378d1 by task syz-executor417/984
+
+ CPU: 1 PID: 984 Comm: syz-executor417 Not tainted 5.10.0-182.0.0.95.h2627.eulerosv2r13.x86_64 #3
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.15.0-0-g2dd4b9b3f840-prebuilt.qemu.org 04/01/2014 Call Trace:
+ dump_stack+0xbe/0xfd
+ print_address_description.constprop.0+0x19/0x170
+ __kasan_report.cold+0x6c/0x84
+ kasan_report+0x3a/0x50
+ check_memory_region+0xfd/0x1f0
+ memcpy+0x20/0x60
+ complete_emulated_mmio+0x305/0x420
+ kvm_arch_vcpu_ioctl_run+0x63f/0x6d0
+ kvm_vcpu_ioctl+0x413/0xb20
+ __se_sys_ioctl+0x111/0x160
+ do_syscall_64+0x30/0x40
+ entry_SYSCALL_64_after_hwframe+0x67/0xd1
+ RIP: 0033:0x42477d
+ Code: <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48
+ RSP: 002b:00007faa8e6890e8 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+ RAX: ffffffffffffffda RBX: 00000000004d7338 RCX: 000000000042477d
+ RDX: 0000000000000000 RSI: 000000000000ae80 RDI: 0000000000000005
+ RBP: 00000000004d7330 R08: 00007fff28d546df R09: 0000000000000000
+ R10: 0000000000000000 R11: 0000000000000246 R12: 00000000004d733c
+ R13: 0000000000000000 R14: 000000000040a200 R15: 00007fff28d54720
+
+ The buggy address belongs to the page:
+ page:0000000029f6a428 refcount:0 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x9c37
+ flags: 0xfffffc0000000(node=0|zone=1|lastcpupid=0x1fffff)
+ raw: 000fffffc0000000 0000000000000000 ffffea0000270dc8 0000000000000000
+ raw: 0000000000000000 0000000000000000 00000000ffffffff 0000000000000000 page dumped because: kasan: bad access detected
+
+ Memory state around the buggy address:
+ ffff888009c37780: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff
+ ffff888009c37800: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff
+ >ffff888009c37880: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff
+ ^
+ ffff888009c37900: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff
+ ffff888009c37980: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff
+ ==================================================================
+
+The bug can also be reproduced with a targeted KVM-Unit-Test by hacking
+KVM to fill a large on-stack variable in complete_emulated_mmio(), i.e. by
+overwriting the data value with garbage.
+
+Limit the use of the scratch fields to 8-byte or smaller accesses, and to
+just writes, as larger accesses and reads are not affected thanks to
+implementation details in the emulator, but add a sanity check to ensure
+those details don't change in the future. Specifically, KVM never uses
+on-stack variables for accesses larger than 8 bytes, e.g. uses an operand
+in the emulator context, and *all* reads are buffered through the mem_read
+cache.
+
+Note! Using the scratch field for reads is not only unnecessary, it's
+also extremely difficult to handle correctly. As above, KVM buffers all
+reads through the mem_read cache, and heavily relies on that behavior when
+re-emulating the instruction after a userspace MMIO read exit. If a read
+splits a page, the first page is NOT an MMIO page, and the second page IS
+an MMIO page, then the MMIO fragment needs to point at _just_ the second
+chunk of the destination, i.e. its position in the mem_read cache. Taking
+the "obvious" approach of copying the fragment value into the destination
+when re-emulating the instruction would clobber the first chunk of the
+destination, i.e. would clobber the data that was read from guest memory.
+
+Fixes: f78146b0f923 ("KVM: Fix page-crossing MMIO")
+Suggested-by: Yashu Zhang <zhangjiaji1@huawei.com>
+Reported-by: Yashu Zhang <zhangjiaji1@huawei.com>
+Closes: https://lore.kernel.org/all/369eaaa2b3c1425c85e8477066391bc7@huawei.com
+Cc: stable@vger.kernel.org
+Tested-by: Tom Lendacky <thomas.lendacky@gmail.com>
+Tested-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
+Link: https://patch.msgid.link/20260225012049.920665-2-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/x86.c | 14 +++++++++++++-
+ include/linux/kvm_host.h | 3 ++-
+ 2 files changed, 15 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8236,7 +8236,13 @@ static int emulator_read_write_onepage(u
+ WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
+ frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
+ frag->gpa = gpa;
+- frag->data = val;
++ if (write && bytes <= 8u) {
++ frag->val = 0;
++ frag->data = &frag->val;
++ memcpy(&frag->val, val, bytes);
++ } else {
++ frag->data = val;
++ }
+ frag->len = bytes;
+ return X86EMUL_CONTINUE;
+ }
+@@ -8251,6 +8257,9 @@ static int emulator_read_write(struct x8
+ gpa_t gpa;
+ int rc;
+
++ if (WARN_ON_ONCE((bytes > 8u || !ops->write) && object_is_on_stack(val)))
++ return X86EMUL_UNHANDLEABLE;
++
+ if (ops->read_write_prepare &&
+ ops->read_write_prepare(vcpu, val, bytes))
+ return X86EMUL_CONTINUE;
+@@ -11823,6 +11832,9 @@ static int complete_emulated_mmio(struct
+ frag++;
+ vcpu->mmio_cur_fragment++;
+ } else {
++ if (WARN_ON_ONCE(frag->data == &frag->val))
++ return -EIO;
++
+ /* Go forward to the next mmio piece. */
+ frag->data += len;
+ frag->gpa += len;
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -320,7 +320,8 @@ static inline bool kvm_vcpu_can_poll(kti
+ struct kvm_mmio_fragment {
+ gpa_t gpa;
+ void *data;
+- unsigned len;
++ u64 val;
++ unsigned int len;
+ };
+
+ struct kvm_vcpu {
--- /dev/null
+From a66485a934c7187ae8e36517d40615fa2e961cff Mon Sep 17 00:00:00 2001
+From: Abhishek Kumar <abhishek_sts8@yahoo.com>
+Date: Tue, 10 Mar 2026 22:14:37 +0530
+Subject: media: em28xx: fix use-after-free in em28xx_v4l2_open()
+
+From: Abhishek Kumar <abhishek_sts8@yahoo.com>
+
+commit a66485a934c7187ae8e36517d40615fa2e961cff upstream.
+
+em28xx_v4l2_open() reads dev->v4l2 without holding dev->lock,
+creating a race with em28xx_v4l2_init()'s error path and
+em28xx_v4l2_fini(), both of which free the em28xx_v4l2 struct
+and set dev->v4l2 to NULL under dev->lock.
+
+This race leads to two issues:
+ - use-after-free in v4l2_fh_init() when accessing vdev->ctrl_handler,
+ since the video_device is embedded in the freed em28xx_v4l2 struct.
+ - NULL pointer dereference in em28xx_resolution_set() when accessing
+ v4l2->norm, since dev->v4l2 has been set to NULL.
+
+Fix this by moving the mutex_lock() before the dev->v4l2 read and
+adding a NULL check for dev->v4l2 under the lock.
+
+Reported-by: syzbot+c025d34b8eaa54c571b8@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=c025d34b8eaa54c571b8
+Fixes: 8139a4d583ab ("[media] em28xx: move v4l2 user counting fields from struct em28xx to struct v4l2")
+Cc: stable@vger.kernel.org
+Signed-off-by: Abhishek Kumar <abhishek_sts8@yahoo.com>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/usb/em28xx/em28xx-video.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/drivers/media/usb/em28xx/em28xx-video.c
++++ b/drivers/media/usb/em28xx/em28xx-video.c
+@@ -2126,7 +2126,7 @@ static int em28xx_v4l2_open(struct file
+ {
+ struct video_device *vdev = video_devdata(filp);
+ struct em28xx *dev = video_drvdata(filp);
+- struct em28xx_v4l2 *v4l2 = dev->v4l2;
++ struct em28xx_v4l2 *v4l2;
+ enum v4l2_buf_type fh_type = 0;
+ int ret;
+
+@@ -2143,13 +2143,19 @@ static int em28xx_v4l2_open(struct file
+ return -EINVAL;
+ }
+
++ if (mutex_lock_interruptible(&dev->lock))
++ return -ERESTARTSYS;
++
++ v4l2 = dev->v4l2;
++ if (!v4l2) {
++ mutex_unlock(&dev->lock);
++ return -ENODEV;
++ }
++
+ em28xx_videodbg("open dev=%s type=%s users=%d\n",
+ video_device_node_name(vdev), v4l2_type_names[fh_type],
+ v4l2->users);
+
+- if (mutex_lock_interruptible(&dev->lock))
+- return -ERESTARTSYS;
+-
+ ret = v4l2_fh_open(filp);
+ if (ret) {
+ dev_err(&dev->intf->dev,
--- /dev/null
+From 76e35091ffc722ba39b303e48bc5d08abb59dd56 Mon Sep 17 00:00:00 2001
+From: Fan Wu <fanwu01@zju.edu.cn>
+Date: Wed, 4 Mar 2026 09:35:06 +0000
+Subject: media: mediatek: vcodec: fix use-after-free in encoder release path
+
+From: Fan Wu <fanwu01@zju.edu.cn>
+
+commit 76e35091ffc722ba39b303e48bc5d08abb59dd56 upstream.
+
+The fops_vcodec_release() function frees the context structure (ctx)
+without first cancelling any pending or running work in ctx->encode_work.
+This creates a race window where the workqueue handler (mtk_venc_worker)
+may still be accessing the context memory after it has been freed.
+
+Race condition:
+
+ CPU 0 (release path) CPU 1 (workqueue)
+ --------------------- ------------------
+ fops_vcodec_release()
+ v4l2_m2m_ctx_release()
+ v4l2_m2m_cancel_job()
+ // waits for m2m job "done"
+ mtk_venc_worker()
+ v4l2_m2m_job_finish()
+ // m2m job "done"
+ // BUT worker still running!
+ // post-job_finish access:
+ other ctx dereferences
+ // UAF if ctx already freed
+ // returns (job "done")
+ kfree(ctx) // ctx freed
+
+Root cause: The v4l2_m2m_ctx_release() only waits for the m2m job
+lifecycle (via TRANS_RUNNING flag), not the workqueue lifecycle.
+After v4l2_m2m_job_finish() is called, the m2m framework considers
+the job complete and v4l2_m2m_ctx_release() returns, but the worker
+function continues executing and may still access ctx.
+
+The work is queued during encode operations via:
+ queue_work(ctx->dev->encode_workqueue, &ctx->encode_work)
+The worker function accesses ctx->m2m_ctx, ctx->dev, and other ctx
+fields even after calling v4l2_m2m_job_finish().
+
+This vulnerability was confirmed with KASAN by running an instrumented
+test module that widens the post-job_finish race window. KASAN detected:
+
+ BUG: KASAN: slab-use-after-free in mtk_venc_worker+0x159/0x180
+ Read of size 4 at addr ffff88800326e000 by task kworker/u8:0/12
+
+ Workqueue: mtk_vcodec_enc_wq mtk_venc_worker
+
+ Allocated by task 47:
+ __kasan_kmalloc+0x7f/0x90
+ fops_vcodec_open+0x85/0x1a0
+
+ Freed by task 47:
+ __kasan_slab_free+0x43/0x70
+ kfree+0xee/0x3a0
+ fops_vcodec_release+0xb7/0x190
+
+Fix this by calling cancel_work_sync(&ctx->encode_work) before kfree(ctx).
+This ensures the workqueue handler is both cancelled (if pending) and
+synchronized (waits for any running handler to complete) before the
+context is freed.
+
+Placement rationale: The fix is placed after v4l2_ctrl_handler_free()
+and before list_del_init(&ctx->list). At this point, all m2m operations
+are done (v4l2_m2m_ctx_release() has returned), and we need to ensure
+the workqueue is synchronized before removing ctx from the list and
+freeing it.
+
+Note: The open error path does NOT need cancel_work_sync() because
+INIT_WORK() only initializes the work structure - it does not schedule
+it. Work is only scheduled later during device_run() operations.
+
+Fixes: 0934d3759615 ("media: mediatek: vcodec: separate decoder and encoder")
+Cc: stable@vger.kernel.org
+Signed-off-by: Fan Wu <fanwu01@zju.edu.cn>
+Reviewed-by: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+Signed-off-by: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
++++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
+@@ -215,6 +215,15 @@ static int fops_vcodec_release(struct fi
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+
++ /*
++ * Cancel any pending encode work before freeing the context.
++ * Although v4l2_m2m_ctx_release() waits for m2m job completion,
++ * the workqueue handler (mtk_venc_worker) may still be accessing
++ * the context after v4l2_m2m_job_finish() returns. Without this,
++ * a use-after-free occurs when the worker accesses ctx after kfree.
++ */
++ cancel_work_sync(&ctx->encode_work);
++
+ spin_lock_irqsave(&dev->dev_ctx_lock, flags);
+ list_del_init(&ctx->list);
+ spin_unlock_irqrestore(&dev->dev_ctx_lock, flags);
--- /dev/null
+From a0e5a598fe9a4612b852406b51153b881592aede Mon Sep 17 00:00:00 2001
+From: Ruslan Valiyev <linuxoid@gmail.com>
+Date: Sun, 1 Mar 2026 21:07:35 +0000
+Subject: media: vidtv: fix nfeeds state corruption on start_streaming failure
+
+From: Ruslan Valiyev <linuxoid@gmail.com>
+
+commit a0e5a598fe9a4612b852406b51153b881592aede upstream.
+
+syzbot reported a memory leak in vidtv_psi_service_desc_init [1].
+
+When vidtv_start_streaming() fails inside vidtv_start_feed(), the
+nfeeds counter is left incremented even though no feed was actually
+started. This corrupts the driver state: subsequent start_feed calls
+see nfeeds > 1 and skip starting the mux, while stop_feed calls
+eventually try to stop a non-existent stream.
+
+This state corruption can also lead to memory leaks, since the mux
+and channel resources may be partially allocated during a failed
+start_streaming but never cleaned up, as the stop path finds
+dvb->streaming == false and returns early.
+
+Fix by decrementing nfeeds back when start_streaming fails, keeping
+the counter in sync with the actual number of active feeds.
+
+[1]
+BUG: memory leak
+unreferenced object 0xffff888145b50820 (size 32):
+ comm "syz.0.17", pid 6068, jiffies 4294944486
+ backtrace (crc 90a0c7d4):
+ vidtv_psi_service_desc_init+0x74/0x1b0 drivers/media/test-drivers/vidtv/vidtv_psi.c:288
+ vidtv_channel_s302m_init+0xb1/0x2a0 drivers/media/test-drivers/vidtv/vidtv_channel.c:83
+ vidtv_channels_init+0x1b/0x40 drivers/media/test-drivers/vidtv/vidtv_channel.c:524
+ vidtv_mux_init+0x516/0xbe0 drivers/media/test-drivers/vidtv/vidtv_mux.c:518
+ vidtv_start_streaming drivers/media/test-drivers/vidtv/vidtv_bridge.c:194 [inline]
+ vidtv_start_feed+0x33e/0x4d0 drivers/media/test-drivers/vidtv/vidtv_bridge.c:239
+
+Fixes: f90cf6079bf67 ("media: vidtv: add a bridge driver")
+Cc: stable@vger.kernel.org
+Reported-by: syzbot+639ebc6ec75e96674741@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=639ebc6ec75e96674741
+Signed-off-by: Ruslan Valiyev <linuxoid@gmail.com>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/test-drivers/vidtv/vidtv_bridge.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/media/test-drivers/vidtv/vidtv_bridge.c
++++ b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
+@@ -237,8 +237,10 @@ static int vidtv_start_feed(struct dvb_d
+
+ if (dvb->nfeeds == 1) {
+ ret = vidtv_start_streaming(dvb);
+- if (ret < 0)
++ if (ret < 0) {
++ dvb->nfeeds--;
+ rc = ret;
++ }
+ }
+
+ mutex_unlock(&dvb->feed_lock);
--- /dev/null
+From 8f5857be99f1ed1fa80991c72449541f634626ee Mon Sep 17 00:00:00 2001
+From: Breno Leitao <leitao@debian.org>
+Date: Mon, 13 Apr 2026 03:09:19 -0700
+Subject: mm: blk-cgroup: fix use-after-free in cgwb_release_workfn()
+
+From: Breno Leitao <leitao@debian.org>
+
+commit 8f5857be99f1ed1fa80991c72449541f634626ee upstream.
+
+cgwb_release_workfn() calls css_put(wb->blkcg_css) and then later accesses
+wb->blkcg_css again via blkcg_unpin_online(). If css_put() drops the last
+reference, the blkcg can be freed asynchronously (css_free_rwork_fn ->
+blkcg_css_free -> kfree) before blkcg_unpin_online() dereferences the
+pointer to access blkcg->online_pin, resulting in a use-after-free:
+
+ BUG: KASAN: slab-use-after-free in blkcg_unpin_online (./include/linux/instrumented.h:112 ./include/linux/atomic/atomic-instrumented.h:400 ./include/linux/refcount.h:389 ./include/linux/refcount.h:432 ./include/linux/refcount.h:450 block/blk-cgroup.c:1367)
+ Write of size 4 at addr ff11000117aa6160 by task kworker/71:1/531
+ Workqueue: cgwb_release cgwb_release_workfn
+ Call Trace:
+ <TASK>
+ blkcg_unpin_online (./include/linux/instrumented.h:112 ./include/linux/atomic/atomic-instrumented.h:400 ./include/linux/refcount.h:389 ./include/linux/refcount.h:432 ./include/linux/refcount.h:450 block/blk-cgroup.c:1367)
+ cgwb_release_workfn (mm/backing-dev.c:629)
+ process_scheduled_works (kernel/workqueue.c:3278 kernel/workqueue.c:3385)
+
+ Freed by task 1016:
+ kfree (./include/linux/kasan.h:235 mm/slub.c:2689 mm/slub.c:6246 mm/slub.c:6561)
+ css_free_rwork_fn (kernel/cgroup/cgroup.c:5542)
+ process_scheduled_works (kernel/workqueue.c:3302 kernel/workqueue.c:3385)
+
+** Stack based on commit 66672af7a095 ("Add linux-next specific files
+for 20260410")
+
+I am seeing this crash sporadically in Meta fleet across multiple kernel
+versions. A full reproducer is available at:
+https://github.com/leitao/debug/blob/main/reproducers/repro_blkcg_uaf.sh
+
+(The race window is narrow. To make it easily reproducible, inject a
+msleep(100) between css_put() and blkcg_unpin_online() in
+cgwb_release_workfn(). With that delay and a KASAN-enabled kernel, the
+reproducer triggers the splat reliably in less than a second.)
+
+Fix this by moving blkcg_unpin_online() before css_put(), so the
+cgwb's CSS reference keeps the blkcg alive while blkcg_unpin_online()
+accesses it.
+
+Link: https://lore.kernel.org/20260413-blkcg-v1-1-35b72622d16c@debian.org
+Fixes: 59b57717fff8 ("blkcg: delay blkg destruction until after writeback has finished")
+Signed-off-by: Breno Leitao <leitao@debian.org>
+Reviewed-by: Dennis Zhou <dennis@kernel.org>
+Reviewed-by: Shakeel Butt <shakeel.butt@linux.dev>
+Cc: David Hildenbrand <david@kernel.org>
+Cc: Jens Axboe <axboe@kernel.dk>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Josef Bacik <josef@toxicpanda.com>
+Cc: JP Kobryn <inwardvessel@gmail.com>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
+Cc: Martin KaFai Lau <martin.lau@linux.dev>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Mike Rapoport <rppt@kernel.org>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/backing-dev.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -618,12 +618,13 @@ static void cgwb_release_workfn(struct w
+ wb_shutdown(wb);
+
+ css_put(wb->memcg_css);
+- css_put(wb->blkcg_css);
+- mutex_unlock(&wb->bdi->cgwb_release_mutex);
+
+ /* triggers blkg destruction if no online users left */
+ blkcg_unpin_online(wb->blkcg_css);
+
++ css_put(wb->blkcg_css);
++ mutex_unlock(&wb->bdi->cgwb_release_mutex);
++
+ fprop_local_destroy_percpu(&wb->memcg_completions);
+
+ spin_lock_irq(&cgwb_lock);
--- /dev/null
+From 51d8c78be0c27ddb91bc2c0263941d8b30a47d3b Mon Sep 17 00:00:00 2001
+From: "Ritesh Harjani (IBM)" <ritesh.list@gmail.com>
+Date: Tue, 24 Feb 2026 18:53:16 +0530
+Subject: mm/kasan: fix double free for kasan pXds
+
+From: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
+
+commit 51d8c78be0c27ddb91bc2c0263941d8b30a47d3b upstream.
+
+kasan_free_pxd() assumes the page table is always struct page aligned.
+But that's not always the case for all architectures. E.g. In case of
+powerpc with 64K pagesize, PUD table (of size 4096) comes from slab cache
+named pgtable-2^9. Hence instead of page_to_virt(pxd_page()) let's just
+directly pass the start of the pxd table which is passed as the 1st
+argument.
+
+This fixes the below double free kasan issue seen with PMEM:
+
+radix-mmu: Mapped 0x0000047d10000000-0x0000047f90000000 with 2.00 MiB pages
+==================================================================
+BUG: KASAN: double-free in kasan_remove_zero_shadow+0x9c4/0xa20
+Free of addr c0000003c38e0000 by task ndctl/2164
+
+CPU: 34 UID: 0 PID: 2164 Comm: ndctl Not tainted 6.19.0-rc1-00048-gea1013c15392 #157 VOLUNTARY
+Hardware name: IBM,9080-HEX POWER10 (architected) 0x800200 0xf000006 of:IBM,FW1060.00 (NH1060_012) hv:phyp pSeries
+Call Trace:
+ dump_stack_lvl+0x88/0xc4 (unreliable)
+ print_report+0x214/0x63c
+ kasan_report_invalid_free+0xe4/0x110
+ check_slab_allocation+0x100/0x150
+ kmem_cache_free+0x128/0x6e0
+ kasan_remove_zero_shadow+0x9c4/0xa20
+ memunmap_pages+0x2b8/0x5c0
+ devm_action_release+0x54/0x70
+ release_nodes+0xc8/0x1a0
+ devres_release_all+0xe0/0x140
+ device_unbind_cleanup+0x30/0x120
+ device_release_driver_internal+0x3e4/0x450
+ unbind_store+0xfc/0x110
+ drv_attr_store+0x78/0xb0
+ sysfs_kf_write+0x114/0x140
+ kernfs_fop_write_iter+0x264/0x3f0
+ vfs_write+0x3bc/0x7d0
+ ksys_write+0xa4/0x190
+ system_call_exception+0x190/0x480
+ system_call_vectored_common+0x15c/0x2ec
+---- interrupt: 3000 at 0x7fff93b3d3f4
+NIP: 00007fff93b3d3f4 LR: 00007fff93b3d3f4 CTR: 0000000000000000
+REGS: c0000003f1b07e80 TRAP: 3000 Not tainted (6.19.0-rc1-00048-gea1013c15392)
+MSR: 800000000280f033 <SF,VEC,VSX,EE,PR,FP,ME,IR,DR,RI,LE> CR: 48888208 XER: 00000000
+<...>
+NIP [00007fff93b3d3f4] 0x7fff93b3d3f4
+LR [00007fff93b3d3f4] 0x7fff93b3d3f4
+---- interrupt: 3000
+
+ The buggy address belongs to the object at c0000003c38e0000
+ which belongs to the cache pgtable-2^9 of size 4096
+ The buggy address is located 0 bytes inside of
+ 4096-byte region [c0000003c38e0000, c0000003c38e1000)
+
+ The buggy address belongs to the physical page:
+ page: refcount:0 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x3c38c
+ head: order:2 mapcount:0 entire_mapcount:0 nr_pages_mapped:0 pincount:0
+ memcg:c0000003bfd63e01
+ flags: 0x63ffff800000040(head|node=6|zone=0|lastcpupid=0x7ffff)
+ page_type: f5(slab)
+ raw: 063ffff800000040 c000000140058980 5deadbeef0000122 0000000000000000
+ raw: 0000000000000000 0000000080200020 00000000f5000000 c0000003bfd63e01
+ head: 063ffff800000040 c000000140058980 5deadbeef0000122 0000000000000000
+ head: 0000000000000000 0000000080200020 00000000f5000000 c0000003bfd63e01
+ head: 063ffff800000002 c00c000000f0e301 00000000ffffffff 00000000ffffffff
+ head: ffffffffffffffff 0000000000000000 00000000ffffffff 0000000000000004
+ page dumped because: kasan: bad access detected
+
+[ 138.953636] [ T2164] Memory state around the buggy address:
+[ 138.953643] [ T2164] c0000003c38dff00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+[ 138.953652] [ T2164] c0000003c38dff80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+[ 138.953661] [ T2164] >c0000003c38e0000: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+[ 138.953669] [ T2164] ^
+[ 138.953675] [ T2164] c0000003c38e0080: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+[ 138.953684] [ T2164] c0000003c38e0100: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+[ 138.953692] [ T2164] ==================================================================
+[ 138.953701] [ T2164] Disabling lock debugging due to kernel taint
+
+Link: https://lkml.kernel.org/r/2f9135c7866c6e0d06e960993b8a5674a9ebc7ec.1771938394.git.ritesh.list@gmail.com
+Fixes: 0207df4fa1a8 ("kernel/memremap, kasan: make ZONE_DEVICE with work with KASAN")
+Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
+Reported-by: Venkat Rao Bagalkote <venkat88@linux.ibm.com>
+Reviewed-by: Alexander Potapenko <glider@google.com>
+Cc: Andrey Konovalov <andreyknvl@gmail.com>
+Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: "Ritesh Harjani (IBM)" <ritesh.list@gmail.com>
+Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/kasan/init.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/mm/kasan/init.c
++++ b/mm/kasan/init.c
+@@ -292,7 +292,7 @@ static void kasan_free_pte(pte_t *pte_st
+ return;
+ }
+
+- pte_free_kernel(&init_mm, (pte_t *)page_to_virt(pmd_page(*pmd)));
++ pte_free_kernel(&init_mm, pte_start);
+ pmd_clear(pmd);
+ }
+
+@@ -307,7 +307,7 @@ static void kasan_free_pmd(pmd_t *pmd_st
+ return;
+ }
+
+- pmd_free(&init_mm, (pmd_t *)page_to_virt(pud_page(*pud)));
++ pmd_free(&init_mm, pmd_start);
+ pud_clear(pud);
+ }
+
+@@ -322,7 +322,7 @@ static void kasan_free_pud(pud_t *pud_st
+ return;
+ }
+
+- pud_free(&init_mm, (pud_t *)page_to_virt(p4d_page(*p4d)));
++ pud_free(&init_mm, pud_start);
+ p4d_clear(p4d);
+ }
+
+@@ -337,7 +337,7 @@ static void kasan_free_p4d(p4d_t *p4d_st
+ return;
+ }
+
+- p4d_free(&init_mm, (p4d_t *)page_to_virt(pgd_page(*pgd)));
++ p4d_free(&init_mm, p4d_start);
+ pgd_clear(pgd);
+ }
+
ocfs2-validate-inline-data-i_size-during-inode-read.patch
ocfs2-fix-out-of-bounds-write-in-ocfs2_write_end_inline.patch
checkpatch-add-support-for-assisted-by-tag.patch
+x86-64-rename-misleadingly-named-__copy_user_nocache-function.patch
+x86-rename-and-clean-up-__copy_from_user_inatomic_nocache.patch
+x86-64-arm64-powerpc-clean-up-and-rename-__copy_from_user_flushcache.patch
+kvm-x86-use-scratch-field-in-mmio-fragment-to-hold-small-write-values.patch
+asoc-qcom-q6apm-move-component-registration-to-unmanaged-version.patch
+mm-kasan-fix-double-free-for-kasan-pxds.patch
+mm-blk-cgroup-fix-use-after-free-in-cgwb_release_workfn.patch
+media-vidtv-fix-nfeeds-state-corruption-on-start_streaming-failure.patch
+media-mediatek-vcodec-fix-use-after-free-in-encoder-release-path.patch
+media-em28xx-fix-use-after-free-in-em28xx_v4l2_open.patch
+hwmon-powerz-fix-use-after-free-on-usb-disconnect.patch
+alsa-6fire-fix-use-after-free-on-disconnect.patch
+bcache-fix-cached_dev.sb_bio-use-after-free-and-crash.patch
--- /dev/null
+From 809b997a5ce945ab470f70c187048fe4f5df20bf Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Mon, 30 Mar 2026 14:52:45 -0700
+Subject: x86-64/arm64/powerpc: clean up and rename __copy_from_user_flushcache
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 809b997a5ce945ab470f70c187048fe4f5df20bf upstream.
+
+This finishes the work on these odd functions that were only implemented
+by a handful of architectures.
+
+The 'flushcache' function was only used from the iterator code, and
+let's make it do the same thing that the nontemporal version does:
+remove the two underscores and add the user address checking.
+
+Yes, yes, the user address checking is also done at iovec import time,
+but we have long since walked away from the old double-underscore thing
+where we try to avoid address checking overhead at access time, and
+these functions shouldn't be so special and old-fashioned.
+
+The arm64 version already did the address check, in fact, so there it's
+just a matter of renaming it. For powerpc and x86-64 we now do the
+proper user access boilerplate.
+
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/uaccess.h | 2 +-
+ arch/powerpc/include/asm/uaccess.h | 3 +--
+ arch/powerpc/lib/pmem.c | 11 ++++++-----
+ arch/x86/include/asm/uaccess_64.h | 8 ++++----
+ arch/x86/lib/usercopy_64.c | 8 ++++----
+ lib/iov_iter.c | 2 +-
+ 6 files changed, 17 insertions(+), 17 deletions(-)
+
+--- a/arch/arm64/include/asm/uaccess.h
++++ b/arch/arm64/include/asm/uaccess.h
+@@ -480,7 +480,7 @@ extern __must_check long strnlen_user(co
+ #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+ extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);
+
+-static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
++static inline size_t copy_from_user_flushcache(void *dst, const void __user *src, size_t size)
+ {
+ kasan_check_write(dst, size);
+ return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -407,8 +407,7 @@ copy_mc_to_user(void __user *to, const v
+ }
+ #endif
+
+-extern long __copy_from_user_flushcache(void *dst, const void __user *src,
+- unsigned size);
++extern size_t copy_from_user_flushcache(void *dst, const void __user *src, size_t size);
+
+ static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
+ {
+--- a/arch/powerpc/lib/pmem.c
++++ b/arch/powerpc/lib/pmem.c
+@@ -66,15 +66,16 @@ EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
+ /*
+ * CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE symbols
+ */
+-long __copy_from_user_flushcache(void *dest, const void __user *src,
+- unsigned size)
++size_t copy_from_user_flushcache(void *dest, const void __user *src,
++ size_t size)
+ {
+- unsigned long copied, start = (unsigned long) dest;
++ unsigned long not_copied, start = (unsigned long) dest;
+
+- copied = __copy_from_user(dest, src, size);
++ src = mask_user_address(src);
++ not_copied = __copy_from_user(dest, src, size);
+ clean_pmem_range(start, start + size);
+
+- return copied;
++ return not_copied;
+ }
+
+ void memcpy_flushcache(void *dest, const void *src, size_t size)
+--- a/arch/x86/include/asm/uaccess_64.h
++++ b/arch/x86/include/asm/uaccess_64.h
+@@ -149,7 +149,7 @@ raw_copy_to_user(void __user *dst, const
+
+ #define copy_to_nontemporal copy_to_nontemporal
+ extern size_t copy_to_nontemporal(void *dst, const void *src, size_t size);
+-extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
++extern size_t copy_user_flushcache(void *dst, const void __user *src, size_t size);
+
+ static inline int
+ copy_from_user_inatomic_nontemporal(void *dst, const void __user *src,
+@@ -164,11 +164,11 @@ copy_from_user_inatomic_nontemporal(void
+ return ret;
+ }
+
+-static inline int
+-__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
++static inline size_t
++copy_from_user_flushcache(void *dst, const void __user *src, size_t size)
+ {
+ kasan_check_write(dst, size);
+- return __copy_user_flushcache(dst, src, size);
++ return copy_user_flushcache(dst, src, size);
+ }
+
+ /*
+--- a/arch/x86/lib/usercopy_64.c
++++ b/arch/x86/lib/usercopy_64.c
+@@ -43,14 +43,14 @@ void arch_wb_cache_pmem(void *addr, size
+ }
+ EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
+
+-long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
++size_t copy_user_flushcache(void *dst, const void __user *src, size_t size)
+ {
+ unsigned long flushed, dest = (unsigned long) dst;
+- long rc;
++ unsigned long rc;
+
+- stac();
++ src = masked_user_access_begin(src);
+ rc = copy_to_nontemporal(dst, (__force const void *)src, size);
+- clac();
++ user_access_end();
+
+ /*
+ * copy_to_nontemporal() uses non-temporal stores for the bulk
+--- a/lib/iov_iter.c
++++ b/lib/iov_iter.c
+@@ -284,7 +284,7 @@ static __always_inline
+ size_t copy_from_user_iter_flushcache(void __user *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+ {
+- return __copy_from_user_flushcache(to + progress, iter_from, len);
++ return copy_from_user_flushcache(to + progress, iter_from, len);
+ }
+
+ static __always_inline
--- /dev/null
+From d187a86de793f84766ea40b9ade7ac60aabbb4fe Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Mon, 30 Mar 2026 10:39:09 -0700
+Subject: x86-64: rename misleadingly named '__copy_user_nocache()' function
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit d187a86de793f84766ea40b9ade7ac60aabbb4fe upstream.
+
+This function was a masterclass in bad naming, for various historical
+reasons.
+
+It claimed to be a non-cached user copy. It is literally _neither_ of
+those things. It's a specialty memory copy routine that uses
+non-temporal stores for the destination (but not the source), and that
+does exception handling for both source and destination accesses.
+
+Also note that while it works for unaligned targets, any unaligned parts
+(whether at beginning or end) will not use non-temporal stores, since
+only words and quadwords can be non-temporal on x86.
+
+The exception handling means that it _can_ be used for user space
+accesses, but not on its own - it needs all the normal "start user space
+access" logic around it.
+
+But typically the user space access would be the source, not the
+non-temporal destination. That was the original intention of this,
+where the destination was some fragile persistent memory target that
+needed non-temporal stores in order to catch machine check exceptions
+synchronously and deal with them gracefully.
+
+Thus that non-descriptive name: one use case was to copy from user space
+into a non-cached kernel buffer. However, the existing users are a mix
+of that intended use-case, and a couple of random drivers that just did
+this as a performance tweak.
+
+Some of those random drivers then actively misused the user copying
+version (with STAC/CLAC and all) to do kernel copies without ever even
+caring about the exception handling, _just_ for the non-temporal
+destination.
+
+Rename it as a first small step to actually make it halfway sane, and
+change the prototype to be more normal: it doesn't take a user pointer
+unless the caller has done the proper conversion, and the argument size
+is the full size_t (it still won't actually copy more than 4GB in one
+go, but there's also no reason to silently truncate the size argument in
+the caller).
+
+Finally, use this now sanely named function in the NTB code, which
+mis-used a user copy version (with STAC/CLAC and all) of this interface
+despite it not actually being a user copy at all.
+
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/uaccess_64.h | 5 +++--
+ arch/x86/lib/copy_user_uncached_64.S | 6 +++---
+ arch/x86/lib/usercopy_64.c | 4 ++--
+ drivers/infiniband/sw/rdmavt/qp.c | 8 +++-----
+ drivers/ntb/ntb_transport.c | 7 ++++---
+ tools/objtool/check.c | 2 +-
+ 6 files changed, 16 insertions(+), 16 deletions(-)
+
+--- a/arch/x86/include/asm/uaccess_64.h
++++ b/arch/x86/include/asm/uaccess_64.h
+@@ -147,7 +147,8 @@ raw_copy_to_user(void __user *dst, const
+ return copy_user_generic((__force void *)dst, src, size);
+ }
+
+-extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size);
++#define copy_to_nontemporal copy_to_nontemporal
++extern size_t copy_to_nontemporal(void *dst, const void *src, size_t size);
+ extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
+
+ static inline int
+@@ -157,7 +158,7 @@ __copy_from_user_inatomic_nocache(void *
+ long ret;
+ kasan_check_write(dst, size);
+ stac();
+- ret = __copy_user_nocache(dst, src, size);
++ ret = copy_to_nontemporal(dst, (__force const void *)src, size);
+ clac();
+ return ret;
+ }
+--- a/arch/x86/lib/copy_user_uncached_64.S
++++ b/arch/x86/lib/copy_user_uncached_64.S
+@@ -27,7 +27,7 @@
+ * Output:
+ * rax uncopied bytes or 0 if successful.
+ */
+-SYM_FUNC_START(__copy_user_nocache)
++SYM_FUNC_START(copy_to_nontemporal)
+ ANNOTATE_NOENDBR
+ /* If destination is not 7-byte aligned, we'll have to align it */
+ testb $7,%dil
+@@ -240,5 +240,5 @@ _ASM_EXTABLE_UA(95b, .Ldone)
+ _ASM_EXTABLE_UA(52b, .Ldone0)
+ _ASM_EXTABLE_UA(53b, .Ldone0)
+
+-SYM_FUNC_END(__copy_user_nocache)
+-EXPORT_SYMBOL(__copy_user_nocache)
++SYM_FUNC_END(copy_to_nontemporal)
++EXPORT_SYMBOL(copy_to_nontemporal)
+--- a/arch/x86/lib/usercopy_64.c
++++ b/arch/x86/lib/usercopy_64.c
+@@ -49,11 +49,11 @@ long __copy_user_flushcache(void *dst, c
+ long rc;
+
+ stac();
+- rc = __copy_user_nocache(dst, src, size);
++ rc = copy_to_nontemporal(dst, (__force const void *)src, size);
+ clac();
+
+ /*
+- * __copy_user_nocache() uses non-temporal stores for the bulk
++ * copy_to_nontemporal() uses non-temporal stores for the bulk
+ * of the transfer, but we need to manually flush if the
+ * transfer is unaligned. A cached memory copy is used when
+ * destination or size is not naturally aligned. That is:
+--- a/drivers/infiniband/sw/rdmavt/qp.c
++++ b/drivers/infiniband/sw/rdmavt/qp.c
+@@ -92,12 +92,10 @@ static int rvt_wss_llc_size(void)
+ static void cacheless_memcpy(void *dst, void *src, size_t n)
+ {
+ /*
+- * Use the only available X64 cacheless copy. Add a __user cast
+- * to quiet sparse. The src agument is already in the kernel so
+- * there are no security issues. The extra fault recovery machinery
+- * is not invoked.
++ * Use the only available X64 cacheless copy.
++ * The extra fault recovery machinery is not invoked.
+ */
+- __copy_user_nocache(dst, (void __user *)src, n);
++ copy_to_nontemporal(dst, src, n);
+ }
+
+ void rvt_wss_exit(struct rvt_dev_info *rdi)
+--- a/drivers/ntb/ntb_transport.c
++++ b/drivers/ntb/ntb_transport.c
+@@ -1810,12 +1810,13 @@ static void ntb_tx_copy_callback(void *d
+
+ static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
+ {
+-#ifdef ARCH_HAS_NOCACHE_UACCESS
++#ifdef copy_to_nontemporal
+ /*
+ * Using non-temporal mov to improve performance on non-cached
+- * writes, even though we aren't actually copying from user space.
++ * writes. This only works if __iomem is strictly memory-like,
++ * but that is the case on x86-64
+ */
+- __copy_from_user_inatomic_nocache(offset, entry->buf, entry->len);
++ copy_to_nontemporal(offset, entry->buf, entry->len);
+ #else
+ memcpy_toio(offset, entry->buf, entry->len);
+ #endif
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -1207,7 +1207,7 @@ static const char *uaccess_safe_builtin[
+ "copy_mc_enhanced_fast_string",
+ "rep_stos_alternative",
+ "rep_movs_alternative",
+- "__copy_user_nocache",
++ "copy_to_nontemporal",
+ NULL
+ };
+
--- /dev/null
+From 5de7bcaadf160c1716b20a263cf8f5b06f658959 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Mon, 30 Mar 2026 13:11:07 -0700
+Subject: x86: rename and clean up __copy_from_user_inatomic_nocache()
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 5de7bcaadf160c1716b20a263cf8f5b06f658959 upstream.
+
+Similarly to the previous commit, this renames the somewhat confusingly
+named function. But in this case, it was at least less confusing: the
+__copy_from_user_inatomic_nocache is indeed copying from user memory,
+and it is indeed ok to be used in an atomic context, so it will not warn
+about it.
+
+But the previous commit also removed the NTB mis-use of the
+__copy_from_user_inatomic_nocache() function, and as a result every
+call-site is now _actually_ doing a real user copy. That means that we
+can now do the proper user pointer verification too.
+
+End result: add proper address checking, remove the double underscores,
+and change the "nocache" to "nontemporal" to more accurately describe
+what this x86-only function actually does. It might be worth noting
+that only the target is non-temporal: the actual user accesses are
+normal memory accesses.
+
+Also worth noting is that non-x86 targets (and on older 32-bit x86 CPU's
+before XMM2 in the Pentium III) we end up just falling back on a regular
+user copy, so nothing can actually depend on the non-temporal semantics,
+but that has always been true.
+
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/uaccess.h | 2 +-
+ arch/x86/include/asm/uaccess_32.h | 8 +-------
+ arch/x86/include/asm/uaccess_64.h | 3 ++-
+ arch/x86/lib/usercopy_32.c | 9 +++++----
+ drivers/gpu/drm/i915/i915_gem.c | 2 +-
+ drivers/gpu/drm/qxl/qxl_ioctl.c | 2 +-
+ include/linux/uaccess.h | 11 ++++++++---
+ lib/iov_iter.c | 2 +-
+ 8 files changed, 20 insertions(+), 19 deletions(-)
+
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -507,7 +507,7 @@ extern struct movsl_mask {
+ } ____cacheline_aligned_in_smp movsl_mask;
+ #endif
+
+-#define ARCH_HAS_NOCACHE_UACCESS 1
++#define ARCH_HAS_NONTEMPORAL_UACCESS 1
+
+ /*
+ * The "unsafe" user accesses aren't really "unsafe", but the naming
+--- a/arch/x86/include/asm/uaccess_32.h
++++ b/arch/x86/include/asm/uaccess_32.h
+@@ -26,13 +26,7 @@ raw_copy_from_user(void *to, const void
+ return __copy_user_ll(to, (__force const void *)from, n);
+ }
+
+-static __always_inline unsigned long
+-__copy_from_user_inatomic_nocache(void *to, const void __user *from,
+- unsigned long n)
+-{
+- return __copy_from_user_ll_nocache_nozero(to, from, n);
+-}
+-
++unsigned long __must_check copy_from_user_inatomic_nontemporal(void *, const void __user *, unsigned long n);
+ unsigned long __must_check clear_user(void __user *mem, unsigned long len);
+ unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
+
+--- a/arch/x86/include/asm/uaccess_64.h
++++ b/arch/x86/include/asm/uaccess_64.h
+@@ -152,11 +152,12 @@ extern size_t copy_to_nontemporal(void *
+ extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
+
+ static inline int
+-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
++copy_from_user_inatomic_nontemporal(void *dst, const void __user *src,
+ unsigned size)
+ {
+ long ret;
+ kasan_check_write(dst, size);
++ src = mask_user_address(src);
+ stac();
+ ret = copy_to_nontemporal(dst, (__force const void *)src, size);
+ clac();
+--- a/arch/x86/lib/usercopy_32.c
++++ b/arch/x86/lib/usercopy_32.c
+@@ -322,10 +322,11 @@ unsigned long __copy_user_ll(void *to, c
+ }
+ EXPORT_SYMBOL(__copy_user_ll);
+
+-unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
++unsigned long copy_from_user_inatomic_nontemporal(void *to, const void __user *from,
+ unsigned long n)
+ {
+- __uaccess_begin_nospec();
++ if (!user_access_begin(from, n))
++ return n;
+ #ifdef CONFIG_X86_INTEL_USERCOPY
+ if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
+ n = __copy_user_intel_nocache(to, from, n);
+@@ -334,7 +335,7 @@ unsigned long __copy_from_user_ll_nocach
+ #else
+ __copy_user(to, from, n);
+ #endif
+- __uaccess_end();
++ user_access_end();
+ return n;
+ }
+-EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
++EXPORT_SYMBOL(copy_from_user_inatomic_nontemporal);
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -519,7 +519,7 @@ ggtt_write(struct io_mapping *mapping,
+
+ /* We can use the cpu mem copy function because this is X86. */
+ vaddr = io_mapping_map_atomic_wc(mapping, base);
+- unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
++ unwritten = copy_from_user_inatomic_nontemporal((void __force *)vaddr + offset,
+ user_data, length);
+ io_mapping_unmap_atomic(vaddr);
+ if (unwritten) {
+--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
++++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
+@@ -182,7 +182,7 @@ static int qxl_process_single_command(st
+
+ /* TODO copy slow path code from i915 */
+ fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
+- unwritten = __copy_from_user_inatomic_nocache
++ unwritten = copy_from_user_inatomic_nontemporal
+ (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK),
+ u64_to_user_ptr(cmd->command), cmd->command_size);
+
+--- a/include/linux/uaccess.h
++++ b/include/linux/uaccess.h
+@@ -320,16 +320,21 @@ static inline size_t probe_subpage_write
+
+ #endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */
+
+-#ifndef ARCH_HAS_NOCACHE_UACCESS
++#ifndef ARCH_HAS_NONTEMPORAL_UACCESS
+
+ static inline __must_check unsigned long
+-__copy_from_user_inatomic_nocache(void *to, const void __user *from,
++copy_from_user_inatomic_nontemporal(void *to, const void __user *from,
+ unsigned long n)
+ {
++ if (can_do_masked_user_access())
++ from = mask_user_address(from);
++ else
++ if (!access_ok(from, n))
++ return n;
+ return __copy_from_user_inatomic(to, from, n);
+ }
+
+-#endif /* ARCH_HAS_NOCACHE_UACCESS */
++#endif /* ARCH_HAS_NONTEMPORAL_UACCESS */
+
+ extern __must_check int check_zeroed_user(const void __user *from, size_t size);
+
+--- a/lib/iov_iter.c
++++ b/lib/iov_iter.c
+@@ -265,7 +265,7 @@ static __always_inline
+ size_t copy_from_user_iter_nocache(void __user *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+ {
+- return __copy_from_user_inatomic_nocache(to + progress, iter_from, len);
++ return copy_from_user_inatomic_nontemporal(to + progress, iter_from, len);
+ }
+
+ size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)