--- /dev/null
+From 07f8d256b5d7918ca15aedb0d4ed4f81327b0c56 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Dec 2020 14:35:18 +0100
+Subject: media: uvcvideo: Allow extra entities
+
+From: Ricardo Ribalda <ribalda@chromium.org>
+
+[ Upstream commit cae79e50d1222010fde8c522410c315f74d35c40 ]
+
+Increase the size of the id, to avoid collisions with entities
+implemented by the driver that are not part of the UVC device.
+
+Entities exposed by the UVC device use IDs 0-255, extra entities
+implemented by the driver (such as the GPIO entity) use IDs 256 and
+up.
+
+Signed-off-by: Ricardo Ribalda <ribalda@chromium.org>
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Stable-dep-of: 0e2ee70291e6 ("media: uvcvideo: Mark invalid entities with id UVC_INVALID_ENTITY_ID")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/usb/uvc/uvc_driver.c | 2 +-
+ drivers/media/usb/uvc/uvcvideo.h | 7 ++++++-
+ 2 files changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 15202269194ad..e1d3e753e80ed 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -1037,7 +1037,7 @@ static const u8 uvc_media_transport_input_guid[16] =
+ UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT;
+ static const u8 uvc_processing_guid[16] = UVC_GUID_UVC_PROCESSING;
+
+-static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id,
++static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id,
+ unsigned int num_pads, unsigned int extra_size)
+ {
+ struct uvc_entity *entity;
+diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
+index 656ab4d9356c2..0e4209dbf307f 100644
+--- a/drivers/media/usb/uvc/uvcvideo.h
++++ b/drivers/media/usb/uvc/uvcvideo.h
+@@ -302,7 +302,12 @@ struct uvc_entity {
+ * chain. */
+ unsigned int flags;
+
+- u8 id;
++ /*
++ * Entities exposed by the UVC device use IDs 0-255, extra entities
++ * implemented by the driver (such as the GPIO entity) use IDs 256 and
++ * up.
++ */
++ u16 id;
+ u16 type;
+ char name[64];
+ u8 guid[16];
+--
+2.53.0
+
--- /dev/null
+From 58f94a0cfefa6086d22fd60e0fecced10d18967f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Dec 2020 14:35:22 +0100
+Subject: media: uvcvideo: Implement UVC_EXT_GPIO_UNIT
+
+From: Ricardo Ribalda <ribalda@chromium.org>
+
+[ Upstream commit 2886477ff98740cc3333cf785e4de0b1ff3d7a28 ]
+
+Some devices can implement a physical switch to disable the input of the
+camera on demand. Think of it like an elegant privacy sticker.
+
+The system can read the status of the privacy switch via a GPIO.
+
+It is important to know the status of the switch, e.g. to notify the
+user when the camera will produce black frames and a videochat
+application is used.
+
+In some systems, the GPIO is connected to the main SoC instead of the
+camera controller, with the connection reported by the system firmware
+(ACPI or DT). In that case, the UVC device isn't aware of the GPIO. We
+need to implement a virtual entity to handle the GPIO fully on the
+driver side.
+
+For example, for ACPI-based systems, the GPIO is reported in the USB
+device object:
+
+ Scope (\_SB.PCI0.XHCI.RHUB.HS07)
+ {
+
+ /.../
+
+ Name (_CRS, ResourceTemplate () // _CRS: Current Resource Settings
+ {
+ GpioIo (Exclusive, PullDefault, 0x0000, 0x0000, IoRestrictionOutputOnly,
+ "\\_SB.PCI0.GPIO", 0x00, ResourceConsumer, ,
+ )
+ { // Pin list
+ 0x0064
+ }
+ })
+ Name (_DSD, Package (0x02) // _DSD: Device-Specific Data
+ {
+ ToUUID ("daffd814-6eba-4d8c-8a91-bc9bbf4aa301") /* Device Properties for _DSD */,
+ Package (0x01)
+ {
+ Package (0x02)
+ {
+ "privacy-gpio",
+ Package (0x04)
+ {
+ \_SB.PCI0.XHCI.RHUB.HS07,
+ Zero,
+ Zero,
+ One
+ }
+ }
+ }
+ })
+ }
+
+Signed-off-by: Ricardo Ribalda <ribalda@chromium.org>
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Stable-dep-of: 0e2ee70291e6 ("media: uvcvideo: Mark invalid entities with id UVC_INVALID_ENTITY_ID")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/usb/uvc/uvc_ctrl.c | 3 +
+ drivers/media/usb/uvc/uvc_driver.c | 127 +++++++++++++++++++++++++++++
+ drivers/media/usb/uvc/uvc_entity.c | 1 +
+ drivers/media/usb/uvc/uvcvideo.h | 16 ++++
+ 4 files changed, 147 insertions(+)
+
+diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
+index 698bf5bb896ec..fc23e53c0d38b 100644
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -2378,6 +2378,9 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
+ } else if (UVC_ENTITY_TYPE(entity) == UVC_ITT_CAMERA) {
+ bmControls = entity->camera.bmControls;
+ bControlSize = entity->camera.bControlSize;
++ } else if (UVC_ENTITY_TYPE(entity) == UVC_EXT_GPIO_UNIT) {
++ bmControls = entity->gpio.bmControls;
++ bControlSize = entity->gpio.bControlSize;
+ }
+
+ /* Remove bogus/blacklisted controls */
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index e1d3e753e80ed..b86e46fa7c0af 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -7,6 +7,7 @@
+ */
+
+ #include <linux/atomic.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/kernel.h>
+ #include <linux/list.h>
+ #include <linux/module.h>
+@@ -1033,6 +1034,7 @@ static int uvc_parse_streaming(struct uvc_device *dev,
+ }
+
+ static const u8 uvc_camera_guid[16] = UVC_GUID_UVC_CAMERA;
++static const u8 uvc_gpio_guid[16] = UVC_GUID_EXT_GPIO_CONTROLLER;
+ static const u8 uvc_media_transport_input_guid[16] =
+ UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT;
+ static const u8 uvc_processing_guid[16] = UVC_GUID_UVC_PROCESSING;
+@@ -1064,6 +1066,9 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id,
+ * is initialized by the caller.
+ */
+ switch (type) {
++ case UVC_EXT_GPIO_UNIT:
++ memcpy(entity->guid, uvc_gpio_guid, 16);
++ break;
+ case UVC_ITT_CAMERA:
+ memcpy(entity->guid, uvc_camera_guid, 16);
+ break;
+@@ -1467,6 +1472,108 @@ static int uvc_parse_control(struct uvc_device *dev)
+ return 0;
+ }
+
++/* -----------------------------------------------------------------------------
++ * Privacy GPIO
++ */
++
++static void uvc_gpio_event(struct uvc_device *dev)
++{
++ struct uvc_entity *unit = dev->gpio_unit;
++ struct uvc_video_chain *chain;
++ u8 new_val;
++
++ if (!unit)
++ return;
++
++ new_val = gpiod_get_value_cansleep(unit->gpio.gpio_privacy);
++
++ /* GPIO entities are always on the first chain. */
++ chain = list_first_entry(&dev->chains, struct uvc_video_chain, list);
++ uvc_ctrl_status_event(chain, unit->controls, &new_val);
++}
++
++static int uvc_gpio_get_cur(struct uvc_device *dev, struct uvc_entity *entity,
++ u8 cs, void *data, u16 size)
++{
++ if (cs != UVC_CT_PRIVACY_CONTROL || size < 1)
++ return -EINVAL;
++
++ *(u8 *)data = gpiod_get_value_cansleep(entity->gpio.gpio_privacy);
++
++ return 0;
++}
++
++static int uvc_gpio_get_info(struct uvc_device *dev, struct uvc_entity *entity,
++ u8 cs, u8 *caps)
++{
++ if (cs != UVC_CT_PRIVACY_CONTROL)
++ return -EINVAL;
++
++ *caps = UVC_CONTROL_CAP_GET | UVC_CONTROL_CAP_AUTOUPDATE;
++ return 0;
++}
++
++static irqreturn_t uvc_gpio_irq(int irq, void *data)
++{
++ struct uvc_device *dev = data;
++
++ uvc_gpio_event(dev);
++ return IRQ_HANDLED;
++}
++
++static int uvc_gpio_parse(struct uvc_device *dev)
++{
++ struct uvc_entity *unit;
++ struct gpio_desc *gpio_privacy;
++ int irq;
++
++ gpio_privacy = devm_gpiod_get_optional(&dev->udev->dev, "privacy",
++ GPIOD_IN);
++ if (IS_ERR_OR_NULL(gpio_privacy))
++ return PTR_ERR_OR_ZERO(gpio_privacy);
++
++ unit = uvc_alloc_entity(UVC_EXT_GPIO_UNIT, UVC_EXT_GPIO_UNIT_ID, 0, 1);
++ if (!unit)
++ return -ENOMEM;
++
++ irq = gpiod_to_irq(gpio_privacy);
++ if (irq < 0) {
++ if (irq != EPROBE_DEFER)
++ dev_err(&dev->udev->dev,
++ "No IRQ for privacy GPIO (%d)\n", irq);
++ return irq;
++ }
++
++ unit->gpio.gpio_privacy = gpio_privacy;
++ unit->gpio.irq = irq;
++ unit->gpio.bControlSize = 1;
++ unit->gpio.bmControls = (u8 *)unit + sizeof(*unit);
++ unit->gpio.bmControls[0] = 1;
++ unit->get_cur = uvc_gpio_get_cur;
++ unit->get_info = uvc_gpio_get_info;
++ strncpy(unit->name, "GPIO", sizeof(unit->name) - 1);
++
++ list_add_tail(&unit->list, &dev->entities);
++
++ dev->gpio_unit = unit;
++
++ return 0;
++}
++
++static int uvc_gpio_init_irq(struct uvc_device *dev)
++{
++ struct uvc_entity *unit = dev->gpio_unit;
++
++ if (!unit || unit->gpio.irq < 0)
++ return 0;
++
++ return devm_request_threaded_irq(&dev->udev->dev, unit->gpio.irq, NULL,
++ uvc_gpio_irq,
++ IRQF_ONESHOT | IRQF_TRIGGER_FALLING |
++ IRQF_TRIGGER_RISING,
++ "uvc_privacy_gpio", dev);
++}
++
+ /* ------------------------------------------------------------------------
+ * UVC device scan
+ */
+@@ -1988,6 +2095,13 @@ static int uvc_scan_device(struct uvc_device *dev)
+ return -1;
+ }
+
++ /* Add GPIO entity to the first chain. */
++ if (dev->gpio_unit) {
++ chain = list_first_entry(&dev->chains,
++ struct uvc_video_chain, list);
++ list_add_tail(&dev->gpio_unit->chain, &chain->entities);
++ }
++
+ return 0;
+ }
+
+@@ -2350,6 +2464,12 @@ static int uvc_probe(struct usb_interface *intf,
+ goto error;
+ }
+
++ /* Parse the associated GPIOs. */
++ if (uvc_gpio_parse(dev) < 0) {
++ uvc_trace(UVC_TRACE_PROBE, "Unable to parse UVC GPIOs\n");
++ goto error;
++ }
++
+ uvc_printk(KERN_INFO, "Found UVC %u.%02x device %s (%04x:%04x)\n",
+ dev->uvc_version >> 8, dev->uvc_version & 0xff,
+ udev->product ? udev->product : "<unnamed>",
+@@ -2394,6 +2514,13 @@ static int uvc_probe(struct usb_interface *intf,
+ "supported.\n", ret);
+ }
+
++ ret = uvc_gpio_init_irq(dev);
++ if (ret < 0) {
++ dev_err(&dev->udev->dev,
++ "Unable to request privacy GPIO IRQ (%d)\n", ret);
++ goto error;
++ }
++
+ uvc_trace(UVC_TRACE_PROBE, "UVC device initialized.\n");
+ usb_enable_autosuspend(udev);
+ return 0;
+diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c
+index 7c9895377118c..96e965a16d061 100644
+--- a/drivers/media/usb/uvc/uvc_entity.c
++++ b/drivers/media/usb/uvc/uvc_entity.c
+@@ -105,6 +105,7 @@ static int uvc_mc_init_entity(struct uvc_video_chain *chain,
+ case UVC_OTT_DISPLAY:
+ case UVC_OTT_MEDIA_TRANSPORT_OUTPUT:
+ case UVC_EXTERNAL_VENDOR_SPECIFIC:
++ case UVC_EXT_GPIO_UNIT:
+ default:
+ function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
+ break;
+diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
+index 0e4209dbf307f..e9eef2170d866 100644
+--- a/drivers/media/usb/uvc/uvcvideo.h
++++ b/drivers/media/usb/uvc/uvcvideo.h
+@@ -6,6 +6,7 @@
+ #error "The uvcvideo.h header is deprecated, use linux/uvcvideo.h instead."
+ #endif /* __KERNEL__ */
+
++#include <linux/atomic.h>
+ #include <linux/kernel.h>
+ #include <linux/poll.h>
+ #include <linux/usb.h>
+@@ -37,6 +38,8 @@
+ (UVC_ENTITY_IS_TERM(entity) && \
+ ((entity)->type & 0x8000) == UVC_TERM_OUTPUT)
+
++#define UVC_EXT_GPIO_UNIT 0x7ffe
++#define UVC_EXT_GPIO_UNIT_ID 0x100
+
+ /* ------------------------------------------------------------------------
+ * GUIDs
+@@ -56,6 +59,9 @@
+ #define UVC_GUID_UVC_SELECTOR \
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02}
++#define UVC_GUID_EXT_GPIO_CONTROLLER \
++ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x03}
+
+ #define UVC_GUID_FORMAT_MJPEG \
+ { 'M', 'J', 'P', 'G', 0x00, 0x00, 0x10, 0x00, \
+@@ -213,6 +219,7 @@
+ * Structures.
+ */
+
++struct gpio_desc;
+ struct uvc_device;
+
+ /* TODO: Put the most frequently accessed fields at the beginning of
+@@ -354,6 +361,13 @@ struct uvc_entity {
+ u8 *bmControls;
+ u8 *bmControlsType;
+ } extension;
++
++ struct {
++ u8 bControlSize;
++ u8 *bmControls;
++ struct gpio_desc *gpio_privacy;
++ int irq;
++ } gpio;
+ };
+
+ u8 bNrInPins;
+@@ -696,6 +710,8 @@ struct uvc_device {
+ struct uvc_control *ctrl;
+ const void *data;
+ } async_ctrl;
++
++ struct uvc_entity *gpio_unit;
+ };
+
+ enum uvc_handle_state {
+--
+2.53.0
+
--- /dev/null
+From 3d6f50159c10b15a3f774d28f2aa4576d10ae164 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 16:08:16 +0000
+Subject: media: uvcvideo: Mark invalid entities with id UVC_INVALID_ENTITY_ID
+
+From: Thadeu Lima de Souza Cascardo <cascardo@igalia.com>
+
+[ Upstream commit 0e2ee70291e64a30fe36960c85294726d34a103e ]
+
+Per UVC 1.1+ specification 3.7.2, units and terminals must have a non-zero
+unique ID.
+
+```
+Each Unit and Terminal within the video function is assigned a unique
+identification number, the Unit ID (UID) or Terminal ID (TID), contained in
+the bUnitID or bTerminalID field of the descriptor. The value 0x00 is
+reserved for undefined ID,
+```
+
+If we add a new entity with id 0 or a duplicated ID, it will be marked
+as UVC_INVALID_ENTITY_ID.
+
+In a previous attempt commit 3dd075fe8ebb ("media: uvcvideo: Require
+entities to have a non-zero unique ID"), we ignored all the invalid units,
+this broke a lot of non-compatible cameras. Hopefully we are more lucky
+this time.
+
+This also prevents some syzkaller reproducers from triggering warnings due
+to a chain of entities referring to themselves. In one particular case, an
+Output Unit is connected to an Input Unit, both with the same ID of 1. But
+when looking up for the source ID of the Output Unit, that same entity is
+found instead of the input entity, which leads to such warnings.
+
+In another case, a backward chain was considered finished as the source ID
+was 0. Later on, that entity was found, but its pads were not valid.
+
+Here is a sample stack trace for one of those cases.
+
+[ 20.650953] usb 1-1: new high-speed USB device number 2 using dummy_hcd
+[ 20.830206] usb 1-1: Using ep0 maxpacket: 8
+[ 20.833501] usb 1-1: config 0 descriptor??
+[ 21.038518] usb 1-1: string descriptor 0 read error: -71
+[ 21.038893] usb 1-1: Found UVC 0.00 device <unnamed> (2833:0201)
+[ 21.039299] uvcvideo 1-1:0.0: Entity type for entity Output 1 was not initialized!
+[ 21.041583] uvcvideo 1-1:0.0: Entity type for entity Input 1 was not initialized!
+[ 21.042218] ------------[ cut here ]------------
+[ 21.042536] WARNING: CPU: 0 PID: 9 at drivers/media/mc/mc-entity.c:1147 media_create_pad_link+0x2c4/0x2e0
+[ 21.043195] Modules linked in:
+[ 21.043535] CPU: 0 UID: 0 PID: 9 Comm: kworker/0:1 Not tainted 6.11.0-rc7-00030-g3480e43aeccf #444
+[ 21.044101] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.15.0-1 04/01/2014
+[ 21.044639] Workqueue: usb_hub_wq hub_event
+[ 21.045100] RIP: 0010:media_create_pad_link+0x2c4/0x2e0
+[ 21.045508] Code: fe e8 20 01 00 00 b8 f4 ff ff ff 48 83 c4 30 5b 41 5c 41 5d 41 5e 41 5f 5d c3 cc cc cc cc 0f 0b eb e9 0f 0b eb 0a 0f 0b eb 06 <0f> 0b eb 02 0f 0b b8 ea ff ff ff eb d4 66 2e 0f 1f 84 00 00 00 00
+[ 21.046801] RSP: 0018:ffffc9000004b318 EFLAGS: 00010246
+[ 21.047227] RAX: ffff888004e5d458 RBX: 0000000000000000 RCX: ffffffff818fccf1
+[ 21.047719] RDX: 000000000000007b RSI: 0000000000000000 RDI: ffff888004313290
+[ 21.048241] RBP: ffff888004313290 R08: 0001ffffffffffff R09: 0000000000000000
+[ 21.048701] R10: 0000000000000013 R11: 0001888004313290 R12: 0000000000000003
+[ 21.049138] R13: ffff888004313080 R14: ffff888004313080 R15: 0000000000000000
+[ 21.049648] FS: 0000000000000000(0000) GS:ffff88803ec00000(0000) knlGS:0000000000000000
+[ 21.050271] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 21.050688] CR2: 0000592cc27635b0 CR3: 000000000431c000 CR4: 0000000000750ef0
+[ 21.051136] PKRU: 55555554
+[ 21.051331] Call Trace:
+[ 21.051480] <TASK>
+[ 21.051611] ? __warn+0xc4/0x210
+[ 21.051861] ? media_create_pad_link+0x2c4/0x2e0
+[ 21.052252] ? report_bug+0x11b/0x1a0
+[ 21.052540] ? trace_hardirqs_on+0x31/0x40
+[ 21.052901] ? handle_bug+0x3d/0x70
+[ 21.053197] ? exc_invalid_op+0x1a/0x50
+[ 21.053511] ? asm_exc_invalid_op+0x1a/0x20
+[ 21.053924] ? media_create_pad_link+0x91/0x2e0
+[ 21.054364] ? media_create_pad_link+0x2c4/0x2e0
+[ 21.054834] ? media_create_pad_link+0x91/0x2e0
+[ 21.055131] ? _raw_spin_unlock+0x1e/0x40
+[ 21.055441] ? __v4l2_device_register_subdev+0x202/0x210
+[ 21.055837] uvc_mc_register_entities+0x358/0x400
+[ 21.056144] uvc_register_chains+0x1fd/0x290
+[ 21.056413] uvc_probe+0x380e/0x3dc0
+[ 21.056676] ? __lock_acquire+0x5aa/0x26e0
+[ 21.056946] ? find_held_lock+0x33/0xa0
+[ 21.057196] ? kernfs_activate+0x70/0x80
+[ 21.057533] ? usb_match_dynamic_id+0x1b/0x70
+[ 21.057811] ? find_held_lock+0x33/0xa0
+[ 21.058047] ? usb_match_dynamic_id+0x55/0x70
+[ 21.058330] ? lock_release+0x124/0x260
+[ 21.058657] ? usb_match_one_id_intf+0xa2/0x100
+[ 21.058997] usb_probe_interface+0x1ba/0x330
+[ 21.059399] really_probe+0x1ba/0x4c0
+[ 21.059662] __driver_probe_device+0xb2/0x180
+[ 21.059944] driver_probe_device+0x5a/0x100
+[ 21.060170] __device_attach_driver+0xe9/0x160
+[ 21.060427] ? __pfx___device_attach_driver+0x10/0x10
+[ 21.060872] bus_for_each_drv+0xa9/0x100
+[ 21.061312] __device_attach+0xed/0x190
+[ 21.061812] device_initial_probe+0xe/0x20
+[ 21.062229] bus_probe_device+0x4d/0xd0
+[ 21.062590] device_add+0x308/0x590
+[ 21.062912] usb_set_configuration+0x7b6/0xaf0
+[ 21.063403] usb_generic_driver_probe+0x36/0x80
+[ 21.063714] usb_probe_device+0x7b/0x130
+[ 21.063936] really_probe+0x1ba/0x4c0
+[ 21.064111] __driver_probe_device+0xb2/0x180
+[ 21.064577] driver_probe_device+0x5a/0x100
+[ 21.065019] __device_attach_driver+0xe9/0x160
+[ 21.065403] ? __pfx___device_attach_driver+0x10/0x10
+[ 21.065820] bus_for_each_drv+0xa9/0x100
+[ 21.066094] __device_attach+0xed/0x190
+[ 21.066535] device_initial_probe+0xe/0x20
+[ 21.066992] bus_probe_device+0x4d/0xd0
+[ 21.067250] device_add+0x308/0x590
+[ 21.067501] usb_new_device+0x347/0x610
+[ 21.067817] hub_event+0x156b/0x1e30
+[ 21.068060] ? process_scheduled_works+0x48b/0xaf0
+[ 21.068337] process_scheduled_works+0x5a3/0xaf0
+[ 21.068668] worker_thread+0x3cf/0x560
+[ 21.068932] ? kthread+0x109/0x1b0
+[ 21.069133] kthread+0x197/0x1b0
+[ 21.069343] ? __pfx_worker_thread+0x10/0x10
+[ 21.069598] ? __pfx_kthread+0x10/0x10
+[ 21.069908] ret_from_fork+0x32/0x40
+[ 21.070169] ? __pfx_kthread+0x10/0x10
+[ 21.070424] ret_from_fork_asm+0x1a/0x30
+[ 21.070737] </TASK>
+
+Reported-by: syzbot+0584f746fde3d52b4675@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=0584f746fde3d52b4675
+Reported-by: syzbot+dd320d114deb3f5bb79b@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=dd320d114deb3f5bb79b
+Reported-by: Youngjun Lee <yjjuny.lee@samsung.com>
+Fixes: a3fbc2e6bb05 ("media: mc-entity.c: use WARN_ON, validate link pads")
+Cc: stable@vger.kernel.org
+Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@igalia.com>
+Co-developed-by: Ricardo Ribalda <ribalda@chromium.org>
+Signed-off-by: Ricardo Ribalda <ribalda@chromium.org>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Reviewed-by: Hans de Goede <hansg@kernel.org>
+Signed-off-by: Hans de Goede <hansg@kernel.org>
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/usb/uvc/uvc_driver.c | 73 +++++++++++++++++++-----------
+ drivers/media/usb/uvc/uvcvideo.h | 2 +
+ 2 files changed, 48 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index b86e46fa7c0af..bf1456caae2d2 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -413,6 +413,9 @@ struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id)
+ {
+ struct uvc_entity *entity;
+
++ if (id == UVC_INVALID_ENTITY_ID)
++ return NULL;
++
+ list_for_each_entry(entity, &dev->entities, list) {
+ if (entity->id == id)
+ return entity;
+@@ -1039,14 +1042,27 @@ static const u8 uvc_media_transport_input_guid[16] =
+ UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT;
+ static const u8 uvc_processing_guid[16] = UVC_GUID_UVC_PROCESSING;
+
+-static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id,
+- unsigned int num_pads, unsigned int extra_size)
++static struct uvc_entity *uvc_alloc_new_entity(struct uvc_device *dev, u16 type,
++ u16 id, unsigned int num_pads,
++ unsigned int extra_size)
+ {
+ struct uvc_entity *entity;
+ unsigned int num_inputs;
+ unsigned int size;
+ unsigned int i;
+
++ /* Per UVC 1.1+ spec 3.7.2, the ID should be non-zero. */
++ if (id == 0) {
++ dev_err(&dev->intf->dev, "Found Unit with invalid ID 0\n");
++ id = UVC_INVALID_ENTITY_ID;
++ }
++
++ /* Per UVC 1.1+ spec 3.7.2, the ID is unique. */
++ if (uvc_entity_by_id(dev, id)) {
++ dev_err(&dev->intf->dev, "Found multiple Units with ID %u\n", id);
++ id = UVC_INVALID_ENTITY_ID;
++ }
++
+ extra_size = roundup(extra_size, sizeof(*entity->pads));
+ if (num_pads)
+ num_inputs = type & UVC_TERM_OUTPUT ? num_pads : num_pads - 1;
+@@ -1056,7 +1072,7 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id,
+ + num_inputs;
+ entity = kzalloc(size, GFP_KERNEL);
+ if (entity == NULL)
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+
+ entity->id = id;
+ entity->type = type;
+@@ -1146,10 +1162,10 @@ static int uvc_parse_vendor_control(struct uvc_device *dev,
+ break;
+ }
+
+- unit = uvc_alloc_entity(UVC_VC_EXTENSION_UNIT, buffer[3],
+- p + 1, 2*n);
+- if (unit == NULL)
+- return -ENOMEM;
++ unit = uvc_alloc_new_entity(dev, UVC_VC_EXTENSION_UNIT,
++ buffer[3], p + 1, 2 * n);
++ if (IS_ERR(unit))
++ return PTR_ERR(unit);
+
+ memcpy(unit->guid, &buffer[4], 16);
+ unit->extension.bNumControls = buffer[20];
+@@ -1260,10 +1276,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ return -EINVAL;
+ }
+
+- term = uvc_alloc_entity(type | UVC_TERM_INPUT, buffer[3],
+- 1, n + p);
+- if (term == NULL)
+- return -ENOMEM;
++ term = uvc_alloc_new_entity(dev, type | UVC_TERM_INPUT,
++ buffer[3], 1, n + p);
++ if (IS_ERR(term))
++ return PTR_ERR(term);
+
+ if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA) {
+ term->camera.bControlSize = n;
+@@ -1319,10 +1335,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ return 0;
+ }
+
+- term = uvc_alloc_entity(type | UVC_TERM_OUTPUT, buffer[3],
+- 1, 0);
+- if (term == NULL)
+- return -ENOMEM;
++ term = uvc_alloc_new_entity(dev, type | UVC_TERM_OUTPUT,
++ buffer[3], 1, 0);
++ if (IS_ERR(term))
++ return PTR_ERR(term);
+
+ memcpy(term->baSourceID, &buffer[7], 1);
+
+@@ -1343,9 +1359,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ return -EINVAL;
+ }
+
+- unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, 0);
+- if (unit == NULL)
+- return -ENOMEM;
++ unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
++ p + 1, 0);
++ if (IS_ERR(unit))
++ return PTR_ERR(unit);
+
+ memcpy(unit->baSourceID, &buffer[5], p);
+
+@@ -1367,9 +1384,9 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ return -EINVAL;
+ }
+
+- unit = uvc_alloc_entity(buffer[2], buffer[3], 2, n);
+- if (unit == NULL)
+- return -ENOMEM;
++ unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3], 2, n);
++ if (IS_ERR(unit))
++ return PTR_ERR(unit);
+
+ memcpy(unit->baSourceID, &buffer[4], 1);
+ unit->processing.wMaxMultiplier =
+@@ -1398,9 +1415,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ return -EINVAL;
+ }
+
+- unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, n);
+- if (unit == NULL)
+- return -ENOMEM;
++ unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
++ p + 1, n);
++ if (IS_ERR(unit))
++ return PTR_ERR(unit);
+
+ memcpy(unit->guid, &buffer[4], 16);
+ unit->extension.bNumControls = buffer[20];
+@@ -1532,9 +1550,10 @@ static int uvc_gpio_parse(struct uvc_device *dev)
+ if (IS_ERR_OR_NULL(gpio_privacy))
+ return PTR_ERR_OR_ZERO(gpio_privacy);
+
+- unit = uvc_alloc_entity(UVC_EXT_GPIO_UNIT, UVC_EXT_GPIO_UNIT_ID, 0, 1);
+- if (!unit)
+- return -ENOMEM;
++ unit = uvc_alloc_new_entity(dev, UVC_EXT_GPIO_UNIT,
++ UVC_EXT_GPIO_UNIT_ID, 0, 1);
++ if (IS_ERR(unit))
++ return PTR_ERR(unit);
+
+ irq = gpiod_to_irq(gpio_privacy);
+ if (irq < 0) {
+diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
+index e9eef2170d866..895db550f11db 100644
+--- a/drivers/media/usb/uvc/uvcvideo.h
++++ b/drivers/media/usb/uvc/uvcvideo.h
+@@ -41,6 +41,8 @@
+ #define UVC_EXT_GPIO_UNIT 0x7ffe
+ #define UVC_EXT_GPIO_UNIT_ID 0x100
+
++#define UVC_INVALID_ENTITY_ID 0xffff
++
+ /* ------------------------------------------------------------------------
+ * GUIDs
+ */
+--
+2.53.0
+
--- /dev/null
+From 1dddd8f2ba7626eac0aca525e8d641a8599a76fb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Dec 2020 14:35:17 +0100
+Subject: media: uvcvideo: Move guid to entity
+
+From: Ricardo Ribalda <ribalda@chromium.org>
+
+[ Upstream commit 351509c604dcb065305a165d7552058c2cbc447d ]
+
+Instead of having multiple copies of the entity guid on the code, move
+it to the entity structure.
+
+Signed-off-by: Ricardo Ribalda <ribalda@chromium.org>
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Stable-dep-of: 0e2ee70291e6 ("media: uvcvideo: Mark invalid entities with id UVC_INVALID_ENTITY_ID")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/usb/uvc/uvc_ctrl.c | 30 ++++--------------------------
+ drivers/media/usb/uvc/uvc_driver.c | 25 +++++++++++++++++++++++--
+ drivers/media/usb/uvc/uvcvideo.h | 2 +-
+ 3 files changed, 28 insertions(+), 29 deletions(-)
+
+diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
+index 4f67ba3a7c028..698bf5bb896ec 100644
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -827,31 +827,10 @@ static void uvc_set_le_value(struct uvc_control_mapping *mapping,
+ * Terminal and unit management
+ */
+
+-static const u8 uvc_processing_guid[16] = UVC_GUID_UVC_PROCESSING;
+-static const u8 uvc_camera_guid[16] = UVC_GUID_UVC_CAMERA;
+-static const u8 uvc_media_transport_input_guid[16] =
+- UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT;
+-
+ static int uvc_entity_match_guid(const struct uvc_entity *entity,
+- const u8 guid[16])
++ const u8 guid[16])
+ {
+- switch (UVC_ENTITY_TYPE(entity)) {
+- case UVC_ITT_CAMERA:
+- return memcmp(uvc_camera_guid, guid, 16) == 0;
+-
+- case UVC_ITT_MEDIA_TRANSPORT_INPUT:
+- return memcmp(uvc_media_transport_input_guid, guid, 16) == 0;
+-
+- case UVC_VC_PROCESSING_UNIT:
+- return memcmp(uvc_processing_guid, guid, 16) == 0;
+-
+- case UVC_VC_EXTENSION_UNIT:
+- return memcmp(entity->extension.guidExtensionCode,
+- guid, 16) == 0;
+-
+- default:
+- return 0;
+- }
++ return memcmp(entity->guid, guid, sizeof(entity->guid)) == 0;
+ }
+
+ /* ------------------------------------------------------------------------
+@@ -1882,8 +1861,7 @@ static int uvc_ctrl_fill_xu_info(struct uvc_device *dev,
+ if (data == NULL)
+ return -ENOMEM;
+
+- memcpy(info->entity, ctrl->entity->extension.guidExtensionCode,
+- sizeof(info->entity));
++ memcpy(info->entity, ctrl->entity->guid, sizeof(info->entity));
+ info->index = ctrl->index;
+ info->selector = ctrl->index + 1;
+
+@@ -1989,7 +1967,7 @@ int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
+
+ if (!found) {
+ uvc_trace(UVC_TRACE_CONTROL, "Control %pUl/%u not found.\n",
+- entity->extension.guidExtensionCode, xqry->selector);
++ entity->guid, xqry->selector);
+ return -ENOENT;
+ }
+
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 419fbdbb7a3b8..15202269194ad 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -1032,6 +1032,11 @@ static int uvc_parse_streaming(struct uvc_device *dev,
+ return ret;
+ }
+
++static const u8 uvc_camera_guid[16] = UVC_GUID_UVC_CAMERA;
++static const u8 uvc_media_transport_input_guid[16] =
++ UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT;
++static const u8 uvc_processing_guid[16] = UVC_GUID_UVC_PROCESSING;
++
+ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id,
+ unsigned int num_pads, unsigned int extra_size)
+ {
+@@ -1054,6 +1059,22 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id,
+ entity->id = id;
+ entity->type = type;
+
++ /*
++ * Set the GUID for standard entity types. For extension units, the GUID
++ * is initialized by the caller.
++ */
++ switch (type) {
++ case UVC_ITT_CAMERA:
++ memcpy(entity->guid, uvc_camera_guid, 16);
++ break;
++ case UVC_ITT_MEDIA_TRANSPORT_INPUT:
++ memcpy(entity->guid, uvc_media_transport_input_guid, 16);
++ break;
++ case UVC_VC_PROCESSING_UNIT:
++ memcpy(entity->guid, uvc_processing_guid, 16);
++ break;
++ }
++
+ entity->num_links = 0;
+ entity->num_pads = num_pads;
+ entity->pads = ((void *)(entity + 1)) + extra_size;
+@@ -1125,7 +1146,7 @@ static int uvc_parse_vendor_control(struct uvc_device *dev,
+ if (unit == NULL)
+ return -ENOMEM;
+
+- memcpy(unit->extension.guidExtensionCode, &buffer[4], 16);
++ memcpy(unit->guid, &buffer[4], 16);
+ unit->extension.bNumControls = buffer[20];
+ memcpy(unit->baSourceID, &buffer[22], p);
+ unit->extension.bControlSize = buffer[22+p];
+@@ -1376,7 +1397,7 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ if (unit == NULL)
+ return -ENOMEM;
+
+- memcpy(unit->extension.guidExtensionCode, &buffer[4], 16);
++ memcpy(unit->guid, &buffer[4], 16);
+ unit->extension.bNumControls = buffer[20];
+ memcpy(unit->baSourceID, &buffer[22], p);
+ unit->extension.bControlSize = buffer[22+p];
+diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
+index 60a8749c97a9d..656ab4d9356c2 100644
+--- a/drivers/media/usb/uvc/uvcvideo.h
++++ b/drivers/media/usb/uvc/uvcvideo.h
+@@ -305,6 +305,7 @@ struct uvc_entity {
+ u8 id;
+ u16 type;
+ char name[64];
++ u8 guid[16];
+
+ /* Media controller-related fields. */
+ struct video_device *vdev;
+@@ -343,7 +344,6 @@ struct uvc_entity {
+ } selector;
+
+ struct {
+- u8 guidExtensionCode[16];
+ u8 bNumControls;
+ u8 bControlSize;
+ u8 *bmControls;
+--
+2.53.0
+
--- /dev/null
+From 7d49628639436ec50d67f60938938e98ed23569c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Oct 2025 10:36:17 +0000
+Subject: media: uvcvideo: Use heuristic to find stream entity
+
+From: Ricardo Ribalda <ribalda@chromium.org>
+
+[ Upstream commit 758dbc756aad429da11c569c0d067f7fd032bcf7 ]
+
+Some devices, like the Grandstream GUV3100 webcam, have an invalid UVC
+descriptor where multiple entities share the same ID, this is invalid
+and makes it impossible to make a proper entity tree without heuristics.
+
+We have recently introduced a change in the way that we handle invalid
+entities that has caused a regression on broken devices.
+
+Implement a new heuristic to handle these devices properly.
+
+Reported-by: Angel4005 <ooara1337@gmail.com>
+Closes: https://lore.kernel.org/linux-media/CAOzBiVuS7ygUjjhCbyWg-KiNx+HFTYnqH5+GJhd6cYsNLT=DaA@mail.gmail.com/
+Fixes: 0e2ee70291e6 ("media: uvcvideo: Mark invalid entities with id UVC_INVALID_ENTITY_ID")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ricardo Ribalda <ribalda@chromium.org>
+Reviewed-by: Hans de Goede <hansg@kernel.org>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/usb/uvc/uvc_driver.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index bf1456caae2d2..5c07e79430e93 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -443,13 +443,26 @@ static struct uvc_entity *uvc_entity_by_reference(struct uvc_device *dev,
+
+ static struct uvc_streaming *uvc_stream_by_id(struct uvc_device *dev, int id)
+ {
+- struct uvc_streaming *stream;
++ struct uvc_streaming *stream, *last_stream;
++ unsigned int count = 0;
+
+ list_for_each_entry(stream, &dev->streams, list) {
++ count += 1;
++ last_stream = stream;
+ if (stream->header.bTerminalLink == id)
+ return stream;
+ }
+
++ /*
++ * If the streaming entity is referenced by an invalid ID, notify the
++ * user and use heuristics to guess the correct entity.
++ */
++ if (count == 1 && id == UVC_INVALID_ENTITY_ID) {
++ dev_warn(&dev->intf->dev,
++ "UVC non compliance: Invalid USB header. The streaming entity has an invalid ID, guessing the correct one.");
++ return last_stream;
++ }
++
+ return NULL;
+ }
+
+--
+2.53.0
+
--- /dev/null
+From 2402f7cca0951ebc089a06d977ea813980110a3f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Feb 2026 14:05:48 +0100
+Subject: mm/hugetlb: fix copy_hugetlb_page_range() to use ->pt_share_count
+
+From: Jane Chu <jane.chu@oracle.com>
+
+commit 59d9094df3d79 ("mm: hugetlb: independent PMD page table shared
+count") introduced ->pt_share_count dedicated to hugetlb PMD share count
+tracking, but omitted fixing copy_hugetlb_page_range(), leaving the
+function relying on page_count() for tracking that no longer works.
+
+When lazy page table copy for hugetlb is disabled, that is, revert commit
+bcd51a3c679d ("hugetlb: lazy page table copies in fork()") fork()'ing with
+hugetlb PMD sharing quickly lockup -
+
+[ 239.446559] watchdog: BUG: soft lockup - CPU#75 stuck for 27s!
+[ 239.446611] RIP: 0010:native_queued_spin_lock_slowpath+0x7e/0x2e0
+[ 239.446631] Call Trace:
+[ 239.446633] <TASK>
+[ 239.446636] _raw_spin_lock+0x3f/0x60
+[ 239.446639] copy_hugetlb_page_range+0x258/0xb50
+[ 239.446645] copy_page_range+0x22b/0x2c0
+[ 239.446651] dup_mmap+0x3e2/0x770
+[ 239.446654] dup_mm.constprop.0+0x5e/0x230
+[ 239.446657] copy_process+0xd17/0x1760
+[ 239.446660] kernel_clone+0xc0/0x3e0
+[ 239.446661] __do_sys_clone+0x65/0xa0
+[ 239.446664] do_syscall_64+0x82/0x930
+[ 239.446668] ? count_memcg_events+0xd2/0x190
+[ 239.446671] ? syscall_trace_enter+0x14e/0x1f0
+[ 239.446676] ? syscall_exit_work+0x118/0x150
+[ 239.446677] ? arch_exit_to_user_mode_prepare.constprop.0+0x9/0xb0
+[ 239.446681] ? clear_bhb_loop+0x30/0x80
+[ 239.446684] ? clear_bhb_loop+0x30/0x80
+[ 239.446686] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+There are two options to resolve the potential latent issue:
+ 1. warn against PMD sharing in copy_hugetlb_page_range(),
+ 2. fix it.
+This patch opts for the second option.
+While at it, simplify the comment, the details are not actually relevant
+anymore.
+
+Link: https://lkml.kernel.org/r/20250916004520.1604530-1-jane.chu@oracle.com
+Fixes: 59d9094df3d7 ("mm: hugetlb: independent PMD page table shared count")
+Signed-off-by: Jane Chu <jane.chu@oracle.com>
+Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
+Acked-by: Oscar Salvador <osalvador@suse.de>
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: Jann Horn <jannh@google.com>
+Cc: Liu Shixin <liushixin2@huawei.com>
+Cc: Muchun Song <muchun.song@linux.dev>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+(cherry picked from commit 14967a9c7d247841b0312c48dcf8cd29e55a4cc8)
+[ David: We don't have ptdesc and the wrappers, so work directly on the
+ page->pt_share_count. CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING is still
+ called CONFIG_ARCH_WANT_HUGE_PMD_SHARE. ]
+Signed-off-by: David Hildenbrand (Arm) <david@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/hugetlb.c | 13 ++++---------
+ 1 file changed, 4 insertions(+), 9 deletions(-)
+
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index a2cab8f2190f8..8fa34032bc173 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3865,16 +3865,11 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
+ break;
+ }
+
+- /*
+- * If the pagetables are shared don't copy or take references.
+- *
+- * dst_pte == src_pte is the common case of src/dest sharing.
+- * However, src could have 'unshared' and dst shares with
+- * another vma. So page_count of ptep page is checked instead
+- * to reliably determine whether pte is shared.
+- */
+- if (page_count(virt_to_page(dst_pte)) > 1)
++#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
++ /* If the pagetables are shared, there is nothing to do */
++ if (atomic_read(&virt_to_page(dst_pte)->pt_share_count))
+ continue;
++#endif
+
+ dst_ptl = huge_pte_lock(h, dst, dst_pte);
+ src_ptl = huge_pte_lockptr(h, src, src_pte);
+--
+2.53.0
+
--- /dev/null
+From 603f5dc23e52dd102b5f877d470a6fbbb436e4d8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Feb 2026 14:05:52 +0100
+Subject: mm/hugetlb: fix excessive IPI broadcasts when unsharing PMD tables
+ using mmu_gather
+
+From: David Hildenbrand (Red Hat) <david@kernel.org>
+
+As reported, ever since commit 1013af4f585f ("mm/hugetlb: fix
+huge_pmd_unshare() vs GUP-fast race") we can end up in some situations
+where we perform so many IPI broadcasts when unsharing hugetlb PMD page
+tables that it severely regresses some workloads.
+
+In particular, when we fork()+exit(), or when we munmap() a large
+area backed by many shared PMD tables, we perform one IPI broadcast per
+unshared PMD table.
+
+There are two optimizations to be had:
+
+(1) When we process (unshare) multiple such PMD tables, such as during
+ exit(), it is sufficient to send a single IPI broadcast (as long as
+ we respect locking rules) instead of one per PMD table.
+
+ Locking prevents that any of these PMD tables could get reused before
+ we drop the lock.
+
+(2) When we are not the last sharer (> 2 users including us), there is
+ no need to send the IPI broadcast. The shared PMD tables cannot
+ become exclusive (fully unshared) before an IPI will be broadcasted
+ by the last sharer.
+
+ Concurrent GUP-fast could walk into a PMD table just before we
+ unshared it. It could then succeed in grabbing a page from the
+ shared page table even after munmap() etc succeeded (and suppressed
+ an IPI). But there is no difference compared to GUP-fast just
+ sleeping for a while after grabbing the page and re-enabling IRQs.
+
+ Most importantly, GUP-fast will never walk into page tables that are
+ no-longer shared, because the last sharer will issue an IPI
+ broadcast.
+
+ (if ever required, checking whether the PUD changed in GUP-fast
+ after grabbing the page like we do in the PTE case could handle
+ this)
+
+So let's rework PMD sharing TLB flushing + IPI sync to use the mmu_gather
+infrastructure so we can implement these optimizations and demystify the
+code at least a bit. Extend the mmu_gather infrastructure to be able to
+deal with our special hugetlb PMD table sharing implementation.
+
+To make initialization of the mmu_gather easier when working on a single
+VMA (in particular, when dealing with hugetlb), provide
+tlb_gather_mmu_vma().
+
+We'll consolidate the handling for (full) unsharing of PMD tables in
+tlb_unshare_pmd_ptdesc() and tlb_flush_unshared_tables(), and track
+in "struct mmu_gather" whether we had (full) unsharing of PMD tables.
+
+Because locking is very special (concurrent unsharing+reuse must be
+prevented), we disallow deferring flushing to tlb_finish_mmu() and instead
+require an explicit earlier call to tlb_flush_unshared_tables().
+
+>From hugetlb code, we call huge_pmd_unshare_flush() where we make sure
+that the expected lock protecting us from concurrent unsharing+reuse is
+still held.
+
+Check with a VM_WARN_ON_ONCE() in tlb_finish_mmu() that
+tlb_flush_unshared_tables() was properly called earlier.
+
+Document it all properly.
+
+Notes about tlb_remove_table_sync_one() interaction with unsharing:
+
+There are two fairly tricky things:
+
+(1) tlb_remove_table_sync_one() is a NOP on architectures without
+ CONFIG_MMU_GATHER_RCU_TABLE_FREE.
+
+ Here, the assumption is that the previous TLB flush would send an
+ IPI to all relevant CPUs. Careful: some architectures like x86 only
+ send IPIs to all relevant CPUs when tlb->freed_tables is set.
+
+ The relevant architectures should be selecting
+ MMU_GATHER_RCU_TABLE_FREE, but x86 might not do that in stable
+ kernels and it might have been problematic before this patch.
+
+ Also, the arch flushing behavior (independent of IPIs) is different
+ when tlb->freed_tables is set. Do we have to enlighten them to also
+ take care of tlb->unshared_tables? So far we didn't care, so
+ hopefully we are fine. Of course, we could be setting
+ tlb->freed_tables as well, but that might then unnecessarily flush
+ too much, because the semantics of tlb->freed_tables are a bit
+ fuzzy.
+
+ This patch changes nothing in this regard.
+
+(2) tlb_remove_table_sync_one() is not a NOP on architectures with
+ CONFIG_MMU_GATHER_RCU_TABLE_FREE that actually don't need a sync.
+
+ Take x86 as an example: in the common case (!pv, !X86_FEATURE_INVLPGB)
+ we still issue IPIs during TLB flushes and don't actually need the
+ second tlb_remove_table_sync_one().
+
+ This optimization can be implemented on top of this, by checking e.g., in
+ tlb_remove_table_sync_one() whether we really need IPIs. But as
+ described in (1), it really must honor tlb->freed_tables then to
+ send IPIs to all relevant CPUs.
+
+Notes on TLB flushing changes:
+
+(1) Flushing for non-shared PMD tables
+
+ We're converting from flush_hugetlb_tlb_range() to
+ tlb_remove_huge_tlb_entry(). Given that we properly initialize the
+ MMU gather in tlb_gather_mmu_vma() to be hugetlb aware, similar to
+ __unmap_hugepage_range(), that should be fine.
+
+(2) Flushing for shared PMD tables
+
+ We're converting from various things (flush_hugetlb_tlb_range(),
+ tlb_flush_pmd_range(), flush_tlb_range()) to tlb_flush_pmd_range().
+
+ tlb_flush_pmd_range() achieves the same that
+ tlb_remove_huge_tlb_entry() would achieve in these scenarios.
+ Note that tlb_remove_huge_tlb_entry() also calls
+ __tlb_remove_tlb_entry(), however that is only implemented on
+ powerpc, which does not support PMD table sharing.
+
+ Similar to (1), tlb_gather_mmu_vma() should make sure that TLB
+ flushing keeps on working as expected.
+
+Further, note that the ptdesc_pmd_pts_dec() in huge_pmd_share() is not a
+concern, as we are holding the i_mmap_lock the whole time, preventing
+concurrent unsharing. That ptdesc_pmd_pts_dec() usage will be removed
+separately as a cleanup later.
+
+There are plenty more cleanups to be had, but they have to wait until
+this is fixed.
+
+[david@kernel.org: fix kerneldoc]
+ Link: https://lkml.kernel.org/r/f223dd74-331c-412d-93fc-69e360a5006c@kernel.org
+Link: https://lkml.kernel.org/r/20251223214037.580860-5-david@kernel.org
+Fixes: 1013af4f585f ("mm/hugetlb: fix huge_pmd_unshare() vs GUP-fast race")
+Signed-off-by: David Hildenbrand (Red Hat) <david@kernel.org>
+Reported-by: "Uschakow, Stanislav" <suschako@amazon.de>
+Closes: https://lore.kernel.org/all/4d3878531c76479d9f8ca9789dc6485d@amazon.de/
+Tested-by: Laurence Oberman <loberman@redhat.com>
+Acked-by: Harry Yoo <harry.yoo@oracle.com>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: Lance Yang <lance.yang@linux.dev>
+Cc: Liu Shixin <liushixin2@huawei.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+(cherry picked from commit 8ce720d5bd91e9dc16db3604aa4b1bf76770a9a1)
+[ David: We don't have ptdesc and the wrappers, so work directly on
+ page->pt_share_count and pass "struct page" instead of "struct ptdesc".
+ CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING is still called
+ CONFIG_ARCH_WANT_HUGE_PMD_SHARE and is set even without
+ CONFIG_HUGETLB_PAGE. We don't have 550a7d60bd5e ("mm, hugepages: add
+ mremap() support for hugepage backed vma"), so move_hugetlb_page_tables()
+ does not exist. We don't have 40549ba8f8e0 ("hugetlb: use new vma_lock
+ for pmd sharing synchronization") and a98a2f0c8ce1 ("mm/rmap: split
+ migration into its own function"), so changes in mm/rmap.c look quite different. We
+ don't have 4ddb4d91b82f ("hugetlb: do not update address
+ in huge_pmd_unshare"), so huge_pmd_unshare() still gets a pointer to
+ an address. tlb_gather_mmu() + tlb_finish_mmu() still consume ranges, so
+ also teach tlb_gather_mmu_vma() to forward ranges. Some smaller
+ contextual stuff, in particular, around tlb_gather_mmu_full(). ]
+Signed-off-by: David Hildenbrand (Arm) <david@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/asm-generic/tlb.h | 77 ++++++++++++++++++++++++++-
+ include/linux/hugetlb.h | 15 ++++--
+ include/linux/mm_types.h | 2 +
+ mm/hugetlb.c | 108 ++++++++++++++++++++++----------------
+ mm/mmu_gather.c | 36 +++++++++++++
+ mm/rmap.c | 11 ++--
+ 6 files changed, 195 insertions(+), 54 deletions(-)
+
+diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
+index f40c9534f20be..a80accdbcd255 100644
+--- a/include/asm-generic/tlb.h
++++ b/include/asm-generic/tlb.h
+@@ -46,7 +46,8 @@
+ *
+ * The mmu_gather API consists of:
+ *
+- * - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
++ * - tlb_gather_mmu() / tlb_gather_mmu_vma() / tlb_finish_mmu(); start and
++ * finish a mmu_gather
+ *
+ * Finish in particular will issue a (final) TLB invalidate and free
+ * all (remaining) queued pages.
+@@ -291,6 +292,20 @@ struct mmu_gather {
+ unsigned int vma_exec : 1;
+ unsigned int vma_huge : 1;
+
++ /*
++ * Did we unshare (unmap) any shared page tables? For now only
++ * used for hugetlb PMD table sharing.
++ */
++ unsigned int unshared_tables : 1;
++
++ /*
++ * Did we unshare any page tables such that they are now exclusive
++ * and could get reused+modified by the new owner? When setting this
++ * flag, "unshared_tables" will be set as well. For now only used
++ * for hugetlb PMD table sharing.
++ */
++ unsigned int fully_unshared_tables : 1;
++
+ unsigned int batch_count;
+
+ #ifndef CONFIG_MMU_GATHER_NO_GATHER
+@@ -327,6 +342,7 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
+ tlb->cleared_pmds = 0;
+ tlb->cleared_puds = 0;
+ tlb->cleared_p4ds = 0;
++ tlb->unshared_tables = 0;
+ /*
+ * Do not reset mmu_gather::vma_* fields here, we do not
+ * call into tlb_start_vma() again to set them if there is an
+@@ -422,7 +438,7 @@ static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+ * these bits.
+ */
+ if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
+- tlb->cleared_puds || tlb->cleared_p4ds))
++ tlb->cleared_puds || tlb->cleared_p4ds || tlb->unshared_tables))
+ return;
+
+ tlb_flush(tlb);
+@@ -660,6 +676,63 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
+ } while (0)
+ #endif
+
++#if defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && defined(CONFIG_HUGETLB_PAGE)
++static inline void tlb_unshare_pmd_ptdesc(struct mmu_gather *tlb, struct page *pt,
++ unsigned long addr)
++{
++ /*
++ * The caller must make sure that concurrent unsharing + exclusive
++ * reuse is impossible until tlb_flush_unshared_tables() was called.
++ */
++ VM_WARN_ON_ONCE(!atomic_read(&pt->pt_share_count));
++ atomic_dec(&pt->pt_share_count);
++
++ /* Clearing a PUD pointing at a PMD table with PMD leaves. */
++ tlb_flush_pmd_range(tlb, addr & PUD_MASK, PUD_SIZE);
++
++ /*
++ * If the page table is now exclusively owned, we fully unshared
++ * a page table.
++ */
++ if (!atomic_read(&pt->pt_share_count))
++ tlb->fully_unshared_tables = true;
++ tlb->unshared_tables = true;
++}
++
++static inline void tlb_flush_unshared_tables(struct mmu_gather *tlb)
++{
++ /*
++ * As soon as the caller drops locks to allow for reuse of
++ * previously-shared tables, these tables could get modified and
++ * even reused outside of hugetlb context, so we have to make sure that
++ * any page table walkers (incl. TLB, GUP-fast) are aware of that
++ * change.
++ *
++ * Even if we are not fully unsharing a PMD table, we must
++ * flush the TLB for the unsharer now.
++ */
++ if (tlb->unshared_tables)
++ tlb_flush_mmu_tlbonly(tlb);
++
++ /*
++ * Similarly, we must make sure that concurrent GUP-fast will not
++ * walk previously-shared page tables that are getting modified+reused
++ * elsewhere. So broadcast an IPI to wait for any concurrent GUP-fast.
++ *
++ * We only perform this when we are the last sharer of a page table,
++ * as the IPI will reach all CPUs: any GUP-fast.
++ *
++ * Note that on configs where tlb_remove_table_sync_one() is a NOP,
++ * the expectation is that the tlb_flush_mmu_tlbonly() would have issued
++ * required IPIs already for us.
++ */
++ if (tlb->fully_unshared_tables) {
++ tlb_remove_table_sync_one();
++ tlb->fully_unshared_tables = false;
++ }
++}
++#endif
++
+ #endif /* CONFIG_MMU */
+
+ #endif /* _ASM_GENERIC__TLB_H */
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 0d3fece27031c..dfb1afa3d2821 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -166,8 +166,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz);
+ pte_t *huge_pte_offset(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz);
+-int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
+- unsigned long *addr, pte_t *ptep);
++int huge_pmd_unshare(struct mmu_gather *tlb, struct vm_area_struct *vma,
++ unsigned long *addr, pte_t *ptep);
++void huge_pmd_unshare_flush(struct mmu_gather *tlb, struct vm_area_struct *vma);
+ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end);
+ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+@@ -208,13 +209,17 @@ static inline struct address_space *hugetlb_page_mapping_lock_write(
+ return NULL;
+ }
+
+-static inline int huge_pmd_unshare(struct mm_struct *mm,
+- struct vm_area_struct *vma,
+- unsigned long *addr, pte_t *ptep)
++static inline int huge_pmd_unshare(struct mmu_gather *tlb,
++ struct vm_area_struct *vma, unsigned long *addr, pte_t *ptep)
+ {
+ return 0;
+ }
+
++static inline void huge_pmd_unshare_flush(struct mmu_gather *tlb,
++ struct vm_area_struct *vma)
++{
++}
++
+ static inline void adjust_range_if_pmd_sharing_possible(
+ struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index b6cf570dc98cb..00a85b64e5241 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -610,6 +610,8 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
+ struct mmu_gather;
+ extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long start, unsigned long end);
++void tlb_gather_mmu_vma(struct mmu_gather *tlb, struct vm_area_struct *vma,
++ unsigned long start, unsigned long end);
+ extern void tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end);
+
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 33302279ab5ff..27fe947b8c697 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3938,7 +3938,6 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ struct hstate *h = hstate_vma(vma);
+ unsigned long sz = huge_page_size(h);
+ struct mmu_notifier_range range;
+- bool force_flush = false;
+
+ WARN_ON(!is_vm_hugetlb_page(vma));
+ BUG_ON(start & ~huge_page_mask(h));
+@@ -3965,10 +3964,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ continue;
+
+ ptl = huge_pte_lock(h, mm, ptep);
+- if (huge_pmd_unshare(mm, vma, &address, ptep)) {
++ if (huge_pmd_unshare(tlb, vma, &address, ptep)) {
+ spin_unlock(ptl);
+- tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
+- force_flush = true;
+ continue;
+ }
+
+@@ -4026,14 +4023,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ mmu_notifier_invalidate_range_end(&range);
+ tlb_end_vma(tlb, vma);
+
+- /*
+- * There is nothing protecting a previously-shared page table that we
+- * unshared through huge_pmd_unshare() from getting freed after we
+- * release i_mmap_rwsem, so flush the TLB now. If huge_pmd_unshare()
+- * succeeded, flush the range corresponding to the pud.
+- */
+- if (force_flush)
+- tlb_flush_mmu_tlbonly(tlb);
++ huge_pmd_unshare_flush(tlb, vma);
+ }
+
+ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+@@ -5043,8 +5033,8 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+ pte_t pte;
+ struct hstate *h = hstate_vma(vma);
+ unsigned long pages = 0;
+- bool shared_pmd = false;
+ struct mmu_notifier_range range;
++ struct mmu_gather tlb;
+
+ /*
+ * In the case of shared PMDs, the area to flush could be beyond
+@@ -5057,6 +5047,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+
+ BUG_ON(address >= end);
+ flush_cache_range(vma, range.start, range.end);
++ tlb_gather_mmu_vma(&tlb, vma, range.start, range.end);
+
+ mmu_notifier_invalidate_range_start(&range);
+ i_mmap_lock_write(vma->vm_file->f_mapping);
+@@ -5066,10 +5057,9 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+ if (!ptep)
+ continue;
+ ptl = huge_pte_lock(h, mm, ptep);
+- if (huge_pmd_unshare(mm, vma, &address, ptep)) {
++ if (huge_pmd_unshare(&tlb, vma, &address, ptep)) {
+ pages++;
+ spin_unlock(ptl);
+- shared_pmd = true;
+ continue;
+ }
+ pte = huge_ptep_get(ptep);
+@@ -5100,21 +5090,15 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+ pte = arch_make_huge_pte(pte, vma, NULL, 0);
+ huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
+ pages++;
++ tlb_remove_huge_tlb_entry(h, &tlb, ptep, address);
+ }
+ spin_unlock(ptl);
+
+ cond_resched();
+ }
+- /*
+- * There is nothing protecting a previously-shared page table that we
+- * unshared through huge_pmd_unshare() from getting freed after we
+- * release i_mmap_rwsem, so flush the TLB now. If huge_pmd_unshare()
+- * succeeded, flush the range corresponding to the pud.
+- */
+- if (shared_pmd)
+- flush_hugetlb_tlb_range(vma, range.start, range.end);
+- else
+- flush_hugetlb_tlb_range(vma, start, end);
++
++ tlb_flush_mmu_tlbonly(&tlb);
++ huge_pmd_unshare_flush(&tlb, vma);
+ /*
+ * No need to call mmu_notifier_invalidate_range() we are downgrading
+ * page table protection not changing it to point to a new page.
+@@ -5123,6 +5107,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+ */
+ i_mmap_unlock_write(vma->vm_file->f_mapping);
+ mmu_notifier_invalidate_range_end(&range);
++ tlb_finish_mmu(&tlb, range.start, range.end);
+
+ return pages << h->order;
+ }
+@@ -5449,18 +5434,27 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+ return pte;
+ }
+
+-/*
+- * unmap huge page backed by shared pte.
++/**
++ * huge_pmd_unshare - Unmap a pmd table if it is shared by multiple users
++ * @tlb: the current mmu_gather.
++ * @vma: the vma covering the pmd table.
++ * @addr: pointer to the address we are trying to unshare.
++ * @ptep: pointer into the (pmd) page table.
+ *
+- * Called with page table lock held.
++ * Called with the page table lock held, the i_mmap_rwsem held in write mode
++ * and the hugetlb vma lock held in write mode.
+ *
+- * returns: 1 successfully unmapped a shared pte page
+- * 0 the underlying pte page is not shared, or it is the last user
++ * Note: The caller must call huge_pmd_unshare_flush() before dropping the
++ * i_mmap_rwsem.
++ *
++ * Returns: 1 if it was a shared PMD table and it got unmapped, or 0 if it
++ * was not a shared PMD table.
+ */
+-int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
+- unsigned long *addr, pte_t *ptep)
++int huge_pmd_unshare(struct mmu_gather *tlb, struct vm_area_struct *vma,
++ unsigned long *addr, pte_t *ptep)
+ {
+ unsigned long sz = huge_page_size(hstate_vma(vma));
++ struct mm_struct *mm = vma->vm_mm;
+ pgd_t *pgd = pgd_offset(mm, *addr);
+ p4d_t *p4d = p4d_offset(pgd, *addr);
+ pud_t *pud = pud_offset(p4d, *addr);
+@@ -5472,14 +5466,8 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
+ return 0;
+
+ pud_clear(pud);
+- /*
+- * Once our caller drops the rmap lock, some other process might be
+- * using this page table as a normal, non-hugetlb page table.
+- * Wait for pending gup_fast() in other threads to finish before letting
+- * that happen.
+- */
+- tlb_remove_table_sync_one();
+- atomic_dec(&virt_to_page(ptep)->pt_share_count);
++ tlb_unshare_pmd_ptdesc(tlb, virt_to_page(ptep), *addr);
++
+ mm_dec_nr_pmds(mm);
+ /*
+ * This update of passed address optimizes loops sequentially
+@@ -5491,6 +5479,30 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
+ *addr |= PUD_SIZE - PMD_SIZE;
+ return 1;
+ }
++
++/*
++ * huge_pmd_unshare_flush - Complete a sequence of huge_pmd_unshare() calls
++ * @tlb: the current mmu_gather.
++ * @vma: the vma covering the pmd table.
++ *
++ * Perform necessary TLB flushes or IPI broadcasts to synchronize PMD table
++ * unsharing with concurrent page table walkers.
++ *
++ * This function must be called after a sequence of huge_pmd_unshare()
++ * calls while still holding the i_mmap_rwsem.
++ */
++void huge_pmd_unshare_flush(struct mmu_gather *tlb, struct vm_area_struct *vma)
++{
++ /*
++ * We must synchronize page table unsharing such that nobody will
++ * try reusing a previously-shared page table while it might still
++ * be in use by previous sharers (TLB, GUP_fast).
++ */
++ i_mmap_assert_write_locked(vma->vm_file->f_mapping);
++
++ tlb_flush_unshared_tables(tlb);
++}
++
+ #define want_pmd_share() (1)
+ #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
+ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+@@ -5498,12 +5510,16 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+ return NULL;
+ }
+
+-int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
+- unsigned long *addr, pte_t *ptep)
++int huge_pmd_unshare(struct mmu_gather *tlb, struct vm_area_struct *vma,
++ unsigned long *addr, pte_t *ptep)
+ {
+ return 0;
+ }
+
++void huge_pmd_unshare_flush(struct mmu_gather *tlb, struct vm_area_struct *vma)
++{
++}
++
+ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+ {
+@@ -5745,6 +5761,7 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
+ unsigned long sz = huge_page_size(h);
+ struct mm_struct *mm = vma->vm_mm;
+ struct mmu_notifier_range range;
++ struct mmu_gather tlb;
+ unsigned long address;
+ spinlock_t *ptl;
+ pte_t *ptep;
+@@ -5756,6 +5773,8 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
+ return;
+
+ flush_cache_range(vma, start, end);
++ tlb_gather_mmu_vma(&tlb, vma, start, end);
++
+ /*
+ * No need to call adjust_range_if_pmd_sharing_possible(), because
+ * we have already done the PUD_SIZE alignment.
+@@ -5776,10 +5795,10 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
+ continue;
+ ptl = huge_pte_lock(h, mm, ptep);
+ /* We don't want 'address' to be changed */
+- huge_pmd_unshare(mm, vma, &tmp, ptep);
++ huge_pmd_unshare(&tlb, vma, &tmp, ptep);
+ spin_unlock(ptl);
+ }
+- flush_hugetlb_tlb_range(vma, start, end);
++ huge_pmd_unshare_flush(&tlb, vma);
+ if (take_locks) {
+ i_mmap_unlock_write(vma->vm_file->f_mapping);
+ }
+@@ -5788,6 +5807,7 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
+ * Documentation/mm/mmu_notifier.rst.
+ */
+ mmu_notifier_invalidate_range_end(&range);
++ tlb_finish_mmu(&tlb, start, end);
+ }
+
+ #ifdef CONFIG_CMA
+diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
+index 205fdbb5792a9..298972351a607 100644
+--- a/mm/mmu_gather.c
++++ b/mm/mmu_gather.c
+@@ -7,6 +7,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/smp.h>
+ #include <linux/swap.h>
++#include <linux/hugetlb.h>
+
+ #include <asm/pgalloc.h>
+ #include <asm/tlb.h>
+@@ -281,10 +282,39 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+ tlb->page_size = 0;
+ #endif
+
++ tlb->fully_unshared_tables = 0;
+ __tlb_reset_range(tlb);
+ inc_tlb_flush_pending(tlb->mm);
+ }
+
++/**
++ * tlb_gather_mmu_vma - initialize an mmu_gather structure for operating on a
++ * single VMA
++ * @tlb: the mmu_gather structure to initialize
++ * @vma: the vm_area_struct
++ * @start: start of the region that will be removed from the page-table
++ * @end: end of the region that will be removed from the page-table
++ *
++ * Called to initialize an (on-stack) mmu_gather structure for operating on
++ * a single VMA. In contrast to tlb_gather_mmu(), calling this function will
++ * not require another call to tlb_start_vma(). In contrast to tlb_start_vma(),
++ * this function will *not* call flush_cache_range().
++ *
++ * For hugetlb VMAs, this function will also initialize the mmu_gather
++ * page_size accordingly, not requiring a separate call to
++ * tlb_change_page_size().
++ *
++ */
++void tlb_gather_mmu_vma(struct mmu_gather *tlb, struct vm_area_struct *vma,
++ unsigned long start, unsigned long end)
++{
++ tlb_gather_mmu(tlb, vma->vm_mm, start, end);
++ tlb_update_vma_flags(tlb, vma);
++ if (is_vm_hugetlb_page(vma))
++ /* All entries have the same size. */
++ tlb_change_page_size(tlb, huge_page_size(hstate_vma(vma)));
++}
++
+ /**
+ * tlb_finish_mmu - finish an mmu_gather structure
+ * @tlb: the mmu_gather structure to finish
+@@ -297,6 +327,12 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+ void tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end)
+ {
++ /*
++ * We expect an earlier huge_pmd_unshare_flush() call to sort this out,
++ * due to complicated locking requirements with page table unsharing.
++ */
++ VM_WARN_ON_ONCE(tlb->fully_unshared_tables);
++
+ /*
+ * If there are parallel threads are doing PTE changes on same range
+ * under non-exclusive lock (e.g., mmap_lock read-side) but defer TLB
+diff --git a/mm/rmap.c b/mm/rmap.c
+index 315d7ceb573ae..a5da7abd15d3d 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -73,7 +73,7 @@
+ #include <linux/memremap.h>
+ #include <linux/userfaultfd_k.h>
+
+-#include <asm/tlbflush.h>
++#include <asm/tlb.h>
+
+ #include <trace/events/tlb.h>
+
+@@ -1470,13 +1470,16 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+ address = pvmw.address;
+
+ if (PageHuge(page) && !PageAnon(page)) {
++ struct mmu_gather tlb;
++
+ /*
+ * To call huge_pmd_unshare, i_mmap_rwsem must be
+ * held in write mode. Caller needs to explicitly
+ * do this outside rmap routines.
+ */
+ VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
+- if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
++ tlb_gather_mmu_vma(&tlb, vma, range.start, range.end);
++ if (huge_pmd_unshare(&tlb, vma, &address, pvmw.pte)) {
+ /*
+ * huge_pmd_unshare unmapped an entire PMD
+ * page. There is no way of knowing exactly
+@@ -1485,9 +1488,10 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+ * already adjusted above to cover this range.
+ */
+ flush_cache_range(vma, range.start, range.end);
+- flush_tlb_range(vma, range.start, range.end);
++ huge_pmd_unshare_flush(&tlb, vma);
+ mmu_notifier_invalidate_range(mm, range.start,
+ range.end);
++ tlb_finish_mmu(&tlb, range.start, range.end);
+
+ /*
+ * The PMD table was unmapped,
+@@ -1496,6 +1500,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+ page_vma_mapped_walk_done(&pvmw);
+ break;
+ }
++ tlb_finish_mmu(&tlb, range.start, range.end);
+ }
+
+ if (IS_ENABLED(CONFIG_MIGRATION) &&
+--
+2.53.0
+
--- /dev/null
+From a7f54c4b4fd9f5a34a1ea42bea1c30e4a5e5f056 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Feb 2026 14:05:49 +0100
+Subject: mm/hugetlb: fix hugetlb_pmd_shared()
+
+From: David Hildenbrand (Red Hat) <david@kernel.org>
+
+Patch series "mm/hugetlb: fixes for PMD table sharing (incl. using
+mmu_gather)", v3.
+
+One functional fix, one performance regression fix, and two related
+comment fixes.
+
+I cleaned up my prototype I recently shared [1] for the performance fix,
+deferring most of the cleanups I had in the prototype to a later point.
+While doing that I identified the other things.
+
+The goal of this patch set is to be backported to stable trees "fairly"
+easily. At least patch #1 and #4.
+
+Patch #1 fixes hugetlb_pmd_shared() not detecting any sharing
+Patch #2 + #3 are simple comment fixes that patch #4 interacts with.
+Patch #4 is a fix for the reported performance regression due to excessive
+IPI broadcasts during fork()+exit().
+
+The last patch is all about TLB flushes, IPIs and mmu_gather.
+Read: complicated
+
+There are plenty of cleanups in the future to be had + one reasonable
+optimization on x86. But that's all out of scope for this series.
+
+Runtime tested, with a focus on fixing the performance regression using
+the original reproducer [2] on x86.
+
+This patch (of 4):
+
+We switched from (wrongly) using the page count to an independent shared
+count. Now, shared page tables have a refcount of 1 (excluding
+speculative references) and instead use ptdesc->pt_share_count to identify
+sharing.
+
+We didn't convert hugetlb_pmd_shared(), so right now, we would never
+detect a shared PMD table as such, because sharing/unsharing no longer
+touches the refcount of a PMD table.
+
+Page migration, like mbind() or migrate_pages() would allow for migrating
+folios mapped into such shared PMD tables, even though the folios are not
+exclusive. In smaps we would account them as "private" although they are
+"shared", and we would be wrongly setting the PM_MMAP_EXCLUSIVE in the
+pagemap interface.
+
+Fix it by properly using ptdesc_pmd_is_shared() in hugetlb_pmd_shared().
+
+Link: https://lkml.kernel.org/r/20251223214037.580860-1-david@kernel.org
+Link: https://lkml.kernel.org/r/20251223214037.580860-2-david@kernel.org
+Link: https://lore.kernel.org/all/8cab934d-4a56-44aa-b641-bfd7e23bd673@kernel.org/ [1]
+Link: https://lore.kernel.org/all/8cab934d-4a56-44aa-b641-bfd7e23bd673@kernel.org/ [2]
+Fixes: 59d9094df3d7 ("mm: hugetlb: independent PMD page table shared count")
+Signed-off-by: David Hildenbrand (Red Hat) <david@kernel.org>
+Reviewed-by: Rik van Riel <riel@surriel.com>
+Reviewed-by: Lance Yang <lance.yang@linux.dev>
+Tested-by: Lance Yang <lance.yang@linux.dev>
+Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
+Tested-by: Laurence Oberman <loberman@redhat.com>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Acked-by: Oscar Salvador <osalvador@suse.de>
+Cc: Liu Shixin <liushixin2@huawei.com>
+Cc: "Uschakow, Stanislav" <suschako@amazon.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+(cherry picked from commit ca1a47cd3f5f4c46ca188b1c9a27af87d1ab2216)
+[ David: We don't have ptdesc and the wrappers, so work directly on
+ page->pt_share_count. ]
+Signed-off-by: David Hildenbrand (Arm) <david@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/hugetlb.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 1c03935aa3d13..0d3fece27031c 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -955,7 +955,7 @@ static inline __init void hugetlb_cma_check(void)
+ #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+ static inline bool hugetlb_pmd_shared(pte_t *pte)
+ {
+- return page_count(virt_to_page(pte)) > 1;
++ return atomic_read(&virt_to_page(pte)->pt_share_count);
+ }
+ #else
+ static inline bool hugetlb_pmd_shared(pte_t *pte)
+--
+2.53.0
+
--- /dev/null
+From e9386eb9d414c0b2b5183a5378af6af5acbabc64 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Feb 2026 14:05:46 +0100
+Subject: mm/hugetlb: fix skipping of unsharing of pmd page tables
+
+From: David Hildenbrand (Arm) <david@kernel.org>
+
+In the 5.10 backport of commit b30c14cd6102 ("hugetlb: unshare some PMDs
+when splitting VMAs") we seemed to have missed that huge_pmd_unshare()
+still adjusts the address itself.
+
+For this reason, commit 6dfeaff93be1 ("hugetlb/userfaultfd: unshare all
+pmds for hugetlbfs when register wp") explicitly handled this case by
+passing a temporary variable instead.
+
+Fix it in 5.10 by doing the same thing.
+
+Fixes: f1082f5f3d02 ("hugetlb: unshare some PMDs when splitting VMAs")
+Signed-off-by: David Hildenbrand (Arm) <david@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/hugetlb.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 8efe35ea0baa7..99a71943c1f69 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -5787,11 +5787,14 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
+ i_mmap_assert_write_locked(vma->vm_file->f_mapping);
+ }
+ for (address = start; address < end; address += PUD_SIZE) {
++ unsigned long tmp = address;
++
+ ptep = huge_pte_offset(mm, address, sz);
+ if (!ptep)
+ continue;
+ ptl = huge_pte_lock(h, mm, ptep);
+- huge_pmd_unshare(mm, vma, &address, ptep);
++ /* We don't want 'address' to be changed */
++ huge_pmd_unshare(mm, vma, &tmp, ptep);
+ spin_unlock(ptl);
+ }
+ flush_hugetlb_tlb_range(vma, start, end);
+--
+2.53.0
+
--- /dev/null
+From 368eb50149fb268cc77cd4a38ac0100666a5f91d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Feb 2026 14:05:50 +0100
+Subject: mm/hugetlb: fix two comments related to huge_pmd_unshare()
+
+From: David Hildenbrand (Red Hat) <david@kernel.org>
+
+Ever since we stopped using the page count to detect shared PMD page
+tables, these comments are outdated.
+
+The only reason we have to flush the TLB early is because once we drop the
+i_mmap_rwsem, the previously shared page table could get freed (to then
+get reallocated and used for other purpose). So we really have to flush
+the TLB before that could happen.
+
+So let's simplify the comments a bit.
+
+The "If we unshared PMDs, the TLB flush was not recorded in mmu_gather."
+part introduced as in commit a4a118f2eead ("hugetlbfs: flush TLBs
+correctly after huge_pmd_unshare") was confusing: sure it is recorded in
+the mmu_gather, otherwise tlb_flush_mmu_tlbonly() wouldn't do anything.
+So let's drop that comment while at it as well.
+
+We'll centralize these comments in a single helper as we rework the code
+next.
+
+Link: https://lkml.kernel.org/r/20251223214037.580860-3-david@kernel.org
+Fixes: 59d9094df3d7 ("mm: hugetlb: independent PMD page table shared count")
+Signed-off-by: David Hildenbrand (Red Hat) <david@kernel.org>
+Reviewed-by: Rik van Riel <riel@surriel.com>
+Tested-by: Laurence Oberman <loberman@redhat.com>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Acked-by: Oscar Salvador <osalvador@suse.de>
+Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
+Cc: Liu Shixin <liushixin2@huawei.com>
+Cc: Lance Yang <lance.yang@linux.dev>
+Cc: "Uschakow, Stanislav" <suschako@amazon.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+(cherry picked from commit 3937027caecb4f8251e82dd857ba1d749bb5a428)
+Signed-off-by: David Hildenbrand (Arm) <david@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/hugetlb.c | 24 ++++++++----------------
+ 1 file changed, 8 insertions(+), 16 deletions(-)
+
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 8fa34032bc173..33302279ab5ff 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -4027,17 +4027,10 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ tlb_end_vma(tlb, vma);
+
+ /*
+- * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
+- * could defer the flush until now, since by holding i_mmap_rwsem we
+- * guaranteed that the last refernece would not be dropped. But we must
+- * do the flushing before we return, as otherwise i_mmap_rwsem will be
+- * dropped and the last reference to the shared PMDs page might be
+- * dropped as well.
+- *
+- * In theory we could defer the freeing of the PMD pages as well, but
+- * huge_pmd_unshare() relies on the exact page_count for the PMD page to
+- * detect sharing, so we cannot defer the release of the page either.
+- * Instead, do flush now.
++ * There is nothing protecting a previously-shared page table that we
++ * unshared through huge_pmd_unshare() from getting freed after we
++ * release i_mmap_rwsem, so flush the TLB now. If huge_pmd_unshare()
++ * succeeded, flush the range corresponding to the pud.
+ */
+ if (force_flush)
+ tlb_flush_mmu_tlbonly(tlb);
+@@ -5113,11 +5106,10 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+ cond_resched();
+ }
+ /*
+- * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
+- * may have cleared our pud entry and done put_page on the page table:
+- * once we release i_mmap_rwsem, another task can do the final put_page
+- * and that page table be reused and filled with junk. If we actually
+- * did unshare a page of pmds, flush the range corresponding to the pud.
++ * There is nothing protecting a previously-shared page table that we
++ * unshared through huge_pmd_unshare() from getting freed after we
++ * release i_mmap_rwsem, so flush the TLB now. If huge_pmd_unshare()
++ * succeeded, flush the range corresponding to the pud.
+ */
+ if (shared_pmd)
+ flush_hugetlb_tlb_range(vma, range.start, range.end);
+--
+2.53.0
+
--- /dev/null
+From 375a6f8dc5b68ba2473f9014e1bec44b681c7bf8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Feb 2026 14:05:47 +0100
+Subject: mm/hugetlb: make detecting shared pte more reliable
+
+From: Miaohe Lin <linmiaohe@huawei.com>
+
+If the pagetables are shared, we shouldn't copy or take references. Since
+src could have unshared and dst shares with another vma, huge_pte_none()
+is thus used to determine whether dst_pte is shared. But this check isn't
+reliable. A shared pte could have pte none in pagetable in fact. The
+page count of ptep page should be checked here in order to reliably
+determine whether pte is shared.
+
+[lukas.bulwahn@gmail.com: remove unused local variable dst_entry in copy_hugetlb_page_range()]
+ Link: https://lkml.kernel.org/r/20220822082525.26071-1-lukas.bulwahn@gmail.com
+Link: https://lkml.kernel.org/r/20220816130553.31406-7-linmiaohe@huawei.com
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Signed-off-by: Lukas Bulwahn <lukas.bulwahn@gmail.com>
+Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: Muchun Song <songmuchun@bytedance.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+(cherry picked from commit 3aa4ed8040e1535d95c03cef8b52cf11bf0d8546)
+[ David: We don't have 4eae4efa2c29 ("hugetlb: do early cow when page
+ pinned on src mm"), so there are some contextual conflicts. ]
+Signed-off-by: David Hildenbrand (Arm) <david@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/hugetlb.c | 19 +++++++------------
+ 1 file changed, 7 insertions(+), 12 deletions(-)
+
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 99a71943c1f69..a2cab8f2190f8 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3827,7 +3827,7 @@ static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
+ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
+ struct vm_area_struct *vma)
+ {
+- pte_t *src_pte, *dst_pte, entry, dst_entry;
++ pte_t *src_pte, *dst_pte, entry;
+ struct page *ptepage;
+ unsigned long addr;
+ int cow;
+@@ -3867,27 +3867,22 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
+
+ /*
+ * If the pagetables are shared don't copy or take references.
+- * dst_pte == src_pte is the common case of src/dest sharing.
+ *
++ * dst_pte == src_pte is the common case of src/dest sharing.
+ * However, src could have 'unshared' and dst shares with
+- * another vma. If dst_pte !none, this implies sharing.
+- * Check here before taking page table lock, and once again
+- * after taking the lock below.
++ * another vma. So page_count of ptep page is checked instead
++ * to reliably determine whether pte is shared.
+ */
+- dst_entry = huge_ptep_get(dst_pte);
+- if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
++ if (page_count(virt_to_page(dst_pte)) > 1)
+ continue;
+
+ dst_ptl = huge_pte_lock(h, dst, dst_pte);
+ src_ptl = huge_pte_lockptr(h, src, src_pte);
+ spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+ entry = huge_ptep_get(src_pte);
+- dst_entry = huge_ptep_get(dst_pte);
+- if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
++ if (huge_pte_none(entry)) {
+ /*
+- * Skip if src entry none. Also, skip in the
+- * unlikely case dst entry !none as this implies
+- * sharing with another vma.
++ * Skip if src entry none.
+ */
+ ;
+ } else if (unlikely(is_hugetlb_entry_migration(entry) ||
+--
+2.53.0
+
--- /dev/null
+From ad208c1701b9db7abe392b539dd38c568d4024d6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Feb 2026 14:05:51 +0100
+Subject: mm/rmap: fix two comments related to huge_pmd_unshare()
+
+From: David Hildenbrand (Red Hat) <david@kernel.org>
+
+PMD page table unsharing no longer touches the refcount of a PMD page
+table. Also, it is not about dropping the refcount of a "PMD page" but
+the "PMD page table".
+
+Let's just simplify by saying that the PMD page table was unmapped,
+consequently also unmapping the folio that was mapped into this page.
+
+This code should be deduplicated in the future.
+
+Link: https://lkml.kernel.org/r/20251223214037.580860-4-david@kernel.org
+Fixes: 59d9094df3d7 ("mm: hugetlb: independent PMD page table shared count")
+Signed-off-by: David Hildenbrand (Red Hat) <david@kernel.org>
+Reviewed-by: Rik van Riel <riel@surriel.com>
+Tested-by: Laurence Oberman <loberman@redhat.com>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Acked-by: Oscar Salvador <osalvador@suse.de>
+Cc: Liu Shixin <liushixin2@huawei.com>
+Cc: Harry Yoo <harry.yoo@oracle.com>
+Cc: Lance Yang <lance.yang@linux.dev>
+Cc: "Uschakow, Stanislav" <suschako@amazon.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+(cherry picked from commit a8682d500f691b6dfaa16ae1502d990aeb86e8be)
+[ David: We don't have 40549ba8f8e0 ("hugetlb: use new vma_lock
+ for pmd sharing synchronization") and a98a2f0c8ce1 ("mm/rmap: split
+ migration into its own function"), so changes in mm/rmap.c look quite different. ]
+Signed-off-by: David Hildenbrand (Arm) <david@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/rmap.c | 9 ++-------
+ 1 file changed, 2 insertions(+), 7 deletions(-)
+
+diff --git a/mm/rmap.c b/mm/rmap.c
+index e6f840be18906..315d7ceb573ae 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -1490,13 +1490,8 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+ range.end);
+
+ /*
+- * The ref count of the PMD page was dropped
+- * which is part of the way map counting
+- * is done for shared PMDs. Return 'true'
+- * here. When there is no other sharing,
+- * huge_pmd_unshare returns false and we will
+- * unmap the actual page and drop map count
+- * to zero.
++ * The PMD table was unmapped,
++ * consequently unmapping the folio.
+ */
+ page_vma_mapped_walk_done(&pvmw);
+ break;
+--
+2.53.0
+
xfrm_user-fix-info-leak-in-build_report.patch
input-uinput-fix-circular-locking-dependency-with-ff-core.patch
input-uinput-take-event-lock-when-submitting-ff-request-event.patch
+mm-hugetlb-fix-skipping-of-unsharing-of-pmd-page-tab.patch
+mm-hugetlb-make-detecting-shared-pte-more-reliable.patch
+mm-hugetlb-fix-copy_hugetlb_page_range-to-use-pt_sha.patch
+mm-hugetlb-fix-hugetlb_pmd_shared.patch
+mm-hugetlb-fix-two-comments-related-to-huge_pmd_unsh.patch
+mm-rmap-fix-two-comments-related-to-huge_pmd_unshare.patch
+mm-hugetlb-fix-excessive-ipi-broadcasts-when-unshari.patch
+media-uvcvideo-move-guid-to-entity.patch
+media-uvcvideo-allow-extra-entities.patch
+media-uvcvideo-implement-uvc_ext_gpio_unit.patch
+media-uvcvideo-mark-invalid-entities-with-id-uvc_inv.patch
+media-uvcvideo-use-heuristic-to-find-stream-entity.patch
--- /dev/null
+From 231a6d67c168bb2dc06d8f520234c8513ed5f4da Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Apr 2026 15:22:14 +0800
+Subject: gpiolib: cdev: fix uninitialised kfifo
+
+From: Kent Gibson <warthog618@gmail.com>
+
+[ Upstream commit ee0166b637a5e376118e9659e5b4148080f1d27e ]
+
+If a line is requested with debounce, and that results in debouncing
+in software, and the line is subsequently reconfigured to enable edge
+detection then the allocation of the kfifo to contain edge events is
+overlooked. This results in events being written to and read from an
+uninitialised kfifo. Read events are returned to userspace.
+
+Initialise the kfifo in the case where the software debounce is
+already active.
+
+Fixes: 65cff7046406 ("gpiolib: cdev: support setting debounce")
+Signed-off-by: Kent Gibson <warthog618@gmail.com>
+Link: https://lore.kernel.org/r/20240510065342.36191-1-warthog618@gmail.com
+Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+Signed-off-by: Robert Garcia <rob_garcia@163.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpio/gpiolib-cdev.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
+index 3cd19ab1fc2a0..d4b221c90bb20 100644
+--- a/drivers/gpio/gpiolib-cdev.c
++++ b/drivers/gpio/gpiolib-cdev.c
+@@ -896,6 +896,7 @@ static int edge_detector_update(struct line *line,
+ unsigned int line_idx,
+ u64 eflags, bool polarity_change)
+ {
++ int ret;
+ unsigned int debounce_period_us =
+ gpio_v2_line_config_debounce_period(lc, line_idx);
+
+@@ -907,6 +908,18 @@ static int edge_detector_update(struct line *line,
+ if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
+ WRITE_ONCE(line->eflags, eflags);
+ WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
++ /*
++ * ensure event fifo is initialised if edge detection
++ * is now enabled.
++ */
++ eflags = eflags & GPIO_V2_LINE_EDGE_FLAGS;
++ if (eflags && !kfifo_initialized(&line->req->events)) {
++ ret = kfifo_alloc(&line->req->events,
++ line->req->event_buffer_size,
++ GFP_KERNEL);
++ if (ret)
++ return ret;
++ }
+ return 0;
+ }
+
+--
+2.53.0
+
--- /dev/null
+From cae88f55cbaa490ff0e92025ac4c22f5999a7fc6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Apr 2026 14:51:47 +0800
+Subject: iio: adc: ad7923: Fix buffer overflow for tx_buf and ring_xfer
+
+From: Nuno Sa <nuno.sa@analog.com>
+
+[ Upstream commit 3a4187ec454e19903fd15f6e1825a4b84e59a4cd ]
+
+The AD7923 was updated to support devices with 8 channels, but the size
+of tx_buf and ring_xfer was not increased accordingly, leading to a
+potential buffer overflow in ad7923_update_scan_mode().
+
+Fixes: 851644a60d20 ("iio: adc: ad7923: Add support for the ad7908/ad7918/ad7928")
+Cc: stable@vger.kernel.org
+Signed-off-by: Nuno Sa <nuno.sa@analog.com>
+Signed-off-by: Zicheng Qu <quzicheng@huawei.com>
+Link: https://patch.msgid.link/20241029134637.2261336-1-quzicheng@huawei.com
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+[ Context change fixed. ]
+Signed-off-by: Robert Garcia <rob_garcia@163.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/adc/ad7923.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
+index b8cc94b7dd80a..a8e59fd2dcf3c 100644
+--- a/drivers/iio/adc/ad7923.c
++++ b/drivers/iio/adc/ad7923.c
+@@ -47,7 +47,7 @@
+
+ struct ad7923_state {
+ struct spi_device *spi;
+- struct spi_transfer ring_xfer[5];
++ struct spi_transfer ring_xfer[9];
+ struct spi_transfer scan_single_xfer[2];
+ struct spi_message ring_msg;
+ struct spi_message scan_single_msg;
+@@ -63,7 +63,7 @@ struct ad7923_state {
+ * Length = 8 channels + 4 extra for 8 byte timestamp
+ */
+ __be16 rx_buf[12] ____cacheline_aligned;
+- __be16 tx_buf[4];
++ __be16 tx_buf[8];
+ };
+
+ struct ad7923_chip_info {
+--
+2.53.0
+
--- /dev/null
+From fe2d0a7c1cb2f0002877c0cf525322cfb7a89ff3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 16:08:16 +0000
+Subject: media: uvcvideo: Mark invalid entities with id UVC_INVALID_ENTITY_ID
+
+From: Thadeu Lima de Souza Cascardo <cascardo@igalia.com>
+
+[ Upstream commit 0e2ee70291e64a30fe36960c85294726d34a103e ]
+
+Per UVC 1.1+ specification 3.7.2, units and terminals must have a non-zero
+unique ID.
+
+```
+Each Unit and Terminal within the video function is assigned a unique
+identification number, the Unit ID (UID) or Terminal ID (TID), contained in
+the bUnitID or bTerminalID field of the descriptor. The value 0x00 is
+reserved for undefined ID,
+```
+
+If we add a new entity with id 0 or a duplicated ID, it will be marked
+as UVC_INVALID_ENTITY_ID.
+
+In a previous attempt commit 3dd075fe8ebb ("media: uvcvideo: Require
+entities to have a non-zero unique ID"), we ignored all the invalid units,
+this broke a lot of non-compatible cameras. Hopefully we are more lucky
+this time.
+
+This also prevents some syzkaller reproducers from triggering warnings due
+to a chain of entities referring to themselves. In one particular case, an
+Output Unit is connected to an Input Unit, both with the same ID of 1. But
+when looking up for the source ID of the Output Unit, that same entity is
+found instead of the input entity, which leads to such warnings.
+
+In another case, a backward chain was considered finished as the source ID
+was 0. Later on, that entity was found, but its pads were not valid.
+
+Here is a sample stack trace for one of those cases.
+
+[ 20.650953] usb 1-1: new high-speed USB device number 2 using dummy_hcd
+[ 20.830206] usb 1-1: Using ep0 maxpacket: 8
+[ 20.833501] usb 1-1: config 0 descriptor??
+[ 21.038518] usb 1-1: string descriptor 0 read error: -71
+[ 21.038893] usb 1-1: Found UVC 0.00 device <unnamed> (2833:0201)
+[ 21.039299] uvcvideo 1-1:0.0: Entity type for entity Output 1 was not initialized!
+[ 21.041583] uvcvideo 1-1:0.0: Entity type for entity Input 1 was not initialized!
+[ 21.042218] ------------[ cut here ]------------
+[ 21.042536] WARNING: CPU: 0 PID: 9 at drivers/media/mc/mc-entity.c:1147 media_create_pad_link+0x2c4/0x2e0
+[ 21.043195] Modules linked in:
+[ 21.043535] CPU: 0 UID: 0 PID: 9 Comm: kworker/0:1 Not tainted 6.11.0-rc7-00030-g3480e43aeccf #444
+[ 21.044101] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.15.0-1 04/01/2014
+[ 21.044639] Workqueue: usb_hub_wq hub_event
+[ 21.045100] RIP: 0010:media_create_pad_link+0x2c4/0x2e0
+[ 21.045508] Code: fe e8 20 01 00 00 b8 f4 ff ff ff 48 83 c4 30 5b 41 5c 41 5d 41 5e 41 5f 5d c3 cc cc cc cc 0f 0b eb e9 0f 0b eb 0a 0f 0b eb 06 <0f> 0b eb 02 0f 0b b8 ea ff ff ff eb d4 66 2e 0f 1f 84 00 00 00 00
+[ 21.046801] RSP: 0018:ffffc9000004b318 EFLAGS: 00010246
+[ 21.047227] RAX: ffff888004e5d458 RBX: 0000000000000000 RCX: ffffffff818fccf1
+[ 21.047719] RDX: 000000000000007b RSI: 0000000000000000 RDI: ffff888004313290
+[ 21.048241] RBP: ffff888004313290 R08: 0001ffffffffffff R09: 0000000000000000
+[ 21.048701] R10: 0000000000000013 R11: 0001888004313290 R12: 0000000000000003
+[ 21.049138] R13: ffff888004313080 R14: ffff888004313080 R15: 0000000000000000
+[ 21.049648] FS: 0000000000000000(0000) GS:ffff88803ec00000(0000) knlGS:0000000000000000
+[ 21.050271] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 21.050688] CR2: 0000592cc27635b0 CR3: 000000000431c000 CR4: 0000000000750ef0
+[ 21.051136] PKRU: 55555554
+[ 21.051331] Call Trace:
+[ 21.051480] <TASK>
+[ 21.051611] ? __warn+0xc4/0x210
+[ 21.051861] ? media_create_pad_link+0x2c4/0x2e0
+[ 21.052252] ? report_bug+0x11b/0x1a0
+[ 21.052540] ? trace_hardirqs_on+0x31/0x40
+[ 21.052901] ? handle_bug+0x3d/0x70
+[ 21.053197] ? exc_invalid_op+0x1a/0x50
+[ 21.053511] ? asm_exc_invalid_op+0x1a/0x20
+[ 21.053924] ? media_create_pad_link+0x91/0x2e0
+[ 21.054364] ? media_create_pad_link+0x2c4/0x2e0
+[ 21.054834] ? media_create_pad_link+0x91/0x2e0
+[ 21.055131] ? _raw_spin_unlock+0x1e/0x40
+[ 21.055441] ? __v4l2_device_register_subdev+0x202/0x210
+[ 21.055837] uvc_mc_register_entities+0x358/0x400
+[ 21.056144] uvc_register_chains+0x1fd/0x290
+[ 21.056413] uvc_probe+0x380e/0x3dc0
+[ 21.056676] ? __lock_acquire+0x5aa/0x26e0
+[ 21.056946] ? find_held_lock+0x33/0xa0
+[ 21.057196] ? kernfs_activate+0x70/0x80
+[ 21.057533] ? usb_match_dynamic_id+0x1b/0x70
+[ 21.057811] ? find_held_lock+0x33/0xa0
+[ 21.058047] ? usb_match_dynamic_id+0x55/0x70
+[ 21.058330] ? lock_release+0x124/0x260
+[ 21.058657] ? usb_match_one_id_intf+0xa2/0x100
+[ 21.058997] usb_probe_interface+0x1ba/0x330
+[ 21.059399] really_probe+0x1ba/0x4c0
+[ 21.059662] __driver_probe_device+0xb2/0x180
+[ 21.059944] driver_probe_device+0x5a/0x100
+[ 21.060170] __device_attach_driver+0xe9/0x160
+[ 21.060427] ? __pfx___device_attach_driver+0x10/0x10
+[ 21.060872] bus_for_each_drv+0xa9/0x100
+[ 21.061312] __device_attach+0xed/0x190
+[ 21.061812] device_initial_probe+0xe/0x20
+[ 21.062229] bus_probe_device+0x4d/0xd0
+[ 21.062590] device_add+0x308/0x590
+[ 21.062912] usb_set_configuration+0x7b6/0xaf0
+[ 21.063403] usb_generic_driver_probe+0x36/0x80
+[ 21.063714] usb_probe_device+0x7b/0x130
+[ 21.063936] really_probe+0x1ba/0x4c0
+[ 21.064111] __driver_probe_device+0xb2/0x180
+[ 21.064577] driver_probe_device+0x5a/0x100
+[ 21.065019] __device_attach_driver+0xe9/0x160
+[ 21.065403] ? __pfx___device_attach_driver+0x10/0x10
+[ 21.065820] bus_for_each_drv+0xa9/0x100
+[ 21.066094] __device_attach+0xed/0x190
+[ 21.066535] device_initial_probe+0xe/0x20
+[ 21.066992] bus_probe_device+0x4d/0xd0
+[ 21.067250] device_add+0x308/0x590
+[ 21.067501] usb_new_device+0x347/0x610
+[ 21.067817] hub_event+0x156b/0x1e30
+[ 21.068060] ? process_scheduled_works+0x48b/0xaf0
+[ 21.068337] process_scheduled_works+0x5a3/0xaf0
+[ 21.068668] worker_thread+0x3cf/0x560
+[ 21.068932] ? kthread+0x109/0x1b0
+[ 21.069133] kthread+0x197/0x1b0
+[ 21.069343] ? __pfx_worker_thread+0x10/0x10
+[ 21.069598] ? __pfx_kthread+0x10/0x10
+[ 21.069908] ret_from_fork+0x32/0x40
+[ 21.070169] ? __pfx_kthread+0x10/0x10
+[ 21.070424] ret_from_fork_asm+0x1a/0x30
+[ 21.070737] </TASK>
+
+Reported-by: syzbot+0584f746fde3d52b4675@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=0584f746fde3d52b4675
+Reported-by: syzbot+dd320d114deb3f5bb79b@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=dd320d114deb3f5bb79b
+Reported-by: Youngjun Lee <yjjuny.lee@samsung.com>
+Fixes: a3fbc2e6bb05 ("media: mc-entity.c: use WARN_ON, validate link pads")
+Cc: stable@vger.kernel.org
+Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@igalia.com>
+Co-developed-by: Ricardo Ribalda <ribalda@chromium.org>
+Signed-off-by: Ricardo Ribalda <ribalda@chromium.org>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Reviewed-by: Hans de Goede <hansg@kernel.org>
+Signed-off-by: Hans de Goede <hansg@kernel.org>
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/usb/uvc/uvc_driver.c | 73 +++++++++++++++++++-----------
+ drivers/media/usb/uvc/uvcvideo.h | 2 +
+ 2 files changed, 48 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 858fc5b26a5e5..c39c1f237d10e 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -413,6 +413,9 @@ struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id)
+ {
+ struct uvc_entity *entity;
+
++ if (id == UVC_INVALID_ENTITY_ID)
++ return NULL;
++
+ list_for_each_entry(entity, &dev->entities, list) {
+ if (entity->id == id)
+ return entity;
+@@ -1029,14 +1032,27 @@ static const u8 uvc_media_transport_input_guid[16] =
+ UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT;
+ static const u8 uvc_processing_guid[16] = UVC_GUID_UVC_PROCESSING;
+
+-static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id,
+- unsigned int num_pads, unsigned int extra_size)
++static struct uvc_entity *uvc_alloc_new_entity(struct uvc_device *dev, u16 type,
++ u16 id, unsigned int num_pads,
++ unsigned int extra_size)
+ {
+ struct uvc_entity *entity;
+ unsigned int num_inputs;
+ unsigned int size;
+ unsigned int i;
+
++ /* Per UVC 1.1+ spec 3.7.2, the ID should be non-zero. */
++ if (id == 0) {
++ dev_err(&dev->intf->dev, "Found Unit with invalid ID 0\n");
++ id = UVC_INVALID_ENTITY_ID;
++ }
++
++ /* Per UVC 1.1+ spec 3.7.2, the ID is unique. */
++ if (uvc_entity_by_id(dev, id)) {
++ dev_err(&dev->intf->dev, "Found multiple Units with ID %u\n", id);
++ id = UVC_INVALID_ENTITY_ID;
++ }
++
+ extra_size = roundup(extra_size, sizeof(*entity->pads));
+ if (num_pads)
+ num_inputs = type & UVC_TERM_OUTPUT ? num_pads : num_pads - 1;
+@@ -1046,7 +1062,7 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id,
+ + num_inputs;
+ entity = kzalloc(size, GFP_KERNEL);
+ if (entity == NULL)
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+
+ entity->id = id;
+ entity->type = type;
+@@ -1136,10 +1152,10 @@ static int uvc_parse_vendor_control(struct uvc_device *dev,
+ break;
+ }
+
+- unit = uvc_alloc_entity(UVC_VC_EXTENSION_UNIT, buffer[3],
+- p + 1, 2*n);
+- if (unit == NULL)
+- return -ENOMEM;
++ unit = uvc_alloc_new_entity(dev, UVC_VC_EXTENSION_UNIT,
++ buffer[3], p + 1, 2 * n);
++ if (IS_ERR(unit))
++ return PTR_ERR(unit);
+
+ memcpy(unit->guid, &buffer[4], 16);
+ unit->extension.bNumControls = buffer[20];
+@@ -1249,10 +1265,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ return -EINVAL;
+ }
+
+- term = uvc_alloc_entity(type | UVC_TERM_INPUT, buffer[3],
+- 1, n + p);
+- if (term == NULL)
+- return -ENOMEM;
++ term = uvc_alloc_new_entity(dev, type | UVC_TERM_INPUT,
++ buffer[3], 1, n + p);
++ if (IS_ERR(term))
++ return PTR_ERR(term);
+
+ if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA) {
+ term->camera.bControlSize = n;
+@@ -1308,10 +1324,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ return 0;
+ }
+
+- term = uvc_alloc_entity(type | UVC_TERM_OUTPUT, buffer[3],
+- 1, 0);
+- if (term == NULL)
+- return -ENOMEM;
++ term = uvc_alloc_new_entity(dev, type | UVC_TERM_OUTPUT,
++ buffer[3], 1, 0);
++ if (IS_ERR(term))
++ return PTR_ERR(term);
+
+ memcpy(term->baSourceID, &buffer[7], 1);
+
+@@ -1332,9 +1348,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ return -EINVAL;
+ }
+
+- unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, 0);
+- if (unit == NULL)
+- return -ENOMEM;
++ unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
++ p + 1, 0);
++ if (IS_ERR(unit))
++ return PTR_ERR(unit);
+
+ memcpy(unit->baSourceID, &buffer[5], p);
+
+@@ -1356,9 +1373,9 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ return -EINVAL;
+ }
+
+- unit = uvc_alloc_entity(buffer[2], buffer[3], 2, n);
+- if (unit == NULL)
+- return -ENOMEM;
++ unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3], 2, n);
++ if (IS_ERR(unit))
++ return PTR_ERR(unit);
+
+ memcpy(unit->baSourceID, &buffer[4], 1);
+ unit->processing.wMaxMultiplier =
+@@ -1387,9 +1404,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ return -EINVAL;
+ }
+
+- unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, n);
+- if (unit == NULL)
+- return -ENOMEM;
++ unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
++ p + 1, n);
++ if (IS_ERR(unit))
++ return PTR_ERR(unit);
+
+ memcpy(unit->guid, &buffer[4], 16);
+ unit->extension.bNumControls = buffer[20];
+@@ -1528,9 +1546,10 @@ static int uvc_gpio_parse(struct uvc_device *dev)
+ return dev_err_probe(&dev->intf->dev, irq,
+ "No IRQ for privacy GPIO\n");
+
+- unit = uvc_alloc_entity(UVC_EXT_GPIO_UNIT, UVC_EXT_GPIO_UNIT_ID, 0, 1);
+- if (!unit)
+- return -ENOMEM;
++ unit = uvc_alloc_new_entity(dev, UVC_EXT_GPIO_UNIT,
++ UVC_EXT_GPIO_UNIT_ID, 0, 1);
++ if (IS_ERR(unit))
++ return PTR_ERR(unit);
+
+ unit->gpio.gpio_privacy = gpio_privacy;
+ unit->gpio.irq = irq;
+diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
+index 95af1591f1059..be4b746d902c6 100644
+--- a/drivers/media/usb/uvc/uvcvideo.h
++++ b/drivers/media/usb/uvc/uvcvideo.h
+@@ -41,6 +41,8 @@
+ #define UVC_EXT_GPIO_UNIT 0x7ffe
+ #define UVC_EXT_GPIO_UNIT_ID 0x100
+
++#define UVC_INVALID_ENTITY_ID 0xffff
++
+ /* ------------------------------------------------------------------------
+ * GUIDs
+ */
+--
+2.53.0
+
--- /dev/null
+From 78ce6f4d2ba09e8c91918083a1c7d31f21985ca0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Oct 2025 10:36:17 +0000
+Subject: media: uvcvideo: Use heuristic to find stream entity
+
+From: Ricardo Ribalda <ribalda@chromium.org>
+
+[ Upstream commit 758dbc756aad429da11c569c0d067f7fd032bcf7 ]
+
+Some devices, like the Grandstream GUV3100 webcam, have an invalid UVC
+descriptor where multiple entities share the same ID, this is invalid
+and makes it impossible to make a proper entity tree without heuristics.
+
+We have recently introduced a change in the way that we handle invalid
+entities that has caused a regression on broken devices.
+
+Implement a new heuristic to handle these devices properly.
+
+Reported-by: Angel4005 <ooara1337@gmail.com>
+Closes: https://lore.kernel.org/linux-media/CAOzBiVuS7ygUjjhCbyWg-KiNx+HFTYnqH5+GJhd6cYsNLT=DaA@mail.gmail.com/
+Fixes: 0e2ee70291e6 ("media: uvcvideo: Mark invalid entities with id UVC_INVALID_ENTITY_ID")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ricardo Ribalda <ribalda@chromium.org>
+Reviewed-by: Hans de Goede <hansg@kernel.org>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/usb/uvc/uvc_driver.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index c39c1f237d10e..1cd68501fdc50 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -443,13 +443,26 @@ static struct uvc_entity *uvc_entity_by_reference(struct uvc_device *dev,
+
+ static struct uvc_streaming *uvc_stream_by_id(struct uvc_device *dev, int id)
+ {
+- struct uvc_streaming *stream;
++ struct uvc_streaming *stream, *last_stream;
++ unsigned int count = 0;
+
+ list_for_each_entry(stream, &dev->streams, list) {
++ count += 1;
++ last_stream = stream;
+ if (stream->header.bTerminalLink == id)
+ return stream;
+ }
+
++ /*
++ * If the streaming entity is referenced by an invalid ID, notify the
++ * user and use heuristics to guess the correct entity.
++ */
++ if (count == 1 && id == UVC_INVALID_ENTITY_ID) {
++ dev_warn(&dev->intf->dev,
++ "UVC non compliance: Invalid USB header. The streaming entity has an invalid ID, guessing the correct one.");
++ return last_stream;
++ }
++
+ return NULL;
+ }
+
+--
+2.53.0
+
mptcp-fix-slab-use-after-free-in-__inet_lookup_established.patch
input-uinput-fix-circular-locking-dependency-with-ff-core.patch
input-uinput-take-event-lock-when-submitting-ff-request-event.patch
+media-uvcvideo-mark-invalid-entities-with-id-uvc_inv.patch
+media-uvcvideo-use-heuristic-to-find-stream-entity.patch
+gpiolib-cdev-fix-uninitialised-kfifo.patch
+iio-adc-ad7923-fix-buffer-overflow-for-tx_buf-and-ri.patch
--- /dev/null
+From 02500b456a2ae977c242c51a409acf0092d4715f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 16:08:16 +0000
+Subject: media: uvcvideo: Mark invalid entities with id UVC_INVALID_ENTITY_ID
+
+From: Thadeu Lima de Souza Cascardo <cascardo@igalia.com>
+
+[ Upstream commit 0e2ee70291e64a30fe36960c85294726d34a103e ]
+
+Per UVC 1.1+ specification 3.7.2, units and terminals must have a non-zero
+unique ID.
+
+```
+Each Unit and Terminal within the video function is assigned a unique
+identification number, the Unit ID (UID) or Terminal ID (TID), contained in
+the bUnitID or bTerminalID field of the descriptor. The value 0x00 is
+reserved for undefined ID,
+```
+
+If we add a new entity with id 0 or a duplicated ID, it will be marked
+as UVC_INVALID_ENTITY_ID.
+
+In a previous attempt commit 3dd075fe8ebb ("media: uvcvideo: Require
+entities to have a non-zero unique ID"), we ignored all the invalid units,
+this broke a lot of non-compatible cameras. Hopefully we are more lucky
+this time.
+
+This also prevents some syzkaller reproducers from triggering warnings due
+to a chain of entities referring to themselves. In one particular case, an
+Output Unit is connected to an Input Unit, both with the same ID of 1. But
+when looking up for the source ID of the Output Unit, that same entity is
+found instead of the input entity, which leads to such warnings.
+
+In another case, a backward chain was considered finished as the source ID
+was 0. Later on, that entity was found, but its pads were not valid.
+
+Here is a sample stack trace for one of those cases.
+
+[ 20.650953] usb 1-1: new high-speed USB device number 2 using dummy_hcd
+[ 20.830206] usb 1-1: Using ep0 maxpacket: 8
+[ 20.833501] usb 1-1: config 0 descriptor??
+[ 21.038518] usb 1-1: string descriptor 0 read error: -71
+[ 21.038893] usb 1-1: Found UVC 0.00 device <unnamed> (2833:0201)
+[ 21.039299] uvcvideo 1-1:0.0: Entity type for entity Output 1 was not initialized!
+[ 21.041583] uvcvideo 1-1:0.0: Entity type for entity Input 1 was not initialized!
+[ 21.042218] ------------[ cut here ]------------
+[ 21.042536] WARNING: CPU: 0 PID: 9 at drivers/media/mc/mc-entity.c:1147 media_create_pad_link+0x2c4/0x2e0
+[ 21.043195] Modules linked in:
+[ 21.043535] CPU: 0 UID: 0 PID: 9 Comm: kworker/0:1 Not tainted 6.11.0-rc7-00030-g3480e43aeccf #444
+[ 21.044101] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.15.0-1 04/01/2014
+[ 21.044639] Workqueue: usb_hub_wq hub_event
+[ 21.045100] RIP: 0010:media_create_pad_link+0x2c4/0x2e0
+[ 21.045508] Code: fe e8 20 01 00 00 b8 f4 ff ff ff 48 83 c4 30 5b 41 5c 41 5d 41 5e 41 5f 5d c3 cc cc cc cc 0f 0b eb e9 0f 0b eb 0a 0f 0b eb 06 <0f> 0b eb 02 0f 0b b8 ea ff ff ff eb d4 66 2e 0f 1f 84 00 00 00 00
+[ 21.046801] RSP: 0018:ffffc9000004b318 EFLAGS: 00010246
+[ 21.047227] RAX: ffff888004e5d458 RBX: 0000000000000000 RCX: ffffffff818fccf1
+[ 21.047719] RDX: 000000000000007b RSI: 0000000000000000 RDI: ffff888004313290
+[ 21.048241] RBP: ffff888004313290 R08: 0001ffffffffffff R09: 0000000000000000
+[ 21.048701] R10: 0000000000000013 R11: 0001888004313290 R12: 0000000000000003
+[ 21.049138] R13: ffff888004313080 R14: ffff888004313080 R15: 0000000000000000
+[ 21.049648] FS: 0000000000000000(0000) GS:ffff88803ec00000(0000) knlGS:0000000000000000
+[ 21.050271] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 21.050688] CR2: 0000592cc27635b0 CR3: 000000000431c000 CR4: 0000000000750ef0
+[ 21.051136] PKRU: 55555554
+[ 21.051331] Call Trace:
+[ 21.051480] <TASK>
+[ 21.051611] ? __warn+0xc4/0x210
+[ 21.051861] ? media_create_pad_link+0x2c4/0x2e0
+[ 21.052252] ? report_bug+0x11b/0x1a0
+[ 21.052540] ? trace_hardirqs_on+0x31/0x40
+[ 21.052901] ? handle_bug+0x3d/0x70
+[ 21.053197] ? exc_invalid_op+0x1a/0x50
+[ 21.053511] ? asm_exc_invalid_op+0x1a/0x20
+[ 21.053924] ? media_create_pad_link+0x91/0x2e0
+[ 21.054364] ? media_create_pad_link+0x2c4/0x2e0
+[ 21.054834] ? media_create_pad_link+0x91/0x2e0
+[ 21.055131] ? _raw_spin_unlock+0x1e/0x40
+[ 21.055441] ? __v4l2_device_register_subdev+0x202/0x210
+[ 21.055837] uvc_mc_register_entities+0x358/0x400
+[ 21.056144] uvc_register_chains+0x1fd/0x290
+[ 21.056413] uvc_probe+0x380e/0x3dc0
+[ 21.056676] ? __lock_acquire+0x5aa/0x26e0
+[ 21.056946] ? find_held_lock+0x33/0xa0
+[ 21.057196] ? kernfs_activate+0x70/0x80
+[ 21.057533] ? usb_match_dynamic_id+0x1b/0x70
+[ 21.057811] ? find_held_lock+0x33/0xa0
+[ 21.058047] ? usb_match_dynamic_id+0x55/0x70
+[ 21.058330] ? lock_release+0x124/0x260
+[ 21.058657] ? usb_match_one_id_intf+0xa2/0x100
+[ 21.058997] usb_probe_interface+0x1ba/0x330
+[ 21.059399] really_probe+0x1ba/0x4c0
+[ 21.059662] __driver_probe_device+0xb2/0x180
+[ 21.059944] driver_probe_device+0x5a/0x100
+[ 21.060170] __device_attach_driver+0xe9/0x160
+[ 21.060427] ? __pfx___device_attach_driver+0x10/0x10
+[ 21.060872] bus_for_each_drv+0xa9/0x100
+[ 21.061312] __device_attach+0xed/0x190
+[ 21.061812] device_initial_probe+0xe/0x20
+[ 21.062229] bus_probe_device+0x4d/0xd0
+[ 21.062590] device_add+0x308/0x590
+[ 21.062912] usb_set_configuration+0x7b6/0xaf0
+[ 21.063403] usb_generic_driver_probe+0x36/0x80
+[ 21.063714] usb_probe_device+0x7b/0x130
+[ 21.063936] really_probe+0x1ba/0x4c0
+[ 21.064111] __driver_probe_device+0xb2/0x180
+[ 21.064577] driver_probe_device+0x5a/0x100
+[ 21.065019] __device_attach_driver+0xe9/0x160
+[ 21.065403] ? __pfx___device_attach_driver+0x10/0x10
+[ 21.065820] bus_for_each_drv+0xa9/0x100
+[ 21.066094] __device_attach+0xed/0x190
+[ 21.066535] device_initial_probe+0xe/0x20
+[ 21.066992] bus_probe_device+0x4d/0xd0
+[ 21.067250] device_add+0x308/0x590
+[ 21.067501] usb_new_device+0x347/0x610
+[ 21.067817] hub_event+0x156b/0x1e30
+[ 21.068060] ? process_scheduled_works+0x48b/0xaf0
+[ 21.068337] process_scheduled_works+0x5a3/0xaf0
+[ 21.068668] worker_thread+0x3cf/0x560
+[ 21.068932] ? kthread+0x109/0x1b0
+[ 21.069133] kthread+0x197/0x1b0
+[ 21.069343] ? __pfx_worker_thread+0x10/0x10
+[ 21.069598] ? __pfx_kthread+0x10/0x10
+[ 21.069908] ret_from_fork+0x32/0x40
+[ 21.070169] ? __pfx_kthread+0x10/0x10
+[ 21.070424] ret_from_fork_asm+0x1a/0x30
+[ 21.070737] </TASK>
+
+Reported-by: syzbot+0584f746fde3d52b4675@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=0584f746fde3d52b4675
+Reported-by: syzbot+dd320d114deb3f5bb79b@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=dd320d114deb3f5bb79b
+Reported-by: Youngjun Lee <yjjuny.lee@samsung.com>
+Fixes: a3fbc2e6bb05 ("media: mc-entity.c: use WARN_ON, validate link pads")
+Cc: stable@vger.kernel.org
+Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@igalia.com>
+Co-developed-by: Ricardo Ribalda <ribalda@chromium.org>
+Signed-off-by: Ricardo Ribalda <ribalda@chromium.org>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Reviewed-by: Hans de Goede <hansg@kernel.org>
+Signed-off-by: Hans de Goede <hansg@kernel.org>
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/usb/uvc/uvc_driver.c | 73 +++++++++++++++++++-----------
+ drivers/media/usb/uvc/uvcvideo.h | 2 +
+ 2 files changed, 48 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index ff5ca3163c3e1..f81bddd20bc09 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -134,6 +134,9 @@ struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id)
+ {
+ struct uvc_entity *entity;
+
++ if (id == UVC_INVALID_ENTITY_ID)
++ return NULL;
++
+ list_for_each_entry(entity, &dev->entities, list) {
+ if (entity->id == id)
+ return entity;
+@@ -757,14 +760,27 @@ static const u8 uvc_media_transport_input_guid[16] =
+ UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT;
+ static const u8 uvc_processing_guid[16] = UVC_GUID_UVC_PROCESSING;
+
+-static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id,
+- unsigned int num_pads, unsigned int extra_size)
++static struct uvc_entity *uvc_alloc_new_entity(struct uvc_device *dev, u16 type,
++ u16 id, unsigned int num_pads,
++ unsigned int extra_size)
+ {
+ struct uvc_entity *entity;
+ unsigned int num_inputs;
+ unsigned int size;
+ unsigned int i;
+
++ /* Per UVC 1.1+ spec 3.7.2, the ID should be non-zero. */
++ if (id == 0) {
++ dev_err(&dev->intf->dev, "Found Unit with invalid ID 0\n");
++ id = UVC_INVALID_ENTITY_ID;
++ }
++
++ /* Per UVC 1.1+ spec 3.7.2, the ID is unique. */
++ if (uvc_entity_by_id(dev, id)) {
++ dev_err(&dev->intf->dev, "Found multiple Units with ID %u\n", id);
++ id = UVC_INVALID_ENTITY_ID;
++ }
++
+ extra_size = roundup(extra_size, sizeof(*entity->pads));
+ if (num_pads)
+ num_inputs = type & UVC_TERM_OUTPUT ? num_pads : num_pads - 1;
+@@ -774,7 +790,7 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id,
+ + num_inputs;
+ entity = kzalloc(size, GFP_KERNEL);
+ if (entity == NULL)
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+
+ entity->id = id;
+ entity->type = type;
+@@ -865,10 +881,10 @@ static int uvc_parse_vendor_control(struct uvc_device *dev,
+ break;
+ }
+
+- unit = uvc_alloc_entity(UVC_VC_EXTENSION_UNIT, buffer[3],
+- p + 1, 2*n);
+- if (unit == NULL)
+- return -ENOMEM;
++ unit = uvc_alloc_new_entity(dev, UVC_VC_EXTENSION_UNIT,
++ buffer[3], p + 1, 2 * n);
++ if (IS_ERR(unit))
++ return PTR_ERR(unit);
+
+ memcpy(unit->guid, &buffer[4], 16);
+ unit->extension.bNumControls = buffer[20];
+@@ -978,10 +994,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ return -EINVAL;
+ }
+
+- term = uvc_alloc_entity(type | UVC_TERM_INPUT, buffer[3],
+- 1, n + p);
+- if (term == NULL)
+- return -ENOMEM;
++ term = uvc_alloc_new_entity(dev, type | UVC_TERM_INPUT,
++ buffer[3], 1, n + p);
++ if (IS_ERR(term))
++ return PTR_ERR(term);
+
+ if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA) {
+ term->camera.bControlSize = n;
+@@ -1038,10 +1054,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ return 0;
+ }
+
+- term = uvc_alloc_entity(type | UVC_TERM_OUTPUT, buffer[3],
+- 1, 0);
+- if (term == NULL)
+- return -ENOMEM;
++ term = uvc_alloc_new_entity(dev, type | UVC_TERM_OUTPUT,
++ buffer[3], 1, 0);
++ if (IS_ERR(term))
++ return PTR_ERR(term);
+
+ memcpy(term->baSourceID, &buffer[7], 1);
+
+@@ -1062,9 +1078,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ return -EINVAL;
+ }
+
+- unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, 0);
+- if (unit == NULL)
+- return -ENOMEM;
++ unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
++ p + 1, 0);
++ if (IS_ERR(unit))
++ return PTR_ERR(unit);
+
+ memcpy(unit->baSourceID, &buffer[5], p);
+
+@@ -1086,9 +1103,9 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ return -EINVAL;
+ }
+
+- unit = uvc_alloc_entity(buffer[2], buffer[3], 2, n);
+- if (unit == NULL)
+- return -ENOMEM;
++ unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3], 2, n);
++ if (IS_ERR(unit))
++ return PTR_ERR(unit);
+
+ memcpy(unit->baSourceID, &buffer[4], 1);
+ unit->processing.wMaxMultiplier =
+@@ -1117,9 +1134,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ return -EINVAL;
+ }
+
+- unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, n);
+- if (unit == NULL)
+- return -ENOMEM;
++ unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
++ p + 1, n);
++ if (IS_ERR(unit))
++ return PTR_ERR(unit);
+
+ memcpy(unit->guid, &buffer[4], 16);
+ unit->extension.bNumControls = buffer[20];
+@@ -1260,9 +1278,10 @@ static int uvc_gpio_parse(struct uvc_device *dev)
+ return dev_err_probe(&dev->intf->dev, irq,
+ "No IRQ for privacy GPIO\n");
+
+- unit = uvc_alloc_entity(UVC_EXT_GPIO_UNIT, UVC_EXT_GPIO_UNIT_ID, 0, 1);
+- if (!unit)
+- return -ENOMEM;
++ unit = uvc_alloc_new_entity(dev, UVC_EXT_GPIO_UNIT,
++ UVC_EXT_GPIO_UNIT_ID, 0, 1);
++ if (IS_ERR(unit))
++ return PTR_ERR(unit);
+
+ unit->gpio.gpio_privacy = gpio_privacy;
+ unit->gpio.irq = irq;
+diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
+index 45caa8523426d..a7182305390b4 100644
+--- a/drivers/media/usb/uvc/uvcvideo.h
++++ b/drivers/media/usb/uvc/uvcvideo.h
+@@ -41,6 +41,8 @@
+ #define UVC_EXT_GPIO_UNIT 0x7ffe
+ #define UVC_EXT_GPIO_UNIT_ID 0x100
+
++#define UVC_INVALID_ENTITY_ID 0xffff
++
+ /* ------------------------------------------------------------------------
+ * Driver specific constants.
+ */
+--
+2.53.0
+
--- /dev/null
+From 9ffa5081b36702f8a099d5722d79cdd8bbcbc1af Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Oct 2025 10:36:17 +0000
+Subject: media: uvcvideo: Use heuristic to find stream entity
+
+From: Ricardo Ribalda <ribalda@chromium.org>
+
+[ Upstream commit 758dbc756aad429da11c569c0d067f7fd032bcf7 ]
+
+Some devices, like the Grandstream GUV3100 webcam, have an invalid UVC
+descriptor where multiple entities share the same ID, this is invalid
+and makes it impossible to make a proper entity tree without heuristics.
+
+We have recently introduced a change in the way that we handle invalid
+entities that has caused a regression on broken devices.
+
+Implement a new heuristic to handle these devices properly.
+
+Reported-by: Angel4005 <ooara1337@gmail.com>
+Closes: https://lore.kernel.org/linux-media/CAOzBiVuS7ygUjjhCbyWg-KiNx+HFTYnqH5+GJhd6cYsNLT=DaA@mail.gmail.com/
+Fixes: 0e2ee70291e6 ("media: uvcvideo: Mark invalid entities with id UVC_INVALID_ENTITY_ID")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ricardo Ribalda <ribalda@chromium.org>
+Reviewed-by: Hans de Goede <hansg@kernel.org>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/usb/uvc/uvc_driver.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index f81bddd20bc09..a1c6ae97a2b93 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -164,13 +164,26 @@ static struct uvc_entity *uvc_entity_by_reference(struct uvc_device *dev,
+
+ static struct uvc_streaming *uvc_stream_by_id(struct uvc_device *dev, int id)
+ {
+- struct uvc_streaming *stream;
++ struct uvc_streaming *stream, *last_stream;
++ unsigned int count = 0;
+
+ list_for_each_entry(stream, &dev->streams, list) {
++ count += 1;
++ last_stream = stream;
+ if (stream->header.bTerminalLink == id)
+ return stream;
+ }
+
++ /*
++ * If the streaming entity is referenced by an invalid ID, notify the
++ * user and use heuristics to guess the correct entity.
++ */
++ if (count == 1 && id == UVC_INVALID_ENTITY_ID) {
++ dev_warn(&dev->intf->dev,
++ "UVC non compliance: Invalid USB header. The streaming entity has an invalid ID, guessing the correct one.");
++ return last_stream;
++ }
++
+ return NULL;
+ }
+
+--
+2.53.0
+
--- /dev/null
+From 93824f60160dc910360c2b4e9c774cda10b963c2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Apr 2026 01:52:57 +0100
+Subject: MIPS: Always record SEGBITS in cpu_data.vmbits
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 8374c2cb83b95b3c92f129fd56527225c20a058c upstream.
+
+With a 32-bit kernel running on 64-bit MIPS hardware the hardcoded value
+of `cpu_vmbits' only records the size of compatibility useg and does not
+reflect the size of native xuseg or the complete range of values allowed
+in the VPN2 field of TLB entries.
+
+An upcoming change will need the actual VPN2 value range permitted even
+in 32-bit kernel configurations, so always include the `vmbits' member
+in `struct cpuinfo_mips' and probe for SEGBITS when running on 64-bit
+hardware and resorting to the currently hardcoded value of 31 on 32-bit
+processors. No functional change for users of `cpu_vmbits'.
+
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/include/asm/cpu-features.h | 1 -
+ arch/mips/include/asm/cpu-info.h | 2 --
+ arch/mips/include/asm/mipsregs.h | 2 ++
+ arch/mips/kernel/cpu-probe.c | 13 ++++++++-----
+ arch/mips/kernel/cpu-r3k-probe.c | 2 ++
+ 5 files changed, 12 insertions(+), 8 deletions(-)
+
+diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
+index e0a4da4cfd8bc..53ea41be37351 100644
+--- a/arch/mips/include/asm/cpu-features.h
++++ b/arch/mips/include/asm/cpu-features.h
+@@ -484,7 +484,6 @@
+ # endif
+ # ifndef cpu_vmbits
+ # define cpu_vmbits cpu_data[0].vmbits
+-# define __NEED_VMBITS_PROBE
+ # endif
+ #endif
+
+diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
+index a600670d00e97..1aee44124f118 100644
+--- a/arch/mips/include/asm/cpu-info.h
++++ b/arch/mips/include/asm/cpu-info.h
+@@ -80,9 +80,7 @@ struct cpuinfo_mips {
+ int srsets; /* Shadow register sets */
+ int package;/* physical package number */
+ unsigned int globalnumber;
+-#ifdef CONFIG_64BIT
+ int vmbits; /* Virtual memory size in bits */
+-#endif
+ void *data; /* Additional data */
+ unsigned int watch_reg_count; /* Number that exist */
+ unsigned int watch_reg_use_cnt; /* Usable by ptrace */
+diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
+index c60e72917a281..581aa8876a74c 100644
+--- a/arch/mips/include/asm/mipsregs.h
++++ b/arch/mips/include/asm/mipsregs.h
+@@ -1714,6 +1714,8 @@ do { \
+
+ #define read_c0_entryhi() __read_ulong_c0_register($10, 0)
+ #define write_c0_entryhi(val) __write_ulong_c0_register($10, 0, val)
++#define read_c0_entryhi_64() __read_64bit_c0_register($10, 0)
++#define write_c0_entryhi_64(val) __write_64bit_c0_register($10, 0, val)
+
+ #define read_c0_guestctl1() __read_32bit_c0_register($10, 4)
+ #define write_c0_guestctl1(val) __write_32bit_c0_register($10, 4, val)
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index fdf00c228b67f..09d95482957a4 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -208,11 +208,14 @@ static inline void set_elf_base_platform(const char *plat)
+
+ static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
+ {
+-#ifdef __NEED_VMBITS_PROBE
+- write_c0_entryhi(0x3fffffffffffe000ULL);
+- back_to_back_c0_hazard();
+- c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL);
+-#endif
++ int vmbits = 31;
++
++ if (cpu_has_64bits) {
++ write_c0_entryhi_64(0x3fffffffffffe000ULL);
++ back_to_back_c0_hazard();
++ vmbits = fls64(read_c0_entryhi_64() & 0x3fffffffffffe000ULL);
++ }
++ c->vmbits = vmbits;
+ }
+
+ static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
+diff --git a/arch/mips/kernel/cpu-r3k-probe.c b/arch/mips/kernel/cpu-r3k-probe.c
+index be93469c0e0ec..2adf95225aa7e 100644
+--- a/arch/mips/kernel/cpu-r3k-probe.c
++++ b/arch/mips/kernel/cpu-r3k-probe.c
+@@ -138,6 +138,8 @@ void cpu_probe(void)
+ else
+ cpu_set_nofpu_opts(c);
+
++ c->vmbits = 31;
++
+ reserve_exception_space(0, 0x400);
+ }
+
+--
+2.53.0
+
--- /dev/null
+From 08a4ed770b7332045b092aa3bb160cddbdcc575d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Apr 2026 01:52:59 +0100
+Subject: MIPS: mm: Rewrite TLB uniquification for the hidden bit feature
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 540760b77b8fc49d39d1b2b76196e5ec57711a32 upstream.
+
+Before the introduction of the EHINV feature, which lets software mark
+TLB entries invalid, certain older implementations of the MIPS ISA were
+equipped with an analogous bit, as a vendor extension, which however is
+hidden from software and only ever set at reset, and then any software
+write clears it, making the intended TLB entry valid.
+
+This feature makes it unsafe to read a TLB entry with TLBR, modify the
+page mask, and write the entry back with TLBWI, because this operation
+will implicitly clear the hidden bit and this may create a duplicate
+entry, as with the presence of the hidden bit there is no guarantee all
+the entries across the TLB are unique each.
+
+Usually the firmware has already uniquified TLB entries before handing
+control over, in which case we only need to guarantee at bootstrap no
+clash will happen with the VPN2 values chosen in local_flush_tlb_all().
+
+However with systems such as Mikrotik RB532 we get handed the TLB as at
+reset, with the hidden bit set across the entries and possibly duplicate
+entries present. This then causes a machine check exception when page
+sizes are reset in r4k_tlb_uniquify() and prevents the system from
+booting.
+
+Rewrite the algorithm used in r4k_tlb_uniquify() then such as to avoid
+the reuse of ASID/VPN values across the TLB. Get rid of global entries
+first as they may be blocking the entire address space, e.g. 16 256MiB
+pages will exhaust the whole address space of a 32-bit CPU and a single
+big page can exhaust the 32-bit compatibility space on a 64-bit CPU.
+
+Details of the algorithm chosen are given across the code itself.
+
+Fixes: 9f048fa48740 ("MIPS: mm: Prevent a TLB shutdown on initial uniquification")
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Cc: stable@vger.kernel.org # v6.18+
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 282 +++++++++++++++++++++++++++++++++--------
+ 1 file changed, 228 insertions(+), 54 deletions(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index 3669895a85bf2..8d111f0a5296e 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -13,6 +13,7 @@
+ #include <linux/sched.h>
+ #include <linux/smp.h>
+ #include <linux/memblock.h>
++#include <linux/minmax.h>
+ #include <linux/mm.h>
+ #include <linux/hugetlb.h>
+ #include <linux/export.h>
+@@ -24,6 +25,7 @@
+ #include <asm/hazards.h>
+ #include <asm/mmu_context.h>
+ #include <asm/tlb.h>
++#include <asm/tlbdebug.h>
+ #include <asm/tlbmisc.h>
+
+ extern void build_tlb_refill_handler(void);
+@@ -501,87 +503,259 @@ static int __init set_ntlb(char *str)
+ __setup("ntlb=", set_ntlb);
+
+
+-/* Comparison function for EntryHi VPN fields. */
+-static int r4k_vpn_cmp(const void *a, const void *b)
++/* The start bit position of VPN2 and Mask in EntryHi/PageMask registers. */
++#define VPN2_SHIFT 13
++
++/* Read full EntryHi even with CONFIG_32BIT. */
++static inline unsigned long long read_c0_entryhi_native(void)
++{
++ return cpu_has_64bits ? read_c0_entryhi_64() : read_c0_entryhi();
++}
++
++/* Write full EntryHi even with CONFIG_32BIT. */
++static inline void write_c0_entryhi_native(unsigned long long v)
+ {
+- long v = *(unsigned long *)a - *(unsigned long *)b;
+- int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1: 0;
+- return s ? (v != 0) | v >> s : v;
++ if (cpu_has_64bits)
++ write_c0_entryhi_64(v);
++ else
++ write_c0_entryhi(v);
+ }
+
++/* TLB entry state for uniquification. */
++struct tlbent {
++ unsigned long long wired:1;
++ unsigned long long global:1;
++ unsigned long long asid:10;
++ unsigned long long vpn:51;
++ unsigned long long pagesz:5;
++ unsigned long long index:14;
++};
++
+ /*
+- * Initialise all TLB entries with unique values that do not clash with
+- * what we have been handed over and what we'll be using ourselves.
++ * Comparison function for TLB entry sorting. Place wired entries first,
++ * then global entries, then order by the increasing VPN/ASID and the
++ * decreasing page size. This lets us avoid clashes with wired entries
++ * easily and get entries for larger pages out of the way first.
++ *
++ * We could group bits so as to reduce the number of comparisons, but this
++ * is seldom executed and not performance-critical, so prefer legibility.
+ */
+-static void __ref r4k_tlb_uniquify(void)
++static int r4k_entry_cmp(const void *a, const void *b)
+ {
+- int tlbsize = current_cpu_data.tlbsize;
+- bool use_slab = slab_is_available();
+- int start = num_wired_entries();
+- phys_addr_t tlb_vpn_size;
+- unsigned long *tlb_vpns;
+- unsigned long vpn_mask;
+- int cnt, ent, idx, i;
+-
+- vpn_mask = GENMASK(cpu_vmbits - 1, 13);
+- vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
++ struct tlbent ea = *(struct tlbent *)a, eb = *(struct tlbent *)b;
++
++ if (ea.wired > eb.wired)
++ return -1;
++ else if (ea.wired < eb.wired)
++ return 1;
++ else if (ea.global > eb.global)
++ return -1;
++ else if (ea.global < eb.global)
++ return 1;
++ else if (ea.vpn < eb.vpn)
++ return -1;
++ else if (ea.vpn > eb.vpn)
++ return 1;
++ else if (ea.asid < eb.asid)
++ return -1;
++ else if (ea.asid > eb.asid)
++ return 1;
++ else if (ea.pagesz > eb.pagesz)
++ return -1;
++ else if (ea.pagesz < eb.pagesz)
++ return 1;
++ else
++ return 0;
++}
+
+- tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
+- tlb_vpns = (use_slab ?
+- kmalloc(tlb_vpn_size, GFP_ATOMIC) :
+- memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
+- if (WARN_ON(!tlb_vpns))
+- return; /* Pray local_flush_tlb_all() is good enough. */
++/*
++ * Fetch all the TLB entries. Mask individual VPN values retrieved with
++ * the corresponding page mask and ignoring any 1KiB extension as we'll
++ * be using 4KiB pages for uniquification.
++ */
++static void __ref r4k_tlb_uniquify_read(struct tlbent *tlb_vpns, int tlbsize)
++{
++ int start = num_wired_entries();
++ unsigned long long vpn_mask;
++ bool global;
++ int i;
+
+- htw_stop();
++ vpn_mask = GENMASK(current_cpu_data.vmbits - 1, VPN2_SHIFT);
++ vpn_mask |= cpu_has_64bits ? 3ULL << 62 : 1 << 31;
+
+- for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
+- unsigned long vpn;
++ for (i = 0; i < tlbsize; i++) {
++ unsigned long long entryhi, vpn, mask, asid;
++ unsigned int pagesz;
+
+ write_c0_index(i);
+ mtc0_tlbr_hazard();
+ tlb_read();
+ tlb_read_hazard();
+- vpn = read_c0_entryhi();
+- vpn &= vpn_mask & PAGE_MASK;
+- tlb_vpns[cnt] = vpn;
+
+- /* Prevent any large pages from overlapping regular ones. */
+- write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
+- mtc0_tlbw_hazard();
+- tlb_write_indexed();
+- tlbw_use_hazard();
++ global = !!(read_c0_entrylo0() & ENTRYLO_G);
++ entryhi = read_c0_entryhi_native();
++ mask = read_c0_pagemask();
++
++ asid = entryhi & cpu_asid_mask(¤t_cpu_data);
++ vpn = (entryhi & vpn_mask & ~mask) >> VPN2_SHIFT;
++ pagesz = ilog2((mask >> VPN2_SHIFT) + 1);
++
++ tlb_vpns[i].global = global;
++ tlb_vpns[i].asid = global ? 0 : asid;
++ tlb_vpns[i].vpn = vpn;
++ tlb_vpns[i].pagesz = pagesz;
++ tlb_vpns[i].wired = i < start;
++ tlb_vpns[i].index = i;
+ }
++}
+
+- sort(tlb_vpns, cnt, sizeof(tlb_vpns[0]), r4k_vpn_cmp, NULL);
++/*
++ * Write unique values to all but the wired TLB entries each, using
++ * the 4KiB page size. This size might not be supported with R6, but
++ * EHINV is mandatory for R6, so we won't ever be called in that case.
++ *
++ * A sorted table is supplied with any wired entries at the beginning,
++ * followed by any global entries, and then finally regular entries.
++ * We start at the VPN and ASID values of zero and only assign user
++ * addresses, therefore guaranteeing no clash with addresses produced
++ * by UNIQUE_ENTRYHI. We avoid any VPN values used by wired or global
++ * entries, by increasing the VPN value beyond the span of such entry.
++ *
++ * When a VPN/ASID clash is found with a regular entry we increment the
++ * ASID instead until no VPN/ASID clash has been found or the ASID space
++ * has been exhausted, in which case we increase the VPN value beyond
++ * the span of the largest clashing entry.
++ *
++ * We do not need to be concerned about FTLB or MMID configurations as
++ * those are required to implement the EHINV feature.
++ */
++static void __ref r4k_tlb_uniquify_write(struct tlbent *tlb_vpns, int tlbsize)
++{
++ unsigned long long asid, vpn, vpn_size, pagesz;
++ int widx, gidx, idx, sidx, lidx, i;
+
+- write_c0_pagemask(PM_DEFAULT_MASK);
++ vpn_size = 1ULL << (current_cpu_data.vmbits - VPN2_SHIFT);
++ pagesz = ilog2((PM_4K >> VPN2_SHIFT) + 1);
++
++ write_c0_pagemask(PM_4K);
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+
+- idx = 0;
+- ent = tlbsize;
+- for (i = start; i < tlbsize; i++)
+- while (1) {
+- unsigned long entryhi, vpn;
++ asid = 0;
++ vpn = 0;
++ widx = 0;
++ gidx = 0;
++ for (sidx = 0; sidx < tlbsize && tlb_vpns[sidx].wired; sidx++)
++ ;
++ for (lidx = sidx; lidx < tlbsize && tlb_vpns[lidx].global; lidx++)
++ ;
++ idx = gidx = sidx + 1;
++ for (i = sidx; i < tlbsize; i++) {
++ unsigned long long entryhi, vpn_pagesz = 0;
+
+- entryhi = UNIQUE_ENTRYHI(ent);
+- vpn = entryhi & vpn_mask & PAGE_MASK;
++ while (1) {
++ if (WARN_ON(vpn >= vpn_size)) {
++ dump_tlb_all();
++ /* Pray local_flush_tlb_all() will cope. */
++ return;
++ }
+
+- if (idx >= cnt || vpn < tlb_vpns[idx]) {
+- write_c0_entryhi(entryhi);
+- write_c0_index(i);
+- mtc0_tlbw_hazard();
+- tlb_write_indexed();
+- ent++;
+- break;
+- } else if (vpn == tlb_vpns[idx]) {
+- ent++;
+- } else {
++ /* VPN must be below the next wired entry. */
++ if (widx < sidx && vpn >= tlb_vpns[widx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[widx].vpn +
++ (1ULL << tlb_vpns[widx].pagesz)));
++ asid = 0;
++ widx++;
++ continue;
++ }
++ /* VPN must be below the next global entry. */
++ if (gidx < lidx && vpn >= tlb_vpns[gidx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[gidx].vpn +
++ (1ULL << tlb_vpns[gidx].pagesz)));
++ asid = 0;
++ gidx++;
++ continue;
++ }
++ /* Try to find a free ASID so as to conserve VPNs. */
++ if (idx < tlbsize && vpn == tlb_vpns[idx].vpn &&
++ asid == tlb_vpns[idx].asid) {
++ unsigned long long idx_pagesz;
++
++ idx_pagesz = tlb_vpns[idx].pagesz;
++ vpn_pagesz = max(vpn_pagesz, idx_pagesz);
++ do
++ idx++;
++ while (idx < tlbsize &&
++ vpn == tlb_vpns[idx].vpn &&
++ asid == tlb_vpns[idx].asid);
++ asid++;
++ if (asid > cpu_asid_mask(&current_cpu_data)) {
++ vpn += vpn_pagesz;
++ asid = 0;
++ vpn_pagesz = 0;
++ }
++ continue;
++ }
++ /* VPN mustn't be above the next regular entry. */
++ if (idx < tlbsize && vpn > tlb_vpns[idx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[idx].vpn +
++ (1ULL << tlb_vpns[idx].pagesz)));
++ asid = 0;
+ idx++;
++ continue;
+ }
++ break;
+ }
+
++ entryhi = (vpn << VPN2_SHIFT) | asid;
++ write_c0_entryhi_native(entryhi);
++ write_c0_index(tlb_vpns[i].index);
++ mtc0_tlbw_hazard();
++ tlb_write_indexed();
++
++ tlb_vpns[i].asid = asid;
++ tlb_vpns[i].vpn = vpn;
++ tlb_vpns[i].pagesz = pagesz;
++
++ asid++;
++ if (asid > cpu_asid_mask(&current_cpu_data)) {
++ vpn += 1ULL << pagesz;
++ asid = 0;
++ }
++ }
++}
++
++/*
++ * Initialise all TLB entries with unique values that do not clash with
++ * what we have been handed over and what we'll be using ourselves.
++ */
++static void __ref r4k_tlb_uniquify(void)
++{
++ int tlbsize = current_cpu_data.tlbsize;
++ bool use_slab = slab_is_available();
++ phys_addr_t tlb_vpn_size;
++ struct tlbent *tlb_vpns;
++
++ tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
++ tlb_vpns = (use_slab ?
++ kmalloc(tlb_vpn_size, GFP_ATOMIC) :
++ memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
++ if (WARN_ON(!tlb_vpns))
++ return; /* Pray local_flush_tlb_all() is good enough. */
++
++ htw_stop();
++
++ r4k_tlb_uniquify_read(tlb_vpns, tlbsize);
++
++ sort(tlb_vpns, tlbsize, sizeof(*tlb_vpns), r4k_entry_cmp, NULL);
++
++ r4k_tlb_uniquify_write(tlb_vpns, tlbsize);
++
++ write_c0_pagemask(PM_DEFAULT_MASK);
++
+ tlbw_use_hazard();
+ htw_start();
+ flush_micro_tlb();
+--
+2.53.0
+
--- /dev/null
+From d0e00760699067283b7dce4ec3067049850e0125 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Apr 2026 01:52:58 +0100
+Subject: MIPS: mm: Suppress TLB uniquification on EHINV hardware
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 74283cfe216392c7b776ebf6045b5b15ed9dffcd upstream.
+
+Hardware that supports the EHINV feature, mandatory for R6 ISA and FTLB
+implementation, lets software mark TLB entries invalid, which eliminates
+the need to ensure no duplicate matching entries are ever created. This
+feature is already used by local_flush_tlb_all(), via the UNIQUE_ENTRYHI
+macro, making the preceding call to r4k_tlb_uniquify() superfluous.
+
+The next change will also modify uniquification code such that it'll
+become incompatible with the FTLB and MMID features, as well as MIPSr6
+CPUs that do not implement 4KiB pages.
+
+Therefore prevent r4k_tlb_uniquify() from being used on EHINV hardware,
+as denoted by `cpu_has_tlbinv'.
+
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index d9631f3b6460d..3669895a85bf2 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -630,7 +630,8 @@ static void r4k_tlb_configure(void)
+ temp_tlb_entry = current_cpu_data.tlbsize - 1;
+
+ /* From this point on the ARC firmware is dead. */
+- r4k_tlb_uniquify();
++ if (!cpu_has_tlbinv)
++ r4k_tlb_uniquify();
+ local_flush_tlb_all();
+
+ /* Did I tell you that ARC SUCKS? */
+--
+2.53.0
+
mptcp-fix-slab-use-after-free-in-__inet_lookup_established.patch
input-uinput-fix-circular-locking-dependency-with-ff-core.patch
input-uinput-take-event-lock-when-submitting-ff-request-event.patch
+mips-always-record-segbits-in-cpu_data.vmbits.patch
+mips-mm-suppress-tlb-uniquification-on-ehinv-hardwar.patch
+mips-mm-rewrite-tlb-uniquification-for-the-hidden-bi.patch
+media-uvcvideo-mark-invalid-entities-with-id-uvc_inv.patch
+media-uvcvideo-use-heuristic-to-find-stream-entity.patch
--- /dev/null
+From af56e9606ce313d1f3ccc6208404a55c119ab992 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Apr 2026 18:12:56 +0800
+Subject: ASoC: simple-card-utils: Don't use __free(device_node) at
+ graph_util_parse_dai()
+
+From: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+
+[ Upstream commit de74ec718e0788e1998eb7289ad07970e27cae27 ]
+
+commit 419d1918105e ("ASoC: simple-card-utils: use __free(device_node) for
+device node") uses __free(device_node) for dlc->of_node, but we need to
+keep it while driver is in use.
+
+Don't use __free(device_node) in graph_util_parse_dai().
+
+Fixes: 419d1918105e ("ASoC: simple-card-utils: use __free(device_node) for device node")
+Reported-by: Thuan Nguyen <thuan.nguyen-hong@banvien.com.vn>
+Reported-by: Detlev Casanova <detlev.casanova@collabora.com>
+Signed-off-by: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+Tested-by: Thuan Nguyen <thuan.nguyen-hong@banvien.com.vn>
+Tested-by: Detlev Casanova <detlev.casanova@collabora.com>
+Link: https://patch.msgid.link/87eczisyhh.wl-kuninori.morimoto.gx@renesas.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Alva Lan <alvalan9@foxmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/generic/simple-card-utils.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
+index c9f92d445f4c9..0e3ae89d880e5 100644
+--- a/sound/soc/generic/simple-card-utils.c
++++ b/sound/soc/generic/simple-card-utils.c
+@@ -1072,6 +1072,7 @@ static int graph_get_dai_id(struct device_node *ep)
+ int graph_util_parse_dai(struct device *dev, struct device_node *ep,
+ struct snd_soc_dai_link_component *dlc, int *is_single_link)
+ {
++ struct device_node *node;
+ struct of_phandle_args args = {};
+ struct snd_soc_dai *dai;
+ int ret;
+@@ -1079,7 +1080,7 @@ int graph_util_parse_dai(struct device *dev, struct device_node *ep,
+ if (!ep)
+ return 0;
+
+- struct device_node *node __free(device_node) = of_graph_get_port_parent(ep);
++ node = of_graph_get_port_parent(ep);
+
+ /*
+ * Try to find from DAI node
+@@ -1121,8 +1122,10 @@ int graph_util_parse_dai(struct device *dev, struct device_node *ep,
+ * if he unbinded CPU or Codec.
+ */
+ ret = snd_soc_get_dlc(&args, dlc);
+- if (ret < 0)
++ if (ret < 0) {
++ of_node_put(node);
+ return ret;
++ }
+
+ parse_dai_end:
+ if (is_single_link)
+--
+2.53.0
+
--- /dev/null
+From 6533ff519fb4bf714eb97cc67ef46148f0000936 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Apr 2026 15:55:28 +0800
+Subject: blktrace: fix __this_cpu_read/write in preemptible context
+
+From: Chaitanya Kulkarni <kch@nvidia.com>
+
+[ Upstream commit da46b5dfef48658d03347cda21532bcdbb521e67 ]
+
+tracing_record_cmdline() internally uses __this_cpu_read() and
+__this_cpu_write() on the per-CPU variable trace_cmdline_save, and
+trace_save_cmdline() explicitly asserts preemption is disabled via
+lockdep_assert_preemption_disabled(). These operations are only safe
+when preemption is off, as they were designed to be called from the
+scheduler context (probe_wakeup_sched_switch() / probe_wakeup()).
+
+__blk_add_trace() was calling tracing_record_cmdline(current) early in
+the blk_tracer path, before ring buffer reservation, from process
+context where preemption is fully enabled. This triggers the following
+using blktests/blktrace/002:
+
+blktrace/002 (blktrace ftrace corruption with sysfs trace) [failed]
+ runtime 0.367s ... 0.437s
+ something found in dmesg:
+ [ 81.211018] run blktests blktrace/002 at 2026-02-25 22:24:33
+ [ 81.239580] null_blk: disk nullb1 created
+ [ 81.357294] BUG: using __this_cpu_read() in preemptible [00000000] code: dd/2516
+ [ 81.362842] caller is tracing_record_cmdline+0x10/0x40
+ [ 81.362872] CPU: 16 UID: 0 PID: 2516 Comm: dd Tainted: G N 7.0.0-rc1lblk+ #84 PREEMPT(full)
+ [ 81.362877] Tainted: [N]=TEST
+ [ 81.362878] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.17.0-0-gb52ca86e094d-prebuilt.qemu.org 04/01/2014
+ [ 81.362881] Call Trace:
+ [ 81.362884] <TASK>
+ [ 81.362886] dump_stack_lvl+0x8d/0xb0
+ ...
+ (See '/mnt/sda/blktests/results/nodev/blktrace/002.dmesg' for the entire message)
+
+[ 81.211018] run blktests blktrace/002 at 2026-02-25 22:24:33
+[ 81.239580] null_blk: disk nullb1 created
+[ 81.357294] BUG: using __this_cpu_read() in preemptible [00000000] code: dd/2516
+[ 81.362842] caller is tracing_record_cmdline+0x10/0x40
+[ 81.362872] CPU: 16 UID: 0 PID: 2516 Comm: dd Tainted: G N 7.0.0-rc1lblk+ #84 PREEMPT(full)
+[ 81.362877] Tainted: [N]=TEST
+[ 81.362878] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.17.0-0-gb52ca86e094d-prebuilt.qemu.org 04/01/2014
+[ 81.362881] Call Trace:
+[ 81.362884] <TASK>
+[ 81.362886] dump_stack_lvl+0x8d/0xb0
+[ 81.362895] check_preemption_disabled+0xce/0xe0
+[ 81.362902] tracing_record_cmdline+0x10/0x40
+[ 81.362923] __blk_add_trace+0x307/0x5d0
+[ 81.362934] ? lock_acquire+0xe0/0x300
+[ 81.362940] ? iov_iter_extract_pages+0x101/0xa30
+[ 81.362959] blk_add_trace_bio+0x106/0x1e0
+[ 81.362968] submit_bio_noacct_nocheck+0x24b/0x3a0
+[ 81.362979] ? lockdep_init_map_type+0x58/0x260
+[ 81.362988] submit_bio_wait+0x56/0x90
+[ 81.363009] __blkdev_direct_IO_simple+0x16c/0x250
+[ 81.363026] ? __pfx_submit_bio_wait_endio+0x10/0x10
+[ 81.363038] ? rcu_read_lock_any_held+0x73/0xa0
+[ 81.363051] blkdev_read_iter+0xc1/0x140
+[ 81.363059] vfs_read+0x20b/0x330
+[ 81.363083] ksys_read+0x67/0xe0
+[ 81.363090] do_syscall_64+0xbf/0xf00
+[ 81.363102] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+[ 81.363106] RIP: 0033:0x7f281906029d
+[ 81.363111] Code: 31 c0 e9 c6 fe ff ff 50 48 8d 3d 66 63 0a 00 e8 59 ff 01 00 66 0f 1f 84 00 00 00 00 00 80 3d 41 33 0e 00 00 74 17 31 c0 0f 05 <48> 3d 00 f0 ff ff 77 5b c3 66 2e 0f 1f 84 00 00 00 00 00 48 83 ec
+[ 81.363113] RSP: 002b:00007ffca127dd48 EFLAGS: 00000246 ORIG_RAX: 0000000000000000
+[ 81.363120] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f281906029d
+[ 81.363122] RDX: 0000000000001000 RSI: 0000559f8bfae000 RDI: 0000000000000000
+[ 81.363123] RBP: 0000000000001000 R08: 0000002863a10a81 R09: 00007f281915f000
+[ 81.363124] R10: 00007f2818f77b60 R11: 0000000000000246 R12: 0000559f8bfae000
+[ 81.363126] R13: 0000000000000000 R14: 0000000000000000 R15: 000000000000000a
+[ 81.363142] </TASK>
+
+The same BUG fires from blk_add_trace_plug(), blk_add_trace_unplug(),
+and blk_add_trace_rq() paths as well.
+
+The purpose of tracing_record_cmdline() is to cache the task->comm for
+a given PID so that the trace can later resolve it. It is only
+meaningful when a trace event is actually being recorded. Ring buffer
+reservation via ring_buffer_lock_reserve() disables preemption, and
+preemption remains disabled until the event is committed :-
+
+__blk_add_trace()
+ __trace_buffer_lock_reserve()
+ __trace_buffer_lock_reserve()
+ ring_buffer_lock_reserve()
+ preempt_disable_notrace(); <---
+
+With this fix blktests for blktrace pass:
+
+ blktests (master) # ./check blktrace
+ blktrace/001 (blktrace zone management command tracing) [passed]
+ runtime 3.650s ... 3.647s
+ blktrace/002 (blktrace ftrace corruption with sysfs trace) [passed]
+ runtime 0.411s ... 0.384s
+
+Fixes: 7ffbd48d5cab ("tracing: Cache comms only after an event occurred")
+Reported-by: Shinichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Suggested-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
+Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Rajani Kantha <681739313@139.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/blktrace.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index 8fd292d34d898..6cb5772e6aa92 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -251,8 +251,6 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
+ cpu = raw_smp_processor_id();
+
+ if (blk_tracer) {
+- tracing_record_cmdline(current);
+-
+ buffer = blk_tr->array_buffer.buffer;
+ trace_ctx = tracing_gen_ctx_flags(0);
+ event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
+@@ -260,6 +258,8 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
+ trace_ctx);
+ if (!event)
+ return;
++
++ tracing_record_cmdline(current);
+ t = ring_buffer_event_data(event);
+ goto record_it;
+ }
+--
+2.53.0
+
--- /dev/null
+From 1c6c998f63e3b5735688a4b2fe804d7355feccc7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Mar 2026 18:18:15 +0800
+Subject: btrfs: fix incorrect return value after changing leaf in
+ lookup_extent_data_ref()
+
+From: robbieko <robbieko@synology.com>
+
+[ Upstream commit 316fb1b3169efb081d2db910cbbfef445afa03b9 ]
+
+After commit 1618aa3c2e01 ("btrfs: simplify return variables in
+lookup_extent_data_ref()"), the err and ret variables were merged into
+a single ret variable. However, when btrfs_next_leaf() returns 0
+(success), ret is overwritten from -ENOENT to 0. If the first key in
+the next leaf does not match (different objectid or type), the function
+returns 0 instead of -ENOENT, making the caller believe the lookup
+succeeded when it did not. This can lead to operations on the wrong
+extent tree item, potentially causing extent tree corruption.
+
+Fix this by returning -ENOENT directly when the key does not match,
+instead of relying on the ret variable.
+
+Fixes: 1618aa3c2e01 ("btrfs: simplify return variables in lookup_extent_data_ref()")
+CC: stable@vger.kernel.org # 6.12+
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: robbieko <robbieko@synology.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/extent-tree.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 28da7a7b42296..3e44a303dea70 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -479,7 +479,7 @@ static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ if (key.objectid != bytenr ||
+ key.type != BTRFS_EXTENT_DATA_REF_KEY)
+- return ret;
++ return -ENOENT;
+
+ ref = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_extent_data_ref);
+--
+2.53.0
+
--- /dev/null
+From b1404a6a81c7b7679d490f81dead33c45e63b251 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Jan 2025 11:24:19 +0100
+Subject: btrfs: make wait_on_extent_buffer_writeback() static inline
+
+From: David Sterba <dsterba@suse.com>
+
+[ Upstream commit 075adeeb9204359e8232aeccf8b3c350ff6d9ff4 ]
+
+The simple helper can be inlined, no need for the separate function.
+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: Anand Jain <anand.jain@oracle.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 316fb1b3169e ("btrfs: fix incorrect return value after changing leaf in lookup_extent_data_ref()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/extent_io.c | 6 ------
+ fs/btrfs/extent_io.h | 7 ++++++-
+ 2 files changed, 6 insertions(+), 7 deletions(-)
+
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 2e8dc928621cb..3bcb368c4127e 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -1671,12 +1671,6 @@ static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl
+ return ret;
+ }
+
+-void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
+-{
+- wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
+- TASK_UNINTERRUPTIBLE);
+-}
+-
+ /*
+ * Lock extent buffer status and pages for writeback.
+ *
+diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
+index c63ccfb9fc37c..e22c4abefb9b4 100644
+--- a/fs/btrfs/extent_io.h
++++ b/fs/btrfs/extent_io.h
+@@ -266,7 +266,12 @@ void free_extent_buffer_stale(struct extent_buffer *eb);
+ #define WAIT_PAGE_LOCK 2
+ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
+ const struct btrfs_tree_parent_check *parent_check);
+-void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
++static inline void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
++{
++ wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
++ TASK_UNINTERRUPTIBLE);
++}
++
+ void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
+ u64 bytenr, u64 owner_root, u64 gen, int level);
+ void btrfs_readahead_node_child(struct extent_buffer *node, int slot);
+--
+2.53.0
+
--- /dev/null
+From 2b24934ecea3250d72423719ca5368a39278829a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Jan 2026 19:52:10 +0000
+Subject: btrfs: remove pointless out labels from extent-tree.c
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit ea8f9210050136bdd14f5e32b04cd01c8bd5c0ca ]
+
+Some functions (lookup_extent_data_ref(), __btrfs_mod_ref() and
+btrfs_free_tree_block()) have an 'out' label that does nothing but
+return, making it pointless. Simplify this by removing the label and
+returning instead of gotos plus setting the 'ret' variable.
+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 316fb1b3169e ("btrfs: fix incorrect return value after changing leaf in lookup_extent_data_ref()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/extent-tree.c | 24 ++++++++++--------------
+ 1 file changed, 10 insertions(+), 14 deletions(-)
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 568fe9f702b74..28da7a7b42296 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -479,7 +479,7 @@ static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ if (key.objectid != bytenr ||
+ key.type != BTRFS_EXTENT_DATA_REF_KEY)
+- goto fail;
++ return ret;
+
+ ref = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_extent_data_ref);
+@@ -490,12 +490,11 @@ static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
+ btrfs_release_path(path);
+ goto again;
+ }
+- ret = 0;
+- break;
++ return 0;
+ }
+ path->slots[0]++;
+ }
+-fail:
++
+ return ret;
+ }
+
+@@ -2470,7 +2469,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
+ int i;
+ int action;
+ int level;
+- int ret = 0;
++ int ret;
+
+ if (btrfs_is_testing(fs_info))
+ return 0;
+@@ -2522,7 +2521,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
+ else
+ ret = btrfs_free_extent(trans, &ref);
+ if (ret)
+- goto fail;
++ return ret;
+ } else {
+ /* We don't know the owning_root, leave as 0. */
+ ref.bytenr = btrfs_node_blockptr(buf, i);
+@@ -2535,12 +2534,10 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
+ else
+ ret = btrfs_free_extent(trans, &ref);
+ if (ret)
+- goto fail;
++ return ret;
+ }
+ }
+ return 0;
+-fail:
+- return ret;
+ }
+
+ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+@@ -3469,12 +3466,12 @@ int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ return 0;
+
+ if (btrfs_header_generation(buf) != trans->transid)
+- goto out;
++ return 0;
+
+ if (root_id != BTRFS_TREE_LOG_OBJECTID) {
+ ret = check_ref_cleanup(trans, buf->start);
+ if (!ret)
+- goto out;
++ return 0;
+ }
+
+ bg = btrfs_lookup_block_group(fs_info, buf->start);
+@@ -3482,7 +3479,7 @@ int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
+ pin_down_extent(trans, bg, buf->start, buf->len, 1);
+ btrfs_put_block_group(bg);
+- goto out;
++ return 0;
+ }
+
+ /*
+@@ -3506,7 +3503,7 @@ int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ || btrfs_is_zoned(fs_info)) {
+ pin_down_extent(trans, bg, buf->start, buf->len, 1);
+ btrfs_put_block_group(bg);
+- goto out;
++ return 0;
+ }
+
+ WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
+@@ -3516,7 +3513,6 @@ int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ btrfs_put_block_group(bg);
+ trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
+
+-out:
+ return 0;
+ }
+
+--
+2.53.0
+
--- /dev/null
+From 90f43b63427cca16cf1e30f50e142c6b262c01af Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Jan 2025 11:24:40 +0100
+Subject: btrfs: remove unused define WAIT_PAGE_LOCK for extent io
+
+From: David Sterba <dsterba@suse.com>
+
+[ Upstream commit db9eef2ea8633714ccdcb224f13ca3f3b5ed62cc ]
+
+Last use was in the readahead code that got removed by f26c9238602856
+("btrfs: remove reada infrastructure").
+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: Anand Jain <anand.jain@oracle.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 316fb1b3169e ("btrfs: fix incorrect return value after changing leaf in lookup_extent_data_ref()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/extent_io.h | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
+index e22c4abefb9b4..efface292a595 100644
+--- a/fs/btrfs/extent_io.h
++++ b/fs/btrfs/extent_io.h
+@@ -263,7 +263,6 @@ void free_extent_buffer(struct extent_buffer *eb);
+ void free_extent_buffer_stale(struct extent_buffer *eb);
+ #define WAIT_NONE 0
+ #define WAIT_COMPLETE 1
+-#define WAIT_PAGE_LOCK 2
+ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
+ const struct btrfs_tree_parent_check *parent_check);
+ static inline void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
+--
+2.53.0
+
--- /dev/null
+From 1820a3a3af74f627fa46be6679732f31a41130c2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Mar 2025 17:31:38 +0100
+Subject: btrfs: remove unused flag EXTENT_BUFFER_CORRUPT
+
+From: Daniel Vacek <neelx@suse.com>
+
+[ Upstream commit c61660ec341e65650e58c92d0af71184aa216ff0 ]
+
+This flag is no longer being used. It was added by commit a826d6dcb32d
+("Btrfs: check items for correctness as we search") but it's no longer
+being used after commit f26c92386028 ("btrfs: remove reada
+infrastructure").
+
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Daniel Vacek <neelx@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 316fb1b3169e ("btrfs: fix incorrect return value after changing leaf in lookup_extent_data_ref()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/disk-io.c | 11 ++---------
+ fs/btrfs/extent-tree.c | 6 ------
+ fs/btrfs/extent_io.h | 1 -
+ 3 files changed, 2 insertions(+), 16 deletions(-)
+
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 5de12f3a679df..2dab2ce94cc40 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -225,7 +225,6 @@ int btrfs_read_extent_buffer(struct extent_buffer *eb,
+ ASSERT(check);
+
+ while (1) {
+- clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
+ ret = read_extent_buffer_pages(eb, mirror_num, check);
+ if (!ret)
+ break;
+@@ -454,15 +453,9 @@ int btrfs_validate_extent_buffer(struct extent_buffer *eb,
+ goto out;
+ }
+
+- /*
+- * If this is a leaf block and it is corrupt, set the corrupt bit so
+- * that we don't try and read the other copies of this block, just
+- * return -EIO.
+- */
+- if (found_level == 0 && btrfs_check_leaf(eb)) {
+- set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
++ /* If this is a leaf block and it is corrupt, just return -EIO. */
++ if (found_level == 0 && btrfs_check_leaf(eb))
+ ret = -EIO;
+- }
+
+ if (found_level > 0 && btrfs_check_node(eb))
+ ret = -EIO;
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index c052c8df05fb4..568fe9f702b74 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3517,12 +3517,6 @@ int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
+
+ out:
+-
+- /*
+- * Deleting the buffer, clear the corrupt flag since it doesn't
+- * matter anymore.
+- */
+- clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
+ return 0;
+ }
+
+diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
+index 4126fe7f3f10e..0eedfd7c4b6ec 100644
+--- a/fs/btrfs/extent_io.h
++++ b/fs/btrfs/extent_io.h
+@@ -38,7 +38,6 @@ struct btrfs_tree_parent_check;
+ enum {
+ EXTENT_BUFFER_UPTODATE,
+ EXTENT_BUFFER_DIRTY,
+- EXTENT_BUFFER_CORRUPT,
+ EXTENT_BUFFER_TREE_REF,
+ EXTENT_BUFFER_STALE,
+ EXTENT_BUFFER_WRITEBACK,
+--
+2.53.0
+
--- /dev/null
+From d93afa06f73fe15dda9e9959ea0eaf4305e17de6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Mar 2025 17:31:37 +0100
+Subject: btrfs: remove unused flag EXTENT_BUFFER_READAHEAD
+
+From: Daniel Vacek <neelx@suse.com>
+
+[ Upstream commit 350362e95fbbe86008c240093697756d52049686 ]
+
+This flag is no longer being used. It was added by commit ab0fff03055d
+("btrfs: add READAHEAD extent buffer flag") and used in commits:
+
+79fb65a1f6d9 ("Btrfs: don't call readahead hook until we have read the entire eb")
+78e62c02abb9 ("btrfs: Remove extent_io_ops::readpage_io_failed_hook")
+371cdc0700c7 ("btrfs: introduce subpage metadata validation check")
+
+Finally all the code using it was removed by commit f26c92386028 ("btrfs: remove
+reada infrastructure").
+
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Daniel Vacek <neelx@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 316fb1b3169e ("btrfs: fix incorrect return value after changing leaf in lookup_extent_data_ref()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/extent_io.h | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
+index f21fd8b50abc8..4126fe7f3f10e 100644
+--- a/fs/btrfs/extent_io.h
++++ b/fs/btrfs/extent_io.h
+@@ -39,8 +39,6 @@ enum {
+ EXTENT_BUFFER_UPTODATE,
+ EXTENT_BUFFER_DIRTY,
+ EXTENT_BUFFER_CORRUPT,
+- /* this got triggered by readahead */
+- EXTENT_BUFFER_READAHEAD,
+ EXTENT_BUFFER_TREE_REF,
+ EXTENT_BUFFER_STALE,
+ EXTENT_BUFFER_WRITEBACK,
+--
+2.53.0
+
--- /dev/null
+From 6c4ee55d560679e2a5753f8fec99d0d0a0041c99 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Jan 2025 11:24:43 +0100
+Subject: btrfs: split waiting from read_extent_buffer_pages(), drop parameter
+ wait
+
+From: David Sterba <dsterba@suse.com>
+
+[ Upstream commit 248c4ff3935252a82504c55cfd3592e413575bd0 ]
+
+There are only 2 WAIT_* values left for wait parameter, we can encode
+this to the function name if the waiting functionality is split.
+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: Anand Jain <anand.jain@oracle.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 316fb1b3169e ("btrfs: fix incorrect return value after changing leaf in lookup_extent_data_ref()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/disk-io.c | 2 +-
+ fs/btrfs/extent_io.c | 27 +++++++++++++++++----------
+ fs/btrfs/extent_io.h | 7 ++++---
+ 3 files changed, 22 insertions(+), 14 deletions(-)
+
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 05e91ed0af197..5de12f3a679df 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -226,7 +226,7 @@ int btrfs_read_extent_buffer(struct extent_buffer *eb,
+
+ while (1) {
+ clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
+- ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num, check);
++ ret = read_extent_buffer_pages(eb, mirror_num, check);
+ if (!ret)
+ break;
+
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 3bcb368c4127e..0d50f3063d346 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -3636,8 +3636,8 @@ static void end_bbio_meta_read(struct btrfs_bio *bbio)
+ bio_put(&bbio->bio);
+ }
+
+-int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
+- const struct btrfs_tree_parent_check *check)
++int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num,
++ const struct btrfs_tree_parent_check *check)
+ {
+ struct btrfs_bio *bbio;
+ bool ret;
+@@ -3655,7 +3655,7 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
+
+ /* Someone else is already reading the buffer, just wait for it. */
+ if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
+- goto done;
++ return 0;
+
+ /*
+ * Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above
+@@ -3695,14 +3695,21 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
+ }
+ }
+ btrfs_submit_bbio(bbio, mirror_num);
++ return 0;
++}
+
+-done:
+- if (wait == WAIT_COMPLETE) {
+- wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
+- if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
+- return -EIO;
+- }
++int read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num,
++ const struct btrfs_tree_parent_check *check)
++{
++ int ret;
+
++ ret = read_extent_buffer_pages_nowait(eb, mirror_num, check);
++ if (ret < 0)
++ return ret;
++
++ wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
++ if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
++ return -EIO;
+ return 0;
+ }
+
+@@ -4434,7 +4441,7 @@ void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
+ return;
+ }
+
+- ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check);
++ ret = read_extent_buffer_pages_nowait(eb, 0, &check);
+ if (ret < 0)
+ free_extent_buffer_stale(eb);
+ else
+diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
+index efface292a595..f21fd8b50abc8 100644
+--- a/fs/btrfs/extent_io.h
++++ b/fs/btrfs/extent_io.h
+@@ -261,10 +261,11 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
+ u64 start);
+ void free_extent_buffer(struct extent_buffer *eb);
+ void free_extent_buffer_stale(struct extent_buffer *eb);
+-#define WAIT_NONE 0
+-#define WAIT_COMPLETE 1
+-int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
++int read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num,
+ const struct btrfs_tree_parent_check *parent_check);
++int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num,
++ const struct btrfs_tree_parent_check *parent_check);
++
+ static inline void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
+ {
+ wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
+--
+2.53.0
+
--- /dev/null
+From 8b487d9cd4aa818009e253f7413196ddbf3648de Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Apr 2026 01:50:55 +0100
+Subject: MIPS: Always record SEGBITS in cpu_data.vmbits
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 8374c2cb83b95b3c92f129fd56527225c20a058c upstream.
+
+With a 32-bit kernel running on 64-bit MIPS hardware the hardcoded value
+of `cpu_vmbits' only records the size of compatibility useg and does not
+reflect the size of native xuseg or the complete range of values allowed
+in the VPN2 field of TLB entries.
+
+An upcoming change will need the actual VPN2 value range permitted even
+in 32-bit kernel configurations, so always include the `vmbits' member
+in `struct cpuinfo_mips' and probe for SEGBITS when running on 64-bit
+hardware and resorting to the currently hardcoded value of 31 on 32-bit
+processors. No functional change for users of `cpu_vmbits'.
+
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/include/asm/cpu-features.h | 1 -
+ arch/mips/include/asm/cpu-info.h | 2 --
+ arch/mips/include/asm/mipsregs.h | 2 ++
+ arch/mips/kernel/cpu-probe.c | 13 ++++++++-----
+ arch/mips/kernel/cpu-r3k-probe.c | 2 ++
+ 5 files changed, 12 insertions(+), 8 deletions(-)
+
+diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
+index 404390bb87eaf..3f11e5218e6c6 100644
+--- a/arch/mips/include/asm/cpu-features.h
++++ b/arch/mips/include/asm/cpu-features.h
+@@ -484,7 +484,6 @@
+ # endif
+ # ifndef cpu_vmbits
+ # define cpu_vmbits cpu_data[0].vmbits
+-# define __NEED_VMBITS_PROBE
+ # endif
+ #endif
+
+diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
+index a600670d00e97..1aee44124f118 100644
+--- a/arch/mips/include/asm/cpu-info.h
++++ b/arch/mips/include/asm/cpu-info.h
+@@ -80,9 +80,7 @@ struct cpuinfo_mips {
+ int srsets; /* Shadow register sets */
+ int package;/* physical package number */
+ unsigned int globalnumber;
+-#ifdef CONFIG_64BIT
+ int vmbits; /* Virtual memory size in bits */
+-#endif
+ void *data; /* Additional data */
+ unsigned int watch_reg_count; /* Number that exist */
+ unsigned int watch_reg_use_cnt; /* Usable by ptrace */
+diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
+index 3c6ddc0c2c7ac..db8e02493eb89 100644
+--- a/arch/mips/include/asm/mipsregs.h
++++ b/arch/mips/include/asm/mipsregs.h
+@@ -1871,6 +1871,8 @@ do { \
+
+ #define read_c0_entryhi() __read_ulong_c0_register($10, 0)
+ #define write_c0_entryhi(val) __write_ulong_c0_register($10, 0, val)
++#define read_c0_entryhi_64() __read_64bit_c0_register($10, 0)
++#define write_c0_entryhi_64(val) __write_64bit_c0_register($10, 0, val)
+
+ #define read_c0_guestctl1() __read_32bit_c0_register($10, 4)
+ #define write_c0_guestctl1(val) __write_32bit_c0_register($10, 4, val)
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index af7412549e6ea..3220e68bd12e8 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -207,11 +207,14 @@ static inline void set_elf_base_platform(const char *plat)
+
+ static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
+ {
+-#ifdef __NEED_VMBITS_PROBE
+- write_c0_entryhi(0x3fffffffffffe000ULL);
+- back_to_back_c0_hazard();
+- c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL);
+-#endif
++ int vmbits = 31;
++
++ if (cpu_has_64bits) {
++ write_c0_entryhi_64(0x3fffffffffffe000ULL);
++ back_to_back_c0_hazard();
++ vmbits = fls64(read_c0_entryhi_64() & 0x3fffffffffffe000ULL);
++ }
++ c->vmbits = vmbits;
+ }
+
+ static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
+diff --git a/arch/mips/kernel/cpu-r3k-probe.c b/arch/mips/kernel/cpu-r3k-probe.c
+index 0c826f729f752..edcf04de0a6fb 100644
+--- a/arch/mips/kernel/cpu-r3k-probe.c
++++ b/arch/mips/kernel/cpu-r3k-probe.c
+@@ -137,6 +137,8 @@ void cpu_probe(void)
+ else
+ cpu_set_nofpu_opts(c);
+
++ c->vmbits = 31;
++
+ reserve_exception_space(0, 0x400);
+ }
+
+--
+2.53.0
+
--- /dev/null
+From 5f7586fbe66ccc31cd0f82fb37a033071ccedfbe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Apr 2026 01:50:57 +0100
+Subject: MIPS: mm: Rewrite TLB uniquification for the hidden bit feature
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 540760b77b8fc49d39d1b2b76196e5ec57711a32 upstream.
+
+Before the introduction of the EHINV feature, which lets software mark
+TLB entries invalid, certain older implementations of the MIPS ISA were
+equipped with an analogous bit, as a vendor extension, which however is
+hidden from software and only ever set at reset, and then any software
+write clears it, making the intended TLB entry valid.
+
+This feature makes it unsafe to read a TLB entry with TLBR, modify the
+page mask, and write the entry back with TLBWI, because this operation
+will implicitly clear the hidden bit and this may create a duplicate
+entry, as with the presence of the hidden bit there is no guarantee all
+the entries across the TLB are unique each.
+
+Usually the firmware has already uniquified TLB entries before handing
+control over, in which case we only need to guarantee at bootstrap no
+clash will happen with the VPN2 values chosen in local_flush_tlb_all().
+
+However with systems such as Mikrotik RB532 we get handed the TLB as at
+reset, with the hidden bit set across the entries and possibly duplicate
+entries present. This then causes a machine check exception when page
+sizes are reset in r4k_tlb_uniquify() and prevents the system from
+booting.
+
+Rewrite the algorithm used in r4k_tlb_uniquify() then such as to avoid
+the reuse of ASID/VPN values across the TLB. Get rid of global entries
+first as they may be blocking the entire address space, e.g. 16 256MiB
+pages will exhaust the whole address space of a 32-bit CPU and a single
+big page can exhaust the 32-bit compatibility space on a 64-bit CPU.
+
+Details of the algorithm chosen are given across the code itself.
+
+Fixes: 9f048fa48740 ("MIPS: mm: Prevent a TLB shutdown on initial uniquification")
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Cc: stable@vger.kernel.org # v6.18+
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 282 +++++++++++++++++++++++++++++++++--------
+ 1 file changed, 228 insertions(+), 54 deletions(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index 65f0357958fc7..24fe85fa169d1 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -13,6 +13,7 @@
+ #include <linux/sched.h>
+ #include <linux/smp.h>
+ #include <linux/memblock.h>
++#include <linux/minmax.h>
+ #include <linux/mm.h>
+ #include <linux/hugetlb.h>
+ #include <linux/export.h>
+@@ -24,6 +25,7 @@
+ #include <asm/hazards.h>
+ #include <asm/mmu_context.h>
+ #include <asm/tlb.h>
++#include <asm/tlbdebug.h>
+ #include <asm/tlbex.h>
+ #include <asm/tlbmisc.h>
+ #include <asm/setup.h>
+@@ -511,87 +513,259 @@ static int __init set_ntlb(char *str)
+ __setup("ntlb=", set_ntlb);
+
+
+-/* Comparison function for EntryHi VPN fields. */
+-static int r4k_vpn_cmp(const void *a, const void *b)
++/* The start bit position of VPN2 and Mask in EntryHi/PageMask registers. */
++#define VPN2_SHIFT 13
++
++/* Read full EntryHi even with CONFIG_32BIT. */
++static inline unsigned long long read_c0_entryhi_native(void)
++{
++ return cpu_has_64bits ? read_c0_entryhi_64() : read_c0_entryhi();
++}
++
++/* Write full EntryHi even with CONFIG_32BIT. */
++static inline void write_c0_entryhi_native(unsigned long long v)
+ {
+- long v = *(unsigned long *)a - *(unsigned long *)b;
+- int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1: 0;
+- return s ? (v != 0) | v >> s : v;
++ if (cpu_has_64bits)
++ write_c0_entryhi_64(v);
++ else
++ write_c0_entryhi(v);
+ }
+
++/* TLB entry state for uniquification. */
++struct tlbent {
++ unsigned long long wired:1;
++ unsigned long long global:1;
++ unsigned long long asid:10;
++ unsigned long long vpn:51;
++ unsigned long long pagesz:5;
++ unsigned long long index:14;
++};
++
+ /*
+- * Initialise all TLB entries with unique values that do not clash with
+- * what we have been handed over and what we'll be using ourselves.
++ * Comparison function for TLB entry sorting. Place wired entries first,
++ * then global entries, then order by the increasing VPN/ASID and the
++ * decreasing page size. This lets us avoid clashes with wired entries
++ * easily and get entries for larger pages out of the way first.
++ *
++ * We could group bits so as to reduce the number of comparisons, but this
++ * is seldom executed and not performance-critical, so prefer legibility.
+ */
+-static void __ref r4k_tlb_uniquify(void)
++static int r4k_entry_cmp(const void *a, const void *b)
+ {
+- int tlbsize = current_cpu_data.tlbsize;
+- bool use_slab = slab_is_available();
+- int start = num_wired_entries();
+- phys_addr_t tlb_vpn_size;
+- unsigned long *tlb_vpns;
+- unsigned long vpn_mask;
+- int cnt, ent, idx, i;
+-
+- vpn_mask = GENMASK(cpu_vmbits - 1, 13);
+- vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
++ struct tlbent ea = *(struct tlbent *)a, eb = *(struct tlbent *)b;
++
++ if (ea.wired > eb.wired)
++ return -1;
++ else if (ea.wired < eb.wired)
++ return 1;
++ else if (ea.global > eb.global)
++ return -1;
++ else if (ea.global < eb.global)
++ return 1;
++ else if (ea.vpn < eb.vpn)
++ return -1;
++ else if (ea.vpn > eb.vpn)
++ return 1;
++ else if (ea.asid < eb.asid)
++ return -1;
++ else if (ea.asid > eb.asid)
++ return 1;
++ else if (ea.pagesz > eb.pagesz)
++ return -1;
++ else if (ea.pagesz < eb.pagesz)
++ return 1;
++ else
++ return 0;
++}
+
+- tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
+- tlb_vpns = (use_slab ?
+- kmalloc(tlb_vpn_size, GFP_ATOMIC) :
+- memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
+- if (WARN_ON(!tlb_vpns))
+- return; /* Pray local_flush_tlb_all() is good enough. */
++/*
++ * Fetch all the TLB entries. Mask individual VPN values retrieved with
++ * the corresponding page mask and ignoring any 1KiB extension as we'll
++ * be using 4KiB pages for uniquification.
++ */
++static void __ref r4k_tlb_uniquify_read(struct tlbent *tlb_vpns, int tlbsize)
++{
++ int start = num_wired_entries();
++ unsigned long long vpn_mask;
++ bool global;
++ int i;
+
+- htw_stop();
++ vpn_mask = GENMASK(current_cpu_data.vmbits - 1, VPN2_SHIFT);
++ vpn_mask |= cpu_has_64bits ? 3ULL << 62 : 1 << 31;
+
+- for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
+- unsigned long vpn;
++ for (i = 0; i < tlbsize; i++) {
++ unsigned long long entryhi, vpn, mask, asid;
++ unsigned int pagesz;
+
+ write_c0_index(i);
+ mtc0_tlbr_hazard();
+ tlb_read();
+ tlb_read_hazard();
+- vpn = read_c0_entryhi();
+- vpn &= vpn_mask & PAGE_MASK;
+- tlb_vpns[cnt] = vpn;
+
+- /* Prevent any large pages from overlapping regular ones. */
+- write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
+- mtc0_tlbw_hazard();
+- tlb_write_indexed();
+- tlbw_use_hazard();
++ global = !!(read_c0_entrylo0() & ENTRYLO_G);
++ entryhi = read_c0_entryhi_native();
++ mask = read_c0_pagemask();
++
++ asid = entryhi & cpu_asid_mask(&current_cpu_data);
++ vpn = (entryhi & vpn_mask & ~mask) >> VPN2_SHIFT;
++ pagesz = ilog2((mask >> VPN2_SHIFT) + 1);
++
++ tlb_vpns[i].global = global;
++ tlb_vpns[i].asid = global ? 0 : asid;
++ tlb_vpns[i].vpn = vpn;
++ tlb_vpns[i].pagesz = pagesz;
++ tlb_vpns[i].wired = i < start;
++ tlb_vpns[i].index = i;
+ }
++}
+
+- sort(tlb_vpns, cnt, sizeof(tlb_vpns[0]), r4k_vpn_cmp, NULL);
++/*
++ * Write unique values to all but the wired TLB entries each, using
++ * the 4KiB page size. This size might not be supported with R6, but
++ * EHINV is mandatory for R6, so we won't ever be called in that case.
++ *
++ * A sorted table is supplied with any wired entries at the beginning,
++ * followed by any global entries, and then finally regular entries.
++ * We start at the VPN and ASID values of zero and only assign user
++ * addresses, therefore guaranteeing no clash with addresses produced
++ * by UNIQUE_ENTRYHI. We avoid any VPN values used by wired or global
++ * entries, by increasing the VPN value beyond the span of such entry.
++ *
++ * When a VPN/ASID clash is found with a regular entry we increment the
++ * ASID instead until no VPN/ASID clash has been found or the ASID space
++ * has been exhausted, in which case we increase the VPN value beyond
++ * the span of the largest clashing entry.
++ *
++ * We do not need to be concerned about FTLB or MMID configurations as
++ * those are required to implement the EHINV feature.
++ */
++static void __ref r4k_tlb_uniquify_write(struct tlbent *tlb_vpns, int tlbsize)
++{
++ unsigned long long asid, vpn, vpn_size, pagesz;
++ int widx, gidx, idx, sidx, lidx, i;
+
+- write_c0_pagemask(PM_DEFAULT_MASK);
++ vpn_size = 1ULL << (current_cpu_data.vmbits - VPN2_SHIFT);
++ pagesz = ilog2((PM_4K >> VPN2_SHIFT) + 1);
++
++ write_c0_pagemask(PM_4K);
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+
+- idx = 0;
+- ent = tlbsize;
+- for (i = start; i < tlbsize; i++)
+- while (1) {
+- unsigned long entryhi, vpn;
++ asid = 0;
++ vpn = 0;
++ widx = 0;
++ gidx = 0;
++ for (sidx = 0; sidx < tlbsize && tlb_vpns[sidx].wired; sidx++)
++ ;
++ for (lidx = sidx; lidx < tlbsize && tlb_vpns[lidx].global; lidx++)
++ ;
++ idx = gidx = sidx + 1;
++ for (i = sidx; i < tlbsize; i++) {
++ unsigned long long entryhi, vpn_pagesz = 0;
+
+- entryhi = UNIQUE_ENTRYHI(ent);
+- vpn = entryhi & vpn_mask & PAGE_MASK;
++ while (1) {
++ if (WARN_ON(vpn >= vpn_size)) {
++ dump_tlb_all();
++ /* Pray local_flush_tlb_all() will cope. */
++ return;
++ }
+
+- if (idx >= cnt || vpn < tlb_vpns[idx]) {
+- write_c0_entryhi(entryhi);
+- write_c0_index(i);
+- mtc0_tlbw_hazard();
+- tlb_write_indexed();
+- ent++;
+- break;
+- } else if (vpn == tlb_vpns[idx]) {
+- ent++;
+- } else {
++ /* VPN must be below the next wired entry. */
++ if (widx < sidx && vpn >= tlb_vpns[widx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[widx].vpn +
++ (1ULL << tlb_vpns[widx].pagesz)));
++ asid = 0;
++ widx++;
++ continue;
++ }
++ /* VPN must be below the next global entry. */
++ if (gidx < lidx && vpn >= tlb_vpns[gidx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[gidx].vpn +
++ (1ULL << tlb_vpns[gidx].pagesz)));
++ asid = 0;
++ gidx++;
++ continue;
++ }
++ /* Try to find a free ASID so as to conserve VPNs. */
++ if (idx < tlbsize && vpn == tlb_vpns[idx].vpn &&
++ asid == tlb_vpns[idx].asid) {
++ unsigned long long idx_pagesz;
++
++ idx_pagesz = tlb_vpns[idx].pagesz;
++ vpn_pagesz = max(vpn_pagesz, idx_pagesz);
++ do
++ idx++;
++ while (idx < tlbsize &&
++ vpn == tlb_vpns[idx].vpn &&
++ asid == tlb_vpns[idx].asid);
++ asid++;
++ if (asid > cpu_asid_mask(&current_cpu_data)) {
++ vpn += vpn_pagesz;
++ asid = 0;
++ vpn_pagesz = 0;
++ }
++ continue;
++ }
++ /* VPN mustn't be above the next regular entry. */
++ if (idx < tlbsize && vpn > tlb_vpns[idx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[idx].vpn +
++ (1ULL << tlb_vpns[idx].pagesz)));
++ asid = 0;
+ idx++;
++ continue;
+ }
++ break;
+ }
+
++ entryhi = (vpn << VPN2_SHIFT) | asid;
++ write_c0_entryhi_native(entryhi);
++ write_c0_index(tlb_vpns[i].index);
++ mtc0_tlbw_hazard();
++ tlb_write_indexed();
++
++ tlb_vpns[i].asid = asid;
++ tlb_vpns[i].vpn = vpn;
++ tlb_vpns[i].pagesz = pagesz;
++
++ asid++;
++ if (asid > cpu_asid_mask(&current_cpu_data)) {
++ vpn += 1ULL << pagesz;
++ asid = 0;
++ }
++ }
++}
++
++/*
++ * Initialise all TLB entries with unique values that do not clash with
++ * what we have been handed over and what we'll be using ourselves.
++ */
++static void __ref r4k_tlb_uniquify(void)
++{
++ int tlbsize = current_cpu_data.tlbsize;
++ bool use_slab = slab_is_available();
++ phys_addr_t tlb_vpn_size;
++ struct tlbent *tlb_vpns;
++
++ tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
++ tlb_vpns = (use_slab ?
++ kmalloc(tlb_vpn_size, GFP_ATOMIC) :
++ memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
++ if (WARN_ON(!tlb_vpns))
++ return; /* Pray local_flush_tlb_all() is good enough. */
++
++ htw_stop();
++
++ r4k_tlb_uniquify_read(tlb_vpns, tlbsize);
++
++ sort(tlb_vpns, tlbsize, sizeof(*tlb_vpns), r4k_entry_cmp, NULL);
++
++ r4k_tlb_uniquify_write(tlb_vpns, tlbsize);
++
++ write_c0_pagemask(PM_DEFAULT_MASK);
++
+ tlbw_use_hazard();
+ htw_start();
+ flush_micro_tlb();
+--
+2.53.0
+
--- /dev/null
+From e266c0acaff7e28ef186de2640b343b9e04ef74e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Apr 2026 01:50:56 +0100
+Subject: MIPS: mm: Suppress TLB uniquification on EHINV hardware
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 74283cfe216392c7b776ebf6045b5b15ed9dffcd upstream.
+
+Hardware that supports the EHINV feature, mandatory for R6 ISA and FTLB
+implementation, lets software mark TLB entries invalid, which eliminates
+the need to ensure no duplicate matching entries are ever created. This
+feature is already used by local_flush_tlb_all(), via the UNIQUE_ENTRYHI
+macro, making the preceding call to r4k_tlb_uniquify() superfluous.
+
+The next change will also modify uniquification code such that it'll
+become incompatible with the FTLB and MMID features, as well as MIPSr6
+CPUs that do not implement 4KiB pages.
+
+Therefore prevent r4k_tlb_uniquify() from being used on EHINV hardware,
+as denoted by `cpu_has_tlbinv'.
+
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index 645f77e09d5b8..65f0357958fc7 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -640,7 +640,8 @@ static void r4k_tlb_configure(void)
+ temp_tlb_entry = current_cpu_data.tlbsize - 1;
+
+ /* From this point on the ARC firmware is dead. */
+- r4k_tlb_uniquify();
++ if (!cpu_has_tlbinv)
++ r4k_tlb_uniquify();
+ local_flush_tlb_all();
+
+ /* Did I tell you that ARC SUCKS? */
+--
+2.53.0
+
--- /dev/null
+From 83b51eee730a4dcd2a449901f436b47d22c335a9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Apr 2026 16:24:03 +0800
+Subject: nfc: nci: complete pending data exchange on device close
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 66083581945bd5b8e99fe49b5aeb83d03f62d053 ]
+
+In nci_close_device(), complete any pending data exchange before
+closing. The data exchange callback (e.g.
+rawsock_data_exchange_complete) holds a socket reference.
+
+NIPA occasionally hits this leak:
+
+unreferenced object 0xff1100000f435000 (size 2048):
+ comm "nci_dev", pid 3954, jiffies 4295441245
+ hex dump (first 32 bytes):
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ 27 00 01 40 00 00 00 00 00 00 00 00 00 00 00 00 '..@............
+ backtrace (crc ec2b3c5):
+ __kmalloc_noprof+0x4db/0x730
+ sk_prot_alloc.isra.0+0xe4/0x1d0
+ sk_alloc+0x36/0x760
+ rawsock_create+0xd1/0x540
+ nfc_sock_create+0x11f/0x280
+ __sock_create+0x22d/0x630
+ __sys_socket+0x115/0x1d0
+ __x64_sys_socket+0x72/0xd0
+ do_syscall_64+0x117/0xfc0
+ entry_SYSCALL_64_after_hwframe+0x4b/0x53
+
+Fixes: 38f04c6b1b68 ("NFC: protect nci_data_exchange transactions")
+Reviewed-by: Joe Damato <joe@dama.to>
+Link: https://patch.msgid.link/20260303162346.2071888-4-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Rajani Kantha <681739313@139.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/nci/core.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index d10e2c81131ad..058d4eb530fbd 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -567,6 +567,10 @@ static int nci_close_device(struct nci_dev *ndev)
+ flush_workqueue(ndev->cmd_wq);
+ del_timer_sync(&ndev->cmd_timer);
+ del_timer_sync(&ndev->data_timer);
++ if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
++ nci_data_exchange_complete(ndev, NULL,
++ ndev->cur_conn_id,
++ -ENODEV);
+ mutex_unlock(&ndev->req_lock);
+ return 0;
+ }
+@@ -597,6 +601,11 @@ static int nci_close_device(struct nci_dev *ndev)
+ flush_workqueue(ndev->cmd_wq);
+
+ del_timer_sync(&ndev->cmd_timer);
++ del_timer_sync(&ndev->data_timer);
++
++ if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
++ nci_data_exchange_complete(ndev, NULL, ndev->cur_conn_id,
++ -ENODEV);
+
+ /* Clear flags except NCI_UNREG */
+ ndev->flags &= BIT(NCI_UNREG);
+--
+2.53.0
+
seg6-separate-dst_cache-for-input-and-output-paths-in-seg6-lwtunnel.patch
input-uinput-fix-circular-locking-dependency-with-ff-core.patch
input-uinput-take-event-lock-when-submitting-ff-request-event.patch
+mips-always-record-segbits-in-cpu_data.vmbits.patch
+mips-mm-suppress-tlb-uniquification-on-ehinv-hardwar.patch
+mips-mm-rewrite-tlb-uniquification-for-the-hidden-bi.patch
+asoc-simple-card-utils-don-t-use-__free-device_node-.patch
+btrfs-make-wait_on_extent_buffer_writeback-static-in.patch
+btrfs-remove-unused-define-wait_page_lock-for-extent.patch
+btrfs-split-waiting-from-read_extent_buffer_pages-dr.patch
+btrfs-remove-unused-flag-extent_buffer_readahead.patch
+btrfs-remove-unused-flag-extent_buffer_corrupt.patch
+btrfs-remove-pointless-out-labels-from-extent-tree.c.patch
+btrfs-fix-incorrect-return-value-after-changing-leaf.patch
+blktrace-fix-__this_cpu_read-write-in-preemptible-co.patch
+nfc-nci-complete-pending-data-exchange-on-device-clo.patch
--- /dev/null
+From 23bcc192060e8e69d5da2941f33d558b643c3415 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 15 Nov 2025 02:08:32 +0000
+Subject: af_unix: Count cyclic SCC.
+
+From: Kuniyuki Iwashima <kuniyu@google.com>
+
+[ Upstream commit 58b47c713711b8afbf68e3158d4d5acdead00e9b ]
+
+__unix_walk_scc() and unix_walk_scc_fast() call unix_scc_cyclic()
+for each SCC to check if it forms a cyclic reference, so that we
+can skip GC at the following invocations in case all SCCs do not
+have any cycles.
+
+If we count the number of cyclic SCCs in __unix_walk_scc(), we can
+simplify unix_walk_scc_fast() because the number of cyclic SCCs
+only changes when it garbage-collects a SCC.
+
+So, let's count cyclic SCC in __unix_walk_scc() and decrement it
+in unix_walk_scc_fast() when performing garbage collection.
+
+Note that we will use this counter in a later patch to check if a
+cycle existed in the previous GC run.
+
+Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20251115020935.2643121-2-kuniyu@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: e5b31d988a41 ("af_unix: Give up GC if MSG_PEEK intervened.")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/unix/garbage.c | 31 +++++++++++++++++++++----------
+ 1 file changed, 21 insertions(+), 10 deletions(-)
+
+diff --git a/net/unix/garbage.c b/net/unix/garbage.c
+index 65396a4e1b07e..9f62d50979735 100644
+--- a/net/unix/garbage.c
++++ b/net/unix/garbage.c
+@@ -404,9 +404,11 @@ static bool unix_scc_cyclic(struct list_head *scc)
+ static LIST_HEAD(unix_visited_vertices);
+ static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2;
+
+-static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_index,
+- struct sk_buff_head *hitlist)
++static unsigned long __unix_walk_scc(struct unix_vertex *vertex,
++ unsigned long *last_index,
++ struct sk_buff_head *hitlist)
+ {
++ unsigned long cyclic_sccs = 0;
+ LIST_HEAD(vertex_stack);
+ struct unix_edge *edge;
+ LIST_HEAD(edge_stack);
+@@ -497,8 +499,8 @@ static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_inde
+ if (unix_vertex_max_scc_index < vertex->scc_index)
+ unix_vertex_max_scc_index = vertex->scc_index;
+
+- if (!unix_graph_maybe_cyclic)
+- unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
++ if (unix_scc_cyclic(&scc))
++ cyclic_sccs++;
+ }
+
+ list_del(&scc);
+@@ -507,13 +509,17 @@ static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_inde
+ /* Need backtracking ? */
+ if (!list_empty(&edge_stack))
+ goto prev_vertex;
++
++ return cyclic_sccs;
+ }
+
++static unsigned long unix_graph_cyclic_sccs;
++
+ static void unix_walk_scc(struct sk_buff_head *hitlist)
+ {
+ unsigned long last_index = UNIX_VERTEX_INDEX_START;
++ unsigned long cyclic_sccs = 0;
+
+- unix_graph_maybe_cyclic = false;
+ unix_vertex_max_scc_index = UNIX_VERTEX_INDEX_START;
+
+ /* Visit every vertex exactly once.
+@@ -523,18 +529,20 @@ static void unix_walk_scc(struct sk_buff_head *hitlist)
+ struct unix_vertex *vertex;
+
+ vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
+- __unix_walk_scc(vertex, &last_index, hitlist);
++ cyclic_sccs += __unix_walk_scc(vertex, &last_index, hitlist);
+ }
+
+ list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
+ swap(unix_vertex_unvisited_index, unix_vertex_grouped_index);
+
++ unix_graph_cyclic_sccs = cyclic_sccs;
++ unix_graph_maybe_cyclic = !!unix_graph_cyclic_sccs;
+ unix_graph_grouped = true;
+ }
+
+ static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
+ {
+- unix_graph_maybe_cyclic = false;
++ unsigned long cyclic_sccs = unix_graph_cyclic_sccs;
+
+ while (!list_empty(&unix_unvisited_vertices)) {
+ struct unix_vertex *vertex;
+@@ -551,15 +559,18 @@ static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
+ scc_dead = unix_vertex_dead(vertex);
+ }
+
+- if (scc_dead)
++ if (scc_dead) {
++ cyclic_sccs--;
+ unix_collect_skb(&scc, hitlist);
+- else if (!unix_graph_maybe_cyclic)
+- unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
++ }
+
+ list_del(&scc);
+ }
+
+ list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
++
++ unix_graph_cyclic_sccs = cyclic_sccs;
++ unix_graph_maybe_cyclic = !!unix_graph_cyclic_sccs;
+ }
+
+ static bool gc_in_progress;
+--
+2.53.0
+
--- /dev/null
+From 4927ba3810692f8a8033d92ea444a2d57386c383 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Mar 2026 05:40:40 +0000
+Subject: af_unix: Give up GC if MSG_PEEK intervened.
+
+From: Kuniyuki Iwashima <kuniyu@google.com>
+
+[ Upstream commit e5b31d988a41549037b8d8721a3c3cae893d8670 ]
+
+Igor Ushakov reported that GC purged the receive queue of
+an alive socket due to a race with MSG_PEEK with a nice repro.
+
+This is the exact same issue previously fixed by commit
+cbcf01128d0a ("af_unix: fix garbage collect vs MSG_PEEK").
+
+After GC was replaced with the current algorithm, the cited
+commit removed the locking dance in unix_peek_fds() and
+reintroduced the same issue.
+
+The problem is that MSG_PEEK bumps a file refcount without
+interacting with GC.
+
+Consider an SCC containing sk-A and sk-B, where sk-A is
+close()d but can be recv()ed via sk-B.
+
+The bad thing happens if sk-A is recv()ed with MSG_PEEK from
+sk-B and sk-B is close()d while GC is checking unix_vertex_dead()
+for sk-A and sk-B.
+
+ GC thread User thread
+ --------- -----------
+ unix_vertex_dead(sk-A)
+ -> true <------.
+ \
+ `------ recv(sk-B, MSG_PEEK)
+ invalidate !! -> sk-A's file refcount : 1 -> 2
+
+ close(sk-B)
+ -> sk-B's file refcount : 2 -> 1
+ unix_vertex_dead(sk-B)
+ -> true
+
+Initially, sk-A's file refcount is 1 by the inflight fd in sk-B
+recvq. GC thinks sk-A is dead because the file refcount is the
+same as the number of its inflight fds.
+
+However, sk-A's file refcount is bumped silently by MSG_PEEK,
+which invalidates the previous evaluation.
+
+At this moment, sk-B's file refcount is 2; one by the open fd,
+and one by the inflight fd in sk-A. The subsequent close()
+releases one refcount by the former.
+
+Finally, GC incorrectly concludes that both sk-A and sk-B are dead.
+
+One option is to restore the locking dance in unix_peek_fds(),
+but we can resolve this more elegantly thanks to the new algorithm.
+
+The point is that the issue does not occur without the subsequent
+close() and we actually do not need to synchronise MSG_PEEK with
+the dead SCC detection.
+
+When the issue occurs, close() and GC touch the same file refcount.
+If GC sees the refcount being decremented by close(), it can just
+give up garbage-collecting the SCC.
+
+Therefore, we only need to signal the race during MSG_PEEK with
+a proper memory barrier to make it visible to the GC.
+
+Let's use seqcount_t to notify GC when MSG_PEEK occurs and let
+it defer the SCC to the next run.
+
+This way no locking is needed on the MSG_PEEK side, and we can
+avoid imposing a penalty on every MSG_PEEK unnecessarily.
+
+Note that we can retry within unix_scc_dead() if MSG_PEEK is
+detected, but we do not do so to avoid hung task splat from
+abusive MSG_PEEK calls.
+
+Fixes: 118f457da9ed ("af_unix: Remove lock dance in unix_peek_fds().")
+Reported-by: Igor Ushakov <sysroot314@gmail.com>
+Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20260311054043.1231316-1-kuniyu@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/unix/af_unix.c | 2 ++
+ net/unix/af_unix.h | 1 +
+ net/unix/garbage.c | 79 ++++++++++++++++++++++++++++++----------------
+ 3 files changed, 54 insertions(+), 28 deletions(-)
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 79943fb348064..6b251c76f6bec 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1979,6 +1979,8 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+ static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
+ {
+ scm->fp = scm_fp_dup(UNIXCB(skb).fp);
++
++ unix_peek_fpl(scm->fp);
+ }
+
+ static void unix_destruct_scm(struct sk_buff *skb)
+diff --git a/net/unix/af_unix.h b/net/unix/af_unix.h
+index 59db179df9bb5..6b96c1007aecd 100644
+--- a/net/unix/af_unix.h
++++ b/net/unix/af_unix.h
+@@ -30,6 +30,7 @@ void unix_del_edges(struct scm_fp_list *fpl);
+ void unix_update_edges(struct unix_sock *receiver);
+ int unix_prepare_fpl(struct scm_fp_list *fpl);
+ void unix_destroy_fpl(struct scm_fp_list *fpl);
++void unix_peek_fpl(struct scm_fp_list *fpl);
+ void unix_gc(void);
+ void wait_for_unix_gc(struct scm_fp_list *fpl);
+
+diff --git a/net/unix/garbage.c b/net/unix/garbage.c
+index 7528e2db1293f..529b21d043d92 100644
+--- a/net/unix/garbage.c
++++ b/net/unix/garbage.c
+@@ -319,6 +319,25 @@ void unix_destroy_fpl(struct scm_fp_list *fpl)
+ unix_free_vertices(fpl);
+ }
+
++static bool gc_in_progress;
++static seqcount_t unix_peek_seq = SEQCNT_ZERO(unix_peek_seq);
++
++void unix_peek_fpl(struct scm_fp_list *fpl)
++{
++ static DEFINE_SPINLOCK(unix_peek_lock);
++
++ if (!fpl || !fpl->count_unix)
++ return;
++
++ if (!READ_ONCE(gc_in_progress))
++ return;
++
++ /* Invalidate the final refcnt check in unix_vertex_dead(). */
++ spin_lock(&unix_peek_lock);
++ raw_write_seqcount_barrier(&unix_peek_seq);
++ spin_unlock(&unix_peek_lock);
++}
++
+ static bool unix_vertex_dead(struct unix_vertex *vertex)
+ {
+ struct unix_edge *edge;
+@@ -352,6 +371,36 @@ static bool unix_vertex_dead(struct unix_vertex *vertex)
+ return true;
+ }
+
++static LIST_HEAD(unix_visited_vertices);
++static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2;
++
++static bool unix_scc_dead(struct list_head *scc, bool fast)
++{
++ struct unix_vertex *vertex;
++ bool scc_dead = true;
++ unsigned int seq;
++
++ seq = read_seqcount_begin(&unix_peek_seq);
++
++ list_for_each_entry_reverse(vertex, scc, scc_entry) {
++ /* Don't restart DFS from this vertex. */
++ list_move_tail(&vertex->entry, &unix_visited_vertices);
++
++ /* Mark vertex as off-stack for __unix_walk_scc(). */
++ if (!fast)
++ vertex->index = unix_vertex_grouped_index;
++
++ if (scc_dead)
++ scc_dead = unix_vertex_dead(vertex);
++ }
++
++ /* If MSG_PEEK intervened, defer this SCC to the next round. */
++ if (read_seqcount_retry(&unix_peek_seq, seq))
++ return false;
++
++ return scc_dead;
++}
++
+ static void unix_collect_skb(struct list_head *scc, struct sk_buff_head *hitlist)
+ {
+ struct unix_vertex *vertex;
+@@ -405,9 +454,6 @@ static bool unix_scc_cyclic(struct list_head *scc)
+ return false;
+ }
+
+-static LIST_HEAD(unix_visited_vertices);
+-static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2;
+-
+ static unsigned long __unix_walk_scc(struct unix_vertex *vertex,
+ unsigned long *last_index,
+ struct sk_buff_head *hitlist)
+@@ -475,9 +521,7 @@ static unsigned long __unix_walk_scc(struct unix_vertex *vertex,
+ }
+
+ if (vertex->index == vertex->scc_index) {
+- struct unix_vertex *v;
+ struct list_head scc;
+- bool scc_dead = true;
+
+ /* SCC finalised.
+ *
+@@ -486,18 +530,7 @@ static unsigned long __unix_walk_scc(struct unix_vertex *vertex,
+ */
+ __list_cut_position(&scc, &vertex_stack, &vertex->scc_entry);
+
+- list_for_each_entry_reverse(v, &scc, scc_entry) {
+- /* Don't restart DFS from this vertex in unix_walk_scc(). */
+- list_move_tail(&v->entry, &unix_visited_vertices);
+-
+- /* Mark vertex as off-stack. */
+- v->index = unix_vertex_grouped_index;
+-
+- if (scc_dead)
+- scc_dead = unix_vertex_dead(v);
+- }
+-
+- if (scc_dead) {
++ if (unix_scc_dead(&scc, false)) {
+ unix_collect_skb(&scc, hitlist);
+ } else {
+ if (unix_vertex_max_scc_index < vertex->scc_index)
+@@ -550,19 +583,11 @@ static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
+ while (!list_empty(&unix_unvisited_vertices)) {
+ struct unix_vertex *vertex;
+ struct list_head scc;
+- bool scc_dead = true;
+
+ vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
+ list_add(&scc, &vertex->scc_entry);
+
+- list_for_each_entry_reverse(vertex, &scc, scc_entry) {
+- list_move_tail(&vertex->entry, &unix_visited_vertices);
+-
+- if (scc_dead)
+- scc_dead = unix_vertex_dead(vertex);
+- }
+-
+- if (scc_dead) {
++ if (unix_scc_dead(&scc, true)) {
+ cyclic_sccs--;
+ unix_collect_skb(&scc, hitlist);
+ }
+@@ -576,8 +601,6 @@ static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
+ unix_graph_state = cyclic_sccs ? UNIX_GRAPH_CYCLIC : UNIX_GRAPH_NOT_CYCLIC;
+ }
+
+-static bool gc_in_progress;
+-
+ static void __unix_gc(struct work_struct *work)
+ {
+ struct sk_buff_head hitlist;
+--
+2.53.0
+
--- /dev/null
+From 1dff48fca22dba1f16e1c956ef14f99c274d61a4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 15 Nov 2025 02:08:33 +0000
+Subject: af_unix: Simplify GC state.
+
+From: Kuniyuki Iwashima <kuniyu@google.com>
+
+[ Upstream commit 6b6f3c71fe568aa8ed3e16e9135d88a5f4fd3e84 ]
+
+GC manages its state by two variables, unix_graph_maybe_cyclic
+and unix_graph_grouped, both of which are set to false in the
+initial state.
+
+When an AF_UNIX socket is passed to an in-flight AF_UNIX socket,
+unix_update_graph() sets unix_graph_maybe_cyclic to true and
+unix_graph_grouped to false, making the next GC invocation call
+unix_walk_scc() to group SCCs.
+
+Once unix_walk_scc() finishes, sockets in the same SCC are linked
+via vertex->scc_entry. Then, unix_graph_grouped is set to true
+so that the following GC invocations can skip Tarjan's algorithm
+and simply iterate through the list in unix_walk_scc_fast().
+
+In addition, if we know there is at least one cyclic reference,
+we set unix_graph_maybe_cyclic to true so that we do not skip GC.
+
+So the state transitions as follows:
+
+ (unix_graph_maybe_cyclic, unix_graph_grouped)
+ =
+ (false, false) -> (true, false) -> (true, true) or (false, true)
+ ^.______________/________________/
+
+There is no transition to the initial state where both variables
+are false.
+
+If we consider the initial state as grouped, we can see that the
+GC actually has a tristate.
+
+Let's consolidate two variables into one enum.
+
+Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20251115020935.2643121-3-kuniyu@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: e5b31d988a41 ("af_unix: Give up GC if MSG_PEEK intervened.")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/unix/garbage.c | 21 ++++++++++++---------
+ 1 file changed, 12 insertions(+), 9 deletions(-)
+
+diff --git a/net/unix/garbage.c b/net/unix/garbage.c
+index 9f62d50979735..7528e2db1293f 100644
+--- a/net/unix/garbage.c
++++ b/net/unix/garbage.c
+@@ -121,8 +121,13 @@ static struct unix_vertex *unix_edge_successor(struct unix_edge *edge)
+ return edge->successor->vertex;
+ }
+
+-static bool unix_graph_maybe_cyclic;
+-static bool unix_graph_grouped;
++enum {
++ UNIX_GRAPH_NOT_CYCLIC,
++ UNIX_GRAPH_MAYBE_CYCLIC,
++ UNIX_GRAPH_CYCLIC,
++};
++
++static unsigned char unix_graph_state;
+
+ static void unix_update_graph(struct unix_vertex *vertex)
+ {
+@@ -132,8 +137,7 @@ static void unix_update_graph(struct unix_vertex *vertex)
+ if (!vertex)
+ return;
+
+- unix_graph_maybe_cyclic = true;
+- unix_graph_grouped = false;
++ unix_graph_state = UNIX_GRAPH_MAYBE_CYCLIC;
+ }
+
+ static LIST_HEAD(unix_unvisited_vertices);
+@@ -536,8 +540,7 @@ static void unix_walk_scc(struct sk_buff_head *hitlist)
+ swap(unix_vertex_unvisited_index, unix_vertex_grouped_index);
+
+ unix_graph_cyclic_sccs = cyclic_sccs;
+- unix_graph_maybe_cyclic = !!unix_graph_cyclic_sccs;
+- unix_graph_grouped = true;
++ unix_graph_state = cyclic_sccs ? UNIX_GRAPH_CYCLIC : UNIX_GRAPH_NOT_CYCLIC;
+ }
+
+ static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
+@@ -570,7 +573,7 @@ static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
+ list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
+
+ unix_graph_cyclic_sccs = cyclic_sccs;
+- unix_graph_maybe_cyclic = !!unix_graph_cyclic_sccs;
++ unix_graph_state = cyclic_sccs ? UNIX_GRAPH_CYCLIC : UNIX_GRAPH_NOT_CYCLIC;
+ }
+
+ static bool gc_in_progress;
+@@ -582,14 +585,14 @@ static void __unix_gc(struct work_struct *work)
+
+ spin_lock(&unix_gc_lock);
+
+- if (!unix_graph_maybe_cyclic) {
++ if (unix_graph_state == UNIX_GRAPH_NOT_CYCLIC) {
+ spin_unlock(&unix_gc_lock);
+ goto skip_gc;
+ }
+
+ __skb_queue_head_init(&hitlist);
+
+- if (unix_graph_grouped)
++ if (unix_graph_state == UNIX_GRAPH_CYCLIC)
+ unix_walk_scc_fast(&hitlist);
+ else
+ unix_walk_scc(&hitlist);
+--
+2.53.0
+
--- /dev/null
+From 58ccc868214aa0f222429b0edb1fbc7dfc87b47d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Mar 2026 18:18:15 +0800
+Subject: btrfs: fix incorrect return value after changing leaf in
+ lookup_extent_data_ref()
+
+From: robbieko <robbieko@synology.com>
+
+[ Upstream commit 316fb1b3169efb081d2db910cbbfef445afa03b9 ]
+
+After commit 1618aa3c2e01 ("btrfs: simplify return variables in
+lookup_extent_data_ref()"), the err and ret variables were merged into
+a single ret variable. However, when btrfs_next_leaf() returns 0
+(success), ret is overwritten from -ENOENT to 0. If the first key in
+the next leaf does not match (different objectid or type), the function
+returns 0 instead of -ENOENT, making the caller believe the lookup
+succeeded when it did not. This can lead to operations on the wrong
+extent tree item, potentially causing extent tree corruption.
+
+Fix this by returning -ENOENT directly when the key does not match,
+instead of relying on the ret variable.
+
+Fixes: 1618aa3c2e01 ("btrfs: simplify return variables in lookup_extent_data_ref()")
+CC: stable@vger.kernel.org # 6.12+
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: robbieko <robbieko@synology.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/extent-tree.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index fa83a3d8286ca..863b45092a190 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -476,7 +476,7 @@ static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ if (key.objectid != bytenr ||
+ key.type != BTRFS_EXTENT_DATA_REF_KEY)
+- return ret;
++ return -ENOENT;
+
+ ref = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_extent_data_ref);
+--
+2.53.0
+
--- /dev/null
+From bec13ef442b59bbb051e04326a653268e9125230 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Jan 2026 19:52:10 +0000
+Subject: btrfs: remove pointless out labels from extent-tree.c
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit ea8f9210050136bdd14f5e32b04cd01c8bd5c0ca ]
+
+Some functions (lookup_extent_data_ref(), __btrfs_mod_ref() and
+btrfs_free_tree_block()) have an 'out' label that does nothing but
+return, making it pointless. Simplify this by removing the label and
+returning instead of gotos plus setting the 'ret' variable.
+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 316fb1b3169e ("btrfs: fix incorrect return value after changing leaf in lookup_extent_data_ref()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/extent-tree.c | 24 ++++++++++--------------
+ 1 file changed, 10 insertions(+), 14 deletions(-)
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 08b7109299472..fa83a3d8286ca 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -476,7 +476,7 @@ static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ if (key.objectid != bytenr ||
+ key.type != BTRFS_EXTENT_DATA_REF_KEY)
+- goto fail;
++ return ret;
+
+ ref = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_extent_data_ref);
+@@ -487,12 +487,11 @@ static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
+ btrfs_release_path(path);
+ goto again;
+ }
+- ret = 0;
+- break;
++ return 0;
+ }
+ path->slots[0]++;
+ }
+-fail:
++
+ return ret;
+ }
+
+@@ -2470,7 +2469,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
+ int i;
+ int action;
+ int level;
+- int ret = 0;
++ int ret;
+
+ if (btrfs_is_testing(fs_info))
+ return 0;
+@@ -2522,7 +2521,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
+ else
+ ret = btrfs_free_extent(trans, &ref);
+ if (ret)
+- goto fail;
++ return ret;
+ } else {
+ /* We don't know the owning_root, leave as 0. */
+ ref.bytenr = btrfs_node_blockptr(buf, i);
+@@ -2535,12 +2534,10 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
+ else
+ ret = btrfs_free_extent(trans, &ref);
+ if (ret)
+- goto fail;
++ return ret;
+ }
+ }
+ return 0;
+-fail:
+- return ret;
+ }
+
+ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+@@ -3473,12 +3470,12 @@ int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ return 0;
+
+ if (btrfs_header_generation(buf) != trans->transid)
+- goto out;
++ return 0;
+
+ if (root_id != BTRFS_TREE_LOG_OBJECTID) {
+ ret = check_ref_cleanup(trans, buf->start);
+ if (!ret)
+- goto out;
++ return 0;
+ }
+
+ bg = btrfs_lookup_block_group(fs_info, buf->start);
+@@ -3486,7 +3483,7 @@ int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
+ pin_down_extent(trans, bg, buf->start, buf->len, 1);
+ btrfs_put_block_group(bg);
+- goto out;
++ return 0;
+ }
+
+ /*
+@@ -3510,7 +3507,7 @@ int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ || btrfs_is_zoned(fs_info)) {
+ pin_down_extent(trans, bg, buf->start, buf->len, 1);
+ btrfs_put_block_group(bg);
+- goto out;
++ return 0;
+ }
+
+ WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
+@@ -3520,7 +3517,6 @@ int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ btrfs_put_block_group(bg);
+ trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
+
+-out:
+ return 0;
+ }
+
+--
+2.53.0
+
--- /dev/null
+From 0bf879cd9c6b2b4a49beb55d9f0dc130af7e9f1b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Apr 2026 01:49:58 +0100
+Subject: MIPS: Always record SEGBITS in cpu_data.vmbits
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 8374c2cb83b95b3c92f129fd56527225c20a058c upstream.
+
+With a 32-bit kernel running on 64-bit MIPS hardware the hardcoded value
+of `cpu_vmbits' only records the size of compatibility useg and does not
+reflect the size of native xuseg or the complete range of values allowed
+in the VPN2 field of TLB entries.
+
+An upcoming change will need the actual VPN2 value range permitted even
+in 32-bit kernel configurations, so always include the `vmbits' member
+in `struct cpuinfo_mips' and probe for SEGBITS when running on 64-bit
+hardware and resorting to the currently hardcoded value of 31 on 32-bit
+processors. No functional change for users of `cpu_vmbits'.
+
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/include/asm/cpu-features.h | 1 -
+ arch/mips/include/asm/cpu-info.h | 2 --
+ arch/mips/include/asm/mipsregs.h | 2 ++
+ arch/mips/kernel/cpu-probe.c | 13 ++++++++-----
+ arch/mips/kernel/cpu-r3k-probe.c | 2 ++
+ 5 files changed, 12 insertions(+), 8 deletions(-)
+
+diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
+index 404390bb87eaf..3f11e5218e6c6 100644
+--- a/arch/mips/include/asm/cpu-features.h
++++ b/arch/mips/include/asm/cpu-features.h
+@@ -484,7 +484,6 @@
+ # endif
+ # ifndef cpu_vmbits
+ # define cpu_vmbits cpu_data[0].vmbits
+-# define __NEED_VMBITS_PROBE
+ # endif
+ #endif
+
+diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
+index fd60837ce50bc..211b578af6aa0 100644
+--- a/arch/mips/include/asm/cpu-info.h
++++ b/arch/mips/include/asm/cpu-info.h
+@@ -80,9 +80,7 @@ struct cpuinfo_mips {
+ int srsets; /* Shadow register sets */
+ int package;/* physical package number */
+ unsigned int globalnumber;
+-#ifdef CONFIG_64BIT
+ int vmbits; /* Virtual memory size in bits */
+-#endif
+ void *data; /* Additional data */
+ unsigned int watch_reg_count; /* Number that exist */
+ unsigned int watch_reg_use_cnt; /* Usable by ptrace */
+diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
+index f799c0d723dac..12a095dbf9e2a 100644
+--- a/arch/mips/include/asm/mipsregs.h
++++ b/arch/mips/include/asm/mipsregs.h
+@@ -1871,6 +1871,8 @@ do { \
+
+ #define read_c0_entryhi() __read_ulong_c0_register($10, 0)
+ #define write_c0_entryhi(val) __write_ulong_c0_register($10, 0, val)
++#define read_c0_entryhi_64() __read_64bit_c0_register($10, 0)
++#define write_c0_entryhi_64(val) __write_64bit_c0_register($10, 0, val)
+
+ #define read_c0_guestctl1() __read_32bit_c0_register($10, 4)
+ #define write_c0_guestctl1(val) __write_32bit_c0_register($10, 4, val)
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index 1e49e05ac8b1c..489612ed9d498 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -210,11 +210,14 @@ static inline void set_elf_base_platform(const char *plat)
+
+ static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
+ {
+-#ifdef __NEED_VMBITS_PROBE
+- write_c0_entryhi(0x3fffffffffffe000ULL);
+- back_to_back_c0_hazard();
+- c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL);
+-#endif
++ int vmbits = 31;
++
++ if (cpu_has_64bits) {
++ write_c0_entryhi_64(0x3fffffffffffe000ULL);
++ back_to_back_c0_hazard();
++ vmbits = fls64(read_c0_entryhi_64() & 0x3fffffffffffe000ULL);
++ }
++ c->vmbits = vmbits;
+ }
+
+ static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
+diff --git a/arch/mips/kernel/cpu-r3k-probe.c b/arch/mips/kernel/cpu-r3k-probe.c
+index 0c826f729f752..edcf04de0a6fb 100644
+--- a/arch/mips/kernel/cpu-r3k-probe.c
++++ b/arch/mips/kernel/cpu-r3k-probe.c
+@@ -137,6 +137,8 @@ void cpu_probe(void)
+ else
+ cpu_set_nofpu_opts(c);
+
++ c->vmbits = 31;
++
+ reserve_exception_space(0, 0x400);
+ }
+
+--
+2.53.0
+
--- /dev/null
+From f352cb198a7b9870d32944fb6df8d5ae0ec72606 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Apr 2026 01:50:00 +0100
+Subject: MIPS: mm: Rewrite TLB uniquification for the hidden bit feature
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 540760b77b8fc49d39d1b2b76196e5ec57711a32 upstream.
+
+Before the introduction of the EHINV feature, which lets software mark
+TLB entries invalid, certain older implementations of the MIPS ISA were
+equipped with an analogous bit, as a vendor extension, which however is
+hidden from software and only ever set at reset, and then any software
+write clears it, making the intended TLB entry valid.
+
+This feature makes it unsafe to read a TLB entry with TLBR, modify the
+page mask, and write the entry back with TLBWI, because this operation
+will implicitly clear the hidden bit and this may create a duplicate
+entry, as with the presence of the hidden bit there is no guarantee all
+the entries across the TLB are unique each.
+
+Usually the firmware has already uniquified TLB entries before handing
+control over, in which case we only need to guarantee at bootstrap no
+clash will happen with the VPN2 values chosen in local_flush_tlb_all().
+
+However with systems such as Mikrotik RB532 we get handed the TLB as at
+reset, with the hidden bit set across the entries and possibly duplicate
+entries present. This then causes a machine check exception when page
+sizes are reset in r4k_tlb_uniquify() and prevents the system from
+booting.
+
+Rewrite the algorithm used in r4k_tlb_uniquify() then such as to avoid
+the reuse of ASID/VPN values across the TLB. Get rid of global entries
+first as they may be blocking the entire address space, e.g. 16 256MiB
+pages will exhaust the whole address space of a 32-bit CPU and a single
+big page can exhaust the 32-bit compatibility space on a 64-bit CPU.
+
+Details of the algorithm chosen are given across the code itself.
+
+Fixes: 9f048fa48740 ("MIPS: mm: Prevent a TLB shutdown on initial uniquification")
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Cc: stable@vger.kernel.org # v6.18+
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 282 +++++++++++++++++++++++++++++++++--------
+ 1 file changed, 228 insertions(+), 54 deletions(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index 65f0357958fc7..24fe85fa169d1 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -13,6 +13,7 @@
+ #include <linux/sched.h>
+ #include <linux/smp.h>
+ #include <linux/memblock.h>
++#include <linux/minmax.h>
+ #include <linux/mm.h>
+ #include <linux/hugetlb.h>
+ #include <linux/export.h>
+@@ -24,6 +25,7 @@
+ #include <asm/hazards.h>
+ #include <asm/mmu_context.h>
+ #include <asm/tlb.h>
++#include <asm/tlbdebug.h>
+ #include <asm/tlbex.h>
+ #include <asm/tlbmisc.h>
+ #include <asm/setup.h>
+@@ -511,87 +513,259 @@ static int __init set_ntlb(char *str)
+ __setup("ntlb=", set_ntlb);
+
+
+-/* Comparison function for EntryHi VPN fields. */
+-static int r4k_vpn_cmp(const void *a, const void *b)
++/* The start bit position of VPN2 and Mask in EntryHi/PageMask registers. */
++#define VPN2_SHIFT 13
++
++/* Read full EntryHi even with CONFIG_32BIT. */
++static inline unsigned long long read_c0_entryhi_native(void)
++{
++ return cpu_has_64bits ? read_c0_entryhi_64() : read_c0_entryhi();
++}
++
++/* Write full EntryHi even with CONFIG_32BIT. */
++static inline void write_c0_entryhi_native(unsigned long long v)
+ {
+- long v = *(unsigned long *)a - *(unsigned long *)b;
+- int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1: 0;
+- return s ? (v != 0) | v >> s : v;
++ if (cpu_has_64bits)
++ write_c0_entryhi_64(v);
++ else
++ write_c0_entryhi(v);
+ }
+
++/* TLB entry state for uniquification. */
++struct tlbent {
++ unsigned long long wired:1;
++ unsigned long long global:1;
++ unsigned long long asid:10;
++ unsigned long long vpn:51;
++ unsigned long long pagesz:5;
++ unsigned long long index:14;
++};
++
+ /*
+- * Initialise all TLB entries with unique values that do not clash with
+- * what we have been handed over and what we'll be using ourselves.
++ * Comparison function for TLB entry sorting. Place wired entries first,
++ * then global entries, then order by the increasing VPN/ASID and the
++ * decreasing page size. This lets us avoid clashes with wired entries
++ * easily and get entries for larger pages out of the way first.
++ *
++ * We could group bits so as to reduce the number of comparisons, but this
++ * is seldom executed and not performance-critical, so prefer legibility.
+ */
+-static void __ref r4k_tlb_uniquify(void)
++static int r4k_entry_cmp(const void *a, const void *b)
+ {
+- int tlbsize = current_cpu_data.tlbsize;
+- bool use_slab = slab_is_available();
+- int start = num_wired_entries();
+- phys_addr_t tlb_vpn_size;
+- unsigned long *tlb_vpns;
+- unsigned long vpn_mask;
+- int cnt, ent, idx, i;
+-
+- vpn_mask = GENMASK(cpu_vmbits - 1, 13);
+- vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
++ struct tlbent ea = *(struct tlbent *)a, eb = *(struct tlbent *)b;
++
++ if (ea.wired > eb.wired)
++ return -1;
++ else if (ea.wired < eb.wired)
++ return 1;
++ else if (ea.global > eb.global)
++ return -1;
++ else if (ea.global < eb.global)
++ return 1;
++ else if (ea.vpn < eb.vpn)
++ return -1;
++ else if (ea.vpn > eb.vpn)
++ return 1;
++ else if (ea.asid < eb.asid)
++ return -1;
++ else if (ea.asid > eb.asid)
++ return 1;
++ else if (ea.pagesz > eb.pagesz)
++ return -1;
++ else if (ea.pagesz < eb.pagesz)
++ return 1;
++ else
++ return 0;
++}
+
+- tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
+- tlb_vpns = (use_slab ?
+- kmalloc(tlb_vpn_size, GFP_ATOMIC) :
+- memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
+- if (WARN_ON(!tlb_vpns))
+- return; /* Pray local_flush_tlb_all() is good enough. */
++/*
++ * Fetch all the TLB entries. Mask individual VPN values retrieved with
++ * the corresponding page mask and ignoring any 1KiB extension as we'll
++ * be using 4KiB pages for uniquification.
++ */
++static void __ref r4k_tlb_uniquify_read(struct tlbent *tlb_vpns, int tlbsize)
++{
++ int start = num_wired_entries();
++ unsigned long long vpn_mask;
++ bool global;
++ int i;
+
+- htw_stop();
++ vpn_mask = GENMASK(current_cpu_data.vmbits - 1, VPN2_SHIFT);
++ vpn_mask |= cpu_has_64bits ? 3ULL << 62 : 1 << 31;
+
+- for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
+- unsigned long vpn;
++ for (i = 0; i < tlbsize; i++) {
++ unsigned long long entryhi, vpn, mask, asid;
++ unsigned int pagesz;
+
+ write_c0_index(i);
+ mtc0_tlbr_hazard();
+ tlb_read();
+ tlb_read_hazard();
+- vpn = read_c0_entryhi();
+- vpn &= vpn_mask & PAGE_MASK;
+- tlb_vpns[cnt] = vpn;
+
+- /* Prevent any large pages from overlapping regular ones. */
+- write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
+- mtc0_tlbw_hazard();
+- tlb_write_indexed();
+- tlbw_use_hazard();
++ global = !!(read_c0_entrylo0() & ENTRYLO_G);
++ entryhi = read_c0_entryhi_native();
++ mask = read_c0_pagemask();
++
++ asid = entryhi & cpu_asid_mask(&current_cpu_data);
++ vpn = (entryhi & vpn_mask & ~mask) >> VPN2_SHIFT;
++ pagesz = ilog2((mask >> VPN2_SHIFT) + 1);
++
++ tlb_vpns[i].global = global;
++ tlb_vpns[i].asid = global ? 0 : asid;
++ tlb_vpns[i].vpn = vpn;
++ tlb_vpns[i].pagesz = pagesz;
++ tlb_vpns[i].wired = i < start;
++ tlb_vpns[i].index = i;
+ }
++}
+
+- sort(tlb_vpns, cnt, sizeof(tlb_vpns[0]), r4k_vpn_cmp, NULL);
++/*
++ * Write unique values to all but the wired TLB entries each, using
++ * the 4KiB page size. This size might not be supported with R6, but
++ * EHINV is mandatory for R6, so we won't ever be called in that case.
++ *
++ * A sorted table is supplied with any wired entries at the beginning,
++ * followed by any global entries, and then finally regular entries.
++ * We start at the VPN and ASID values of zero and only assign user
++ * addresses, therefore guaranteeing no clash with addresses produced
++ * by UNIQUE_ENTRYHI. We avoid any VPN values used by wired or global
++ * entries, by increasing the VPN value beyond the span of such entry.
++ *
++ * When a VPN/ASID clash is found with a regular entry we increment the
++ * ASID instead until no VPN/ASID clash has been found or the ASID space
++ * has been exhausted, in which case we increase the VPN value beyond
++ * the span of the largest clashing entry.
++ *
++ * We do not need to be concerned about FTLB or MMID configurations as
++ * those are required to implement the EHINV feature.
++ */
++static void __ref r4k_tlb_uniquify_write(struct tlbent *tlb_vpns, int tlbsize)
++{
++ unsigned long long asid, vpn, vpn_size, pagesz;
++ int widx, gidx, idx, sidx, lidx, i;
+
+- write_c0_pagemask(PM_DEFAULT_MASK);
++ vpn_size = 1ULL << (current_cpu_data.vmbits - VPN2_SHIFT);
++ pagesz = ilog2((PM_4K >> VPN2_SHIFT) + 1);
++
++ write_c0_pagemask(PM_4K);
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+
+- idx = 0;
+- ent = tlbsize;
+- for (i = start; i < tlbsize; i++)
+- while (1) {
+- unsigned long entryhi, vpn;
++ asid = 0;
++ vpn = 0;
++ widx = 0;
++ gidx = 0;
++ for (sidx = 0; sidx < tlbsize && tlb_vpns[sidx].wired; sidx++)
++ ;
++ for (lidx = sidx; lidx < tlbsize && tlb_vpns[lidx].global; lidx++)
++ ;
++ idx = gidx = sidx + 1;
++ for (i = sidx; i < tlbsize; i++) {
++ unsigned long long entryhi, vpn_pagesz = 0;
+
+- entryhi = UNIQUE_ENTRYHI(ent);
+- vpn = entryhi & vpn_mask & PAGE_MASK;
++ while (1) {
++ if (WARN_ON(vpn >= vpn_size)) {
++ dump_tlb_all();
++ /* Pray local_flush_tlb_all() will cope. */
++ return;
++ }
+
+- if (idx >= cnt || vpn < tlb_vpns[idx]) {
+- write_c0_entryhi(entryhi);
+- write_c0_index(i);
+- mtc0_tlbw_hazard();
+- tlb_write_indexed();
+- ent++;
+- break;
+- } else if (vpn == tlb_vpns[idx]) {
+- ent++;
+- } else {
++ /* VPN must be below the next wired entry. */
++ if (widx < sidx && vpn >= tlb_vpns[widx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[widx].vpn +
++ (1ULL << tlb_vpns[widx].pagesz)));
++ asid = 0;
++ widx++;
++ continue;
++ }
++ /* VPN must be below the next global entry. */
++ if (gidx < lidx && vpn >= tlb_vpns[gidx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[gidx].vpn +
++ (1ULL << tlb_vpns[gidx].pagesz)));
++ asid = 0;
++ gidx++;
++ continue;
++ }
++ /* Try to find a free ASID so as to conserve VPNs. */
++ if (idx < tlbsize && vpn == tlb_vpns[idx].vpn &&
++ asid == tlb_vpns[idx].asid) {
++ unsigned long long idx_pagesz;
++
++ idx_pagesz = tlb_vpns[idx].pagesz;
++ vpn_pagesz = max(vpn_pagesz, idx_pagesz);
++ do
++ idx++;
++ while (idx < tlbsize &&
++ vpn == tlb_vpns[idx].vpn &&
++ asid == tlb_vpns[idx].asid);
++ asid++;
++ if (asid > cpu_asid_mask(&current_cpu_data)) {
++ vpn += vpn_pagesz;
++ asid = 0;
++ vpn_pagesz = 0;
++ }
++ continue;
++ }
++ /* VPN mustn't be above the next regular entry. */
++ if (idx < tlbsize && vpn > tlb_vpns[idx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[idx].vpn +
++ (1ULL << tlb_vpns[idx].pagesz)));
++ asid = 0;
+ idx++;
++ continue;
+ }
++ break;
+ }
+
++ entryhi = (vpn << VPN2_SHIFT) | asid;
++ write_c0_entryhi_native(entryhi);
++ write_c0_index(tlb_vpns[i].index);
++ mtc0_tlbw_hazard();
++ tlb_write_indexed();
++
++ tlb_vpns[i].asid = asid;
++ tlb_vpns[i].vpn = vpn;
++ tlb_vpns[i].pagesz = pagesz;
++
++ asid++;
++ if (asid > cpu_asid_mask(&current_cpu_data)) {
++ vpn += 1ULL << pagesz;
++ asid = 0;
++ }
++ }
++}
++
++/*
++ * Initialise all TLB entries with unique values that do not clash with
++ * what we have been handed over and what we'll be using ourselves.
++ */
++static void __ref r4k_tlb_uniquify(void)
++{
++ int tlbsize = current_cpu_data.tlbsize;
++ bool use_slab = slab_is_available();
++ phys_addr_t tlb_vpn_size;
++ struct tlbent *tlb_vpns;
++
++ tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
++ tlb_vpns = (use_slab ?
++ kmalloc(tlb_vpn_size, GFP_ATOMIC) :
++ memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
++ if (WARN_ON(!tlb_vpns))
++ return; /* Pray local_flush_tlb_all() is good enough. */
++
++ htw_stop();
++
++ r4k_tlb_uniquify_read(tlb_vpns, tlbsize);
++
++ sort(tlb_vpns, tlbsize, sizeof(*tlb_vpns), r4k_entry_cmp, NULL);
++
++ r4k_tlb_uniquify_write(tlb_vpns, tlbsize);
++
++ write_c0_pagemask(PM_DEFAULT_MASK);
++
+ tlbw_use_hazard();
+ htw_start();
+ flush_micro_tlb();
+--
+2.53.0
+
--- /dev/null
+From 1108d921a9952285b5d281a6c23bcfeaec409c05 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Apr 2026 01:49:59 +0100
+Subject: MIPS: mm: Suppress TLB uniquification on EHINV hardware
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 74283cfe216392c7b776ebf6045b5b15ed9dffcd upstream.
+
+Hardware that supports the EHINV feature, mandatory for R6 ISA and FTLB
+implementation, lets software mark TLB entries invalid, which eliminates
+the need to ensure no duplicate matching entries are ever created. This
+feature is already used by local_flush_tlb_all(), via the UNIQUE_ENTRYHI
+macro, making the preceding call to r4k_tlb_uniquify() superfluous.
+
+The next change will also modify uniquification code such that it'll
+become incompatible with the FTLB and MMID features, as well as MIPSr6
+CPUs that do not implement 4KiB pages.
+
+Therefore prevent r4k_tlb_uniquify() from being used on EHINV hardware,
+as denoted by `cpu_has_tlbinv'.
+
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index 645f77e09d5b8..65f0357958fc7 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -640,7 +640,8 @@ static void r4k_tlb_configure(void)
+ temp_tlb_entry = current_cpu_data.tlbsize - 1;
+
+ /* From this point on the ARC firmware is dead. */
+- r4k_tlb_uniquify();
++ if (!cpu_has_tlbinv)
++ r4k_tlb_uniquify();
+ local_flush_tlb_all();
+
+ /* Did I tell you that ARC SUCKS? */
+--
+2.53.0
+
seg6-separate-dst_cache-for-input-and-output-paths-in-seg6-lwtunnel.patch
input-uinput-fix-circular-locking-dependency-with-ff-core.patch
input-uinput-take-event-lock-when-submitting-ff-request-event.patch
+mips-always-record-segbits-in-cpu_data.vmbits.patch
+mips-mm-suppress-tlb-uniquification-on-ehinv-hardwar.patch
+mips-mm-rewrite-tlb-uniquification-for-the-hidden-bi.patch
+btrfs-remove-pointless-out-labels-from-extent-tree.c.patch
+btrfs-fix-incorrect-return-value-after-changing-leaf.patch
+af_unix-count-cyclic-scc.patch
+af_unix-simplify-gc-state.patch
+af_unix-give-up-gc-if-msg_peek-intervened.patch
--- /dev/null
+From 466642ae9351956ea2d00fc1542b571a00e4a3de Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Mar 2026 18:18:15 +0800
+Subject: btrfs: fix incorrect return value after changing leaf in
+ lookup_extent_data_ref()
+
+From: robbieko <robbieko@synology.com>
+
+[ Upstream commit 316fb1b3169efb081d2db910cbbfef445afa03b9 ]
+
+After commit 1618aa3c2e01 ("btrfs: simplify return variables in
+lookup_extent_data_ref()"), the err and ret variables were merged into
+a single ret variable. However, when btrfs_next_leaf() returns 0
+(success), ret is overwritten from -ENOENT to 0. If the first key in
+the next leaf does not match (different objectid or type), the function
+returns 0 instead of -ENOENT, making the caller believe the lookup
+succeeded when it did not. This can lead to operations on the wrong
+extent tree item, potentially causing extent tree corruption.
+
+Fix this by returning -ENOENT directly when the key does not match,
+instead of relying on the ret variable.
+
+Fixes: 1618aa3c2e01 ("btrfs: simplify return variables in lookup_extent_data_ref()")
+CC: stable@vger.kernel.org # 6.12+
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: robbieko <robbieko@synology.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/extent-tree.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index f2b1bc2107539..f5ca544e35431 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -476,7 +476,7 @@ static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ if (key.objectid != bytenr ||
+ key.type != BTRFS_EXTENT_DATA_REF_KEY)
+- return ret;
++ return -ENOENT;
+
+ ref = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_extent_data_ref);
+--
+2.53.0
+
--- /dev/null
+From 187c0e3b2d1aff64e71f9da8d5210bec958e1b1a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Jan 2026 19:52:10 +0000
+Subject: btrfs: remove pointless out labels from extent-tree.c
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit ea8f9210050136bdd14f5e32b04cd01c8bd5c0ca ]
+
+Some functions (lookup_extent_data_ref(), __btrfs_mod_ref() and
+btrfs_free_tree_block()) have an 'out' label that does nothing but
+return, making it pointless. Simplify this by removing the label and
+returning instead of gotos plus setting the 'ret' variable.
+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: 316fb1b3169e ("btrfs: fix incorrect return value after changing leaf in lookup_extent_data_ref()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/extent-tree.c | 24 ++++++++++--------------
+ 1 file changed, 10 insertions(+), 14 deletions(-)
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index bc0db6593f329..f2b1bc2107539 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -476,7 +476,7 @@ static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ if (key.objectid != bytenr ||
+ key.type != BTRFS_EXTENT_DATA_REF_KEY)
+- goto fail;
++ return ret;
+
+ ref = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_extent_data_ref);
+@@ -487,12 +487,11 @@ static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
+ btrfs_release_path(path);
+ goto again;
+ }
+- ret = 0;
+- break;
++ return 0;
+ }
+ path->slots[0]++;
+ }
+-fail:
++
+ return ret;
+ }
+
+@@ -2474,7 +2473,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
+ int i;
+ int action;
+ int level;
+- int ret = 0;
++ int ret;
+
+ if (btrfs_is_testing(fs_info))
+ return 0;
+@@ -2526,7 +2525,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
+ else
+ ret = btrfs_free_extent(trans, &ref);
+ if (ret)
+- goto fail;
++ return ret;
+ } else {
+ /* We don't know the owning_root, leave as 0. */
+ ref.bytenr = btrfs_node_blockptr(buf, i);
+@@ -2539,12 +2538,10 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
+ else
+ ret = btrfs_free_extent(trans, &ref);
+ if (ret)
+- goto fail;
++ return ret;
+ }
+ }
+ return 0;
+-fail:
+- return ret;
+ }
+
+ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+@@ -3466,12 +3463,12 @@ int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ return 0;
+
+ if (btrfs_header_generation(buf) != trans->transid)
+- goto out;
++ return 0;
+
+ if (root_id != BTRFS_TREE_LOG_OBJECTID) {
+ ret = check_ref_cleanup(trans, buf->start);
+ if (!ret)
+- goto out;
++ return 0;
+ }
+
+ bg = btrfs_lookup_block_group(fs_info, buf->start);
+@@ -3479,7 +3476,7 @@ int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
+ pin_down_extent(trans, bg, buf->start, buf->len, true);
+ btrfs_put_block_group(bg);
+- goto out;
++ return 0;
+ }
+
+ /*
+@@ -3503,7 +3500,7 @@ int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ || btrfs_is_zoned(fs_info)) {
+ pin_down_extent(trans, bg, buf->start, buf->len, true);
+ btrfs_put_block_group(bg);
+- goto out;
++ return 0;
+ }
+
+ WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
+@@ -3513,7 +3510,6 @@ int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ btrfs_put_block_group(bg);
+ trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
+
+-out:
+ return 0;
+ }
+
+--
+2.53.0
+
--- /dev/null
+From fbae7f0ccc7665a06a72a653791027997568fc3b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Apr 2026 01:48:10 +0100
+Subject: MIPS: Always record SEGBITS in cpu_data.vmbits
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 8374c2cb83b95b3c92f129fd56527225c20a058c upstream.
+
+With a 32-bit kernel running on 64-bit MIPS hardware the hardcoded value
+of `cpu_vmbits' only records the size of compatibility useg and does not
+reflect the size of native xuseg or the complete range of values allowed
+in the VPN2 field of TLB entries.
+
+An upcoming change will need the actual VPN2 value range permitted even
+in 32-bit kernel configurations, so always include the `vmbits' member
+in `struct cpuinfo_mips' and probe for SEGBITS when running on 64-bit
+hardware and resorting to the currently hardcoded value of 31 on 32-bit
+processors. No functional change for users of `cpu_vmbits'.
+
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/include/asm/cpu-features.h | 1 -
+ arch/mips/include/asm/cpu-info.h | 2 --
+ arch/mips/include/asm/mipsregs.h | 2 ++
+ arch/mips/kernel/cpu-probe.c | 13 ++++++++-----
+ arch/mips/kernel/cpu-r3k-probe.c | 2 ++
+ 5 files changed, 12 insertions(+), 8 deletions(-)
+
+diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
+index 404390bb87eaf..3f11e5218e6c6 100644
+--- a/arch/mips/include/asm/cpu-features.h
++++ b/arch/mips/include/asm/cpu-features.h
+@@ -484,7 +484,6 @@
+ # endif
+ # ifndef cpu_vmbits
+ # define cpu_vmbits cpu_data[0].vmbits
+-# define __NEED_VMBITS_PROBE
+ # endif
+ #endif
+
+diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
+index fd60837ce50bc..211b578af6aa0 100644
+--- a/arch/mips/include/asm/cpu-info.h
++++ b/arch/mips/include/asm/cpu-info.h
+@@ -80,9 +80,7 @@ struct cpuinfo_mips {
+ int srsets; /* Shadow register sets */
+ int package;/* physical package number */
+ unsigned int globalnumber;
+-#ifdef CONFIG_64BIT
+ int vmbits; /* Virtual memory size in bits */
+-#endif
+ void *data; /* Additional data */
+ unsigned int watch_reg_count; /* Number that exist */
+ unsigned int watch_reg_use_cnt; /* Usable by ptrace */
+diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
+index f799c0d723dac..12a095dbf9e2a 100644
+--- a/arch/mips/include/asm/mipsregs.h
++++ b/arch/mips/include/asm/mipsregs.h
+@@ -1871,6 +1871,8 @@ do { \
+
+ #define read_c0_entryhi() __read_ulong_c0_register($10, 0)
+ #define write_c0_entryhi(val) __write_ulong_c0_register($10, 0, val)
++#define read_c0_entryhi_64() __read_64bit_c0_register($10, 0)
++#define write_c0_entryhi_64(val) __write_64bit_c0_register($10, 0, val)
+
+ #define read_c0_guestctl1() __read_32bit_c0_register($10, 4)
+ #define write_c0_guestctl1(val) __write_32bit_c0_register($10, 4, val)
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index 1e49e05ac8b1c..489612ed9d498 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -210,11 +210,14 @@ static inline void set_elf_base_platform(const char *plat)
+
+ static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
+ {
+-#ifdef __NEED_VMBITS_PROBE
+- write_c0_entryhi(0x3fffffffffffe000ULL);
+- back_to_back_c0_hazard();
+- c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL);
+-#endif
++ int vmbits = 31;
++
++ if (cpu_has_64bits) {
++ write_c0_entryhi_64(0x3fffffffffffe000ULL);
++ back_to_back_c0_hazard();
++ vmbits = fls64(read_c0_entryhi_64() & 0x3fffffffffffe000ULL);
++ }
++ c->vmbits = vmbits;
+ }
+
+ static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
+diff --git a/arch/mips/kernel/cpu-r3k-probe.c b/arch/mips/kernel/cpu-r3k-probe.c
+index 0c826f729f752..edcf04de0a6fb 100644
+--- a/arch/mips/kernel/cpu-r3k-probe.c
++++ b/arch/mips/kernel/cpu-r3k-probe.c
+@@ -137,6 +137,8 @@ void cpu_probe(void)
+ else
+ cpu_set_nofpu_opts(c);
+
++ c->vmbits = 31;
++
+ reserve_exception_space(0, 0x400);
+ }
+
+--
+2.53.0
+
--- /dev/null
+From bef63ec96435bb90c2daaa16d423db4302ebafe6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Apr 2026 01:48:12 +0100
+Subject: MIPS: mm: Rewrite TLB uniquification for the hidden bit feature
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 540760b77b8fc49d39d1b2b76196e5ec57711a32 upstream.
+
+Before the introduction of the EHINV feature, which lets software mark
+TLB entries invalid, certain older implementations of the MIPS ISA were
+equipped with an analogous bit, as a vendor extension, which however is
+hidden from software and only ever set at reset, and then any software
+write clears it, making the intended TLB entry valid.
+
+This feature makes it unsafe to read a TLB entry with TLBR, modify the
+page mask, and write the entry back with TLBWI, because this operation
+will implicitly clear the hidden bit and this may create a duplicate
+entry, as with the presence of the hidden bit there is no guarantee all
+the entries across the TLB are unique each.
+
+Usually the firmware has already uniquified TLB entries before handing
+control over, in which case we only need to guarantee at bootstrap no
+clash will happen with the VPN2 values chosen in local_flush_tlb_all().
+
+However with systems such as Mikrotik RB532 we get handed the TLB as at
+reset, with the hidden bit set across the entries and possibly duplicate
+entries present. This then causes a machine check exception when page
+sizes are reset in r4k_tlb_uniquify() and prevents the system from
+booting.
+
+Rewrite the algorithm used in r4k_tlb_uniquify() then such as to avoid
+the reuse of ASID/VPN values across the TLB. Get rid of global entries
+first as they may be blocking the entire address space, e.g. 16 256MiB
+pages will exhaust the whole address space of a 32-bit CPU and a single
+big page can exhaust the 32-bit compatibility space on a 64-bit CPU.
+
+Details of the algorithm chosen are given across the code itself.
+
+Fixes: 9f048fa48740 ("MIPS: mm: Prevent a TLB shutdown on initial uniquification")
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Cc: stable@vger.kernel.org # v6.18+
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 282 +++++++++++++++++++++++++++++++++--------
+ 1 file changed, 228 insertions(+), 54 deletions(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index 65f0357958fc7..24fe85fa169d1 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -13,6 +13,7 @@
+ #include <linux/sched.h>
+ #include <linux/smp.h>
+ #include <linux/memblock.h>
++#include <linux/minmax.h>
+ #include <linux/mm.h>
+ #include <linux/hugetlb.h>
+ #include <linux/export.h>
+@@ -24,6 +25,7 @@
+ #include <asm/hazards.h>
+ #include <asm/mmu_context.h>
+ #include <asm/tlb.h>
++#include <asm/tlbdebug.h>
+ #include <asm/tlbex.h>
+ #include <asm/tlbmisc.h>
+ #include <asm/setup.h>
+@@ -511,87 +513,259 @@ static int __init set_ntlb(char *str)
+ __setup("ntlb=", set_ntlb);
+
+
+-/* Comparison function for EntryHi VPN fields. */
+-static int r4k_vpn_cmp(const void *a, const void *b)
++/* The start bit position of VPN2 and Mask in EntryHi/PageMask registers. */
++#define VPN2_SHIFT 13
++
++/* Read full EntryHi even with CONFIG_32BIT. */
++static inline unsigned long long read_c0_entryhi_native(void)
++{
++ return cpu_has_64bits ? read_c0_entryhi_64() : read_c0_entryhi();
++}
++
++/* Write full EntryHi even with CONFIG_32BIT. */
++static inline void write_c0_entryhi_native(unsigned long long v)
+ {
+- long v = *(unsigned long *)a - *(unsigned long *)b;
+- int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1: 0;
+- return s ? (v != 0) | v >> s : v;
++ if (cpu_has_64bits)
++ write_c0_entryhi_64(v);
++ else
++ write_c0_entryhi(v);
+ }
+
++/* TLB entry state for uniquification. */
++struct tlbent {
++ unsigned long long wired:1;
++ unsigned long long global:1;
++ unsigned long long asid:10;
++ unsigned long long vpn:51;
++ unsigned long long pagesz:5;
++ unsigned long long index:14;
++};
++
+ /*
+- * Initialise all TLB entries with unique values that do not clash with
+- * what we have been handed over and what we'll be using ourselves.
++ * Comparison function for TLB entry sorting. Place wired entries first,
++ * then global entries, then order by the increasing VPN/ASID and the
++ * decreasing page size. This lets us avoid clashes with wired entries
++ * easily and get entries for larger pages out of the way first.
++ *
++ * We could group bits so as to reduce the number of comparisons, but this
++ * is seldom executed and not performance-critical, so prefer legibility.
+ */
+-static void __ref r4k_tlb_uniquify(void)
++static int r4k_entry_cmp(const void *a, const void *b)
+ {
+- int tlbsize = current_cpu_data.tlbsize;
+- bool use_slab = slab_is_available();
+- int start = num_wired_entries();
+- phys_addr_t tlb_vpn_size;
+- unsigned long *tlb_vpns;
+- unsigned long vpn_mask;
+- int cnt, ent, idx, i;
+-
+- vpn_mask = GENMASK(cpu_vmbits - 1, 13);
+- vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
++ struct tlbent ea = *(struct tlbent *)a, eb = *(struct tlbent *)b;
++
++ if (ea.wired > eb.wired)
++ return -1;
++ else if (ea.wired < eb.wired)
++ return 1;
++ else if (ea.global > eb.global)
++ return -1;
++ else if (ea.global < eb.global)
++ return 1;
++ else if (ea.vpn < eb.vpn)
++ return -1;
++ else if (ea.vpn > eb.vpn)
++ return 1;
++ else if (ea.asid < eb.asid)
++ return -1;
++ else if (ea.asid > eb.asid)
++ return 1;
++ else if (ea.pagesz > eb.pagesz)
++ return -1;
++ else if (ea.pagesz < eb.pagesz)
++ return 1;
++ else
++ return 0;
++}
+
+- tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
+- tlb_vpns = (use_slab ?
+- kmalloc(tlb_vpn_size, GFP_ATOMIC) :
+- memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
+- if (WARN_ON(!tlb_vpns))
+- return; /* Pray local_flush_tlb_all() is good enough. */
++/*
++ * Fetch all the TLB entries. Mask individual VPN values retrieved with
++ * the corresponding page mask and ignoring any 1KiB extension as we'll
++ * be using 4KiB pages for uniquification.
++ */
++static void __ref r4k_tlb_uniquify_read(struct tlbent *tlb_vpns, int tlbsize)
++{
++ int start = num_wired_entries();
++ unsigned long long vpn_mask;
++ bool global;
++ int i;
+
+- htw_stop();
++ vpn_mask = GENMASK(current_cpu_data.vmbits - 1, VPN2_SHIFT);
++ vpn_mask |= cpu_has_64bits ? 3ULL << 62 : 1 << 31;
+
+- for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
+- unsigned long vpn;
++ for (i = 0; i < tlbsize; i++) {
++ unsigned long long entryhi, vpn, mask, asid;
++ unsigned int pagesz;
+
+ write_c0_index(i);
+ mtc0_tlbr_hazard();
+ tlb_read();
+ tlb_read_hazard();
+- vpn = read_c0_entryhi();
+- vpn &= vpn_mask & PAGE_MASK;
+- tlb_vpns[cnt] = vpn;
+
+- /* Prevent any large pages from overlapping regular ones. */
+- write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
+- mtc0_tlbw_hazard();
+- tlb_write_indexed();
+- tlbw_use_hazard();
++ global = !!(read_c0_entrylo0() & ENTRYLO_G);
++ entryhi = read_c0_entryhi_native();
++ mask = read_c0_pagemask();
++
++ asid = entryhi & cpu_asid_mask(&current_cpu_data);
++ vpn = (entryhi & vpn_mask & ~mask) >> VPN2_SHIFT;
++ pagesz = ilog2((mask >> VPN2_SHIFT) + 1);
++
++ tlb_vpns[i].global = global;
++ tlb_vpns[i].asid = global ? 0 : asid;
++ tlb_vpns[i].vpn = vpn;
++ tlb_vpns[i].pagesz = pagesz;
++ tlb_vpns[i].wired = i < start;
++ tlb_vpns[i].index = i;
+ }
++}
+
+- sort(tlb_vpns, cnt, sizeof(tlb_vpns[0]), r4k_vpn_cmp, NULL);
++/*
++ * Write unique values to all but the wired TLB entries each, using
++ * the 4KiB page size. This size might not be supported with R6, but
++ * EHINV is mandatory for R6, so we won't ever be called in that case.
++ *
++ * A sorted table is supplied with any wired entries at the beginning,
++ * followed by any global entries, and then finally regular entries.
++ * We start at the VPN and ASID values of zero and only assign user
++ * addresses, therefore guaranteeing no clash with addresses produced
++ * by UNIQUE_ENTRYHI. We avoid any VPN values used by wired or global
++ * entries, by increasing the VPN value beyond the span of such entry.
++ *
++ * When a VPN/ASID clash is found with a regular entry we increment the
++ * ASID instead until no VPN/ASID clash has been found or the ASID space
++ * has been exhausted, in which case we increase the VPN value beyond
++ * the span of the largest clashing entry.
++ *
++ * We do not need to be concerned about FTLB or MMID configurations as
++ * those are required to implement the EHINV feature.
++ */
++static void __ref r4k_tlb_uniquify_write(struct tlbent *tlb_vpns, int tlbsize)
++{
++ unsigned long long asid, vpn, vpn_size, pagesz;
++ int widx, gidx, idx, sidx, lidx, i;
+
+- write_c0_pagemask(PM_DEFAULT_MASK);
++ vpn_size = 1ULL << (current_cpu_data.vmbits - VPN2_SHIFT);
++ pagesz = ilog2((PM_4K >> VPN2_SHIFT) + 1);
++
++ write_c0_pagemask(PM_4K);
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+
+- idx = 0;
+- ent = tlbsize;
+- for (i = start; i < tlbsize; i++)
+- while (1) {
+- unsigned long entryhi, vpn;
++ asid = 0;
++ vpn = 0;
++ widx = 0;
++ for (sidx = 0; sidx < tlbsize && tlb_vpns[sidx].wired; sidx++)
++ ;
++ for (lidx = sidx; lidx < tlbsize && tlb_vpns[lidx].global; lidx++)
++ ;
++ gidx = sidx;
++ idx = lidx;
++ for (i = sidx; i < tlbsize; i++) {
++ unsigned long long entryhi, vpn_pagesz = 0;
+
+- entryhi = UNIQUE_ENTRYHI(ent);
+- vpn = entryhi & vpn_mask & PAGE_MASK;
++ while (1) {
++ if (WARN_ON(vpn >= vpn_size)) {
++ dump_tlb_all();
++ /* Pray local_flush_tlb_all() will cope. */
++ return;
++ }
+
+- if (idx >= cnt || vpn < tlb_vpns[idx]) {
+- write_c0_entryhi(entryhi);
+- write_c0_index(i);
+- mtc0_tlbw_hazard();
+- tlb_write_indexed();
+- ent++;
+- break;
+- } else if (vpn == tlb_vpns[idx]) {
+- ent++;
+- } else {
++ /* VPN must be below the next wired entry. */
++ if (widx < sidx && vpn >= tlb_vpns[widx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[widx].vpn +
++ (1ULL << tlb_vpns[widx].pagesz)));
++ asid = 0;
++ widx++;
++ continue;
++ }
++ /* VPN must be below the next global entry. */
++ if (gidx < lidx && vpn >= tlb_vpns[gidx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[gidx].vpn +
++ (1ULL << tlb_vpns[gidx].pagesz)));
++ asid = 0;
++ gidx++;
++ continue;
++ }
++ /* Try to find a free ASID so as to conserve VPNs. */
++ if (idx < tlbsize && vpn == tlb_vpns[idx].vpn &&
++ asid == tlb_vpns[idx].asid) {
++ unsigned long long idx_pagesz;
++
++ idx_pagesz = tlb_vpns[idx].pagesz;
++ vpn_pagesz = max(vpn_pagesz, idx_pagesz);
++ do
++ idx++;
++ while (idx < tlbsize &&
++ vpn == tlb_vpns[idx].vpn &&
++ asid == tlb_vpns[idx].asid);
++ asid++;
++ if (asid > cpu_asid_mask(&current_cpu_data)) {
++ vpn += vpn_pagesz;
++ asid = 0;
++ vpn_pagesz = 0;
++ }
++ continue;
++ }
++ /* VPN mustn't be above the next regular entry. */
++ if (idx < tlbsize && vpn > tlb_vpns[idx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[idx].vpn +
++ (1ULL << tlb_vpns[idx].pagesz)));
++ asid = 0;
+ idx++;
++ continue;
+ }
++ break;
+ }
+
++ entryhi = (vpn << VPN2_SHIFT) | asid;
++ write_c0_entryhi_native(entryhi);
++ write_c0_index(tlb_vpns[i].index);
++ mtc0_tlbw_hazard();
++ tlb_write_indexed();
++
++ tlb_vpns[i].asid = asid;
++ tlb_vpns[i].vpn = vpn;
++ tlb_vpns[i].pagesz = pagesz;
++
++ asid++;
++ if (asid > cpu_asid_mask(&current_cpu_data)) {
++ vpn += 1ULL << pagesz;
++ asid = 0;
++ }
++ }
++}
++
++/*
++ * Initialise all TLB entries with unique values that do not clash with
++ * what we have been handed over and what we'll be using ourselves.
++ */
++static void __ref r4k_tlb_uniquify(void)
++{
++ int tlbsize = current_cpu_data.tlbsize;
++ bool use_slab = slab_is_available();
++ phys_addr_t tlb_vpn_size;
++ struct tlbent *tlb_vpns;
++
++ tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
++ tlb_vpns = (use_slab ?
++ kmalloc(tlb_vpn_size, GFP_ATOMIC) :
++ memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
++ if (WARN_ON(!tlb_vpns))
++ return; /* Pray local_flush_tlb_all() is good enough. */
++
++ htw_stop();
++
++ r4k_tlb_uniquify_read(tlb_vpns, tlbsize);
++
++ sort(tlb_vpns, tlbsize, sizeof(*tlb_vpns), r4k_entry_cmp, NULL);
++
++ r4k_tlb_uniquify_write(tlb_vpns, tlbsize);
++
++ write_c0_pagemask(PM_DEFAULT_MASK);
++
+ tlbw_use_hazard();
+ htw_start();
+ flush_micro_tlb();
+--
+2.53.0
+
--- /dev/null
+From d58212487dedbd689baf6ad3cc59d368ede9666a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Apr 2026 01:48:11 +0100
+Subject: MIPS: mm: Suppress TLB uniquification on EHINV hardware
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 74283cfe216392c7b776ebf6045b5b15ed9dffcd upstream.
+
+Hardware that supports the EHINV feature, mandatory for R6 ISA and FTLB
+implementation, lets software mark TLB entries invalid, which eliminates
+the need to ensure no duplicate matching entries are ever created. This
+feature is already used by local_flush_tlb_all(), via the UNIQUE_ENTRYHI
+macro, making the preceding call to r4k_tlb_uniquify() superfluous.
+
+The next change will also modify uniquification code such that it'll
+become incompatible with the FTLB and MMID features, as well as MIPSr6
+CPUs that do not implement 4KiB pages.
+
+Therefore prevent r4k_tlb_uniquify() from being used on EHINV hardware,
+as denoted by `cpu_has_tlbinv'.
+
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index 645f77e09d5b8..65f0357958fc7 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -640,7 +640,8 @@ static void r4k_tlb_configure(void)
+ temp_tlb_entry = current_cpu_data.tlbsize - 1;
+
+ /* From this point on the ARC firmware is dead. */
+- r4k_tlb_uniquify();
++ if (!cpu_has_tlbinv)
++ r4k_tlb_uniquify();
+ local_flush_tlb_all();
+
+ /* Did I tell you that ARC SUCKS? */
+--
+2.53.0
+
seg6-separate-dst_cache-for-input-and-output-paths-in-seg6-lwtunnel.patch
input-uinput-fix-circular-locking-dependency-with-ff-core.patch
input-uinput-take-event-lock-when-submitting-ff-request-event.patch
+mips-always-record-segbits-in-cpu_data.vmbits.patch
+mips-mm-suppress-tlb-uniquification-on-ehinv-hardwar.patch
+mips-mm-rewrite-tlb-uniquification-for-the-hidden-bi.patch
+btrfs-remove-pointless-out-labels-from-extent-tree.c.patch
+btrfs-fix-incorrect-return-value-after-changing-leaf.patch
--- /dev/null
+From b5f76dbd1230d8cd7ab6d9048b2e2d39cef3b04a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Apr 2026 18:13:50 +0800
+Subject: ASoC: simple-card-utils: Don't use __free(device_node) at
+ graph_util_parse_dai()
+
+From: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+
+[ Upstream commit de74ec718e0788e1998eb7289ad07970e27cae27 ]
+
+commit 419d1918105e ("ASoC: simple-card-utils: use __free(device_node) for
+device node") uses __free(device_node) for dlc->of_node, but we need to
+keep it while driver is in use.
+
+Don't use __free(device_node) in graph_util_parse_dai().
+
+Fixes: 419d1918105e ("ASoC: simple-card-utils: use __free(device_node) for device node")
+Reported-by: Thuan Nguyen <thuan.nguyen-hong@banvien.com.vn>
+Reported-by: Detlev Casanova <detlev.casanova@collabora.com>
+Signed-off-by: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+Tested-by: Thuan Nguyen <thuan.nguyen-hong@banvien.com.vn>
+Tested-by: Detlev Casanova <detlev.casanova@collabora.com>
+Link: https://patch.msgid.link/87eczisyhh.wl-kuninori.morimoto.gx@renesas.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+[ The function asoc_graph_parse_dai() was renamed to graph_util_parse_dai() in
+commit b5a95c5bf6d6 ("ASoC: simple_card_utils.h: convert not to use asoc_xxx()")
+in 6.7. The fix should be applied to asoc_graph_parse_dai() instead in 6.6. ]
+Signed-off-by: Alva Lan <alvalan9@foxmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/generic/simple-card-utils.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
+index 86ccd044b93c4..a64484fe5a284 100644
+--- a/sound/soc/generic/simple-card-utils.c
++++ b/sound/soc/generic/simple-card-utils.c
+@@ -1061,6 +1061,7 @@ static int graph_get_dai_id(struct device_node *ep)
+ int asoc_graph_parse_dai(struct device *dev, struct device_node *ep,
+ struct snd_soc_dai_link_component *dlc, int *is_single_link)
+ {
++ struct device_node *node;
+ struct of_phandle_args args = {};
+ struct snd_soc_dai *dai;
+ int ret;
+@@ -1068,7 +1069,7 @@ int asoc_graph_parse_dai(struct device *dev, struct device_node *ep,
+ if (!ep)
+ return 0;
+
+- struct device_node *node __free(device_node) = of_graph_get_port_parent(ep);
++ node = of_graph_get_port_parent(ep);
+
+ /*
+ * Try to find from DAI node
+@@ -1110,8 +1111,10 @@ int asoc_graph_parse_dai(struct device *dev, struct device_node *ep,
+ * if he unbinded CPU or Codec.
+ */
+ ret = snd_soc_get_dlc(&args, dlc);
+- if (ret < 0)
++ if (ret < 0) {
++ of_node_put(node);
+ return ret;
++ }
+
+ parse_dai_end:
+ if (is_single_link)
+--
+2.53.0
+
--- /dev/null
+From b2bdb76f2ec47af7d55073ca99bae394b80df92b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Apr 2026 01:51:50 +0100
+Subject: MIPS: Always record SEGBITS in cpu_data.vmbits
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 8374c2cb83b95b3c92f129fd56527225c20a058c upstream.
+
+With a 32-bit kernel running on 64-bit MIPS hardware the hardcoded value
+of `cpu_vmbits' only records the size of compatibility useg and does not
+reflect the size of native xuseg or the complete range of values allowed
+in the VPN2 field of TLB entries.
+
+An upcoming change will need the actual VPN2 value range permitted even
+in 32-bit kernel configurations, so always include the `vmbits' member
+in `struct cpuinfo_mips' and probe for SEGBITS when running on 64-bit
+hardware and resorting to the currently hardcoded value of 31 on 32-bit
+processors. No functional change for users of `cpu_vmbits'.
+
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/include/asm/cpu-features.h | 1 -
+ arch/mips/include/asm/cpu-info.h | 2 --
+ arch/mips/include/asm/mipsregs.h | 2 ++
+ arch/mips/kernel/cpu-probe.c | 13 ++++++++-----
+ arch/mips/kernel/cpu-r3k-probe.c | 2 ++
+ 5 files changed, 12 insertions(+), 8 deletions(-)
+
+diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
+index 404390bb87eaf..3f11e5218e6c6 100644
+--- a/arch/mips/include/asm/cpu-features.h
++++ b/arch/mips/include/asm/cpu-features.h
+@@ -484,7 +484,6 @@
+ # endif
+ # ifndef cpu_vmbits
+ # define cpu_vmbits cpu_data[0].vmbits
+-# define __NEED_VMBITS_PROBE
+ # endif
+ #endif
+
+diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
+index a600670d00e97..1aee44124f118 100644
+--- a/arch/mips/include/asm/cpu-info.h
++++ b/arch/mips/include/asm/cpu-info.h
+@@ -80,9 +80,7 @@ struct cpuinfo_mips {
+ int srsets; /* Shadow register sets */
+ int package;/* physical package number */
+ unsigned int globalnumber;
+-#ifdef CONFIG_64BIT
+ int vmbits; /* Virtual memory size in bits */
+-#endif
+ void *data; /* Additional data */
+ unsigned int watch_reg_count; /* Number that exist */
+ unsigned int watch_reg_use_cnt; /* Usable by ptrace */
+diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
+index e959a6b1a325c..92ad301395045 100644
+--- a/arch/mips/include/asm/mipsregs.h
++++ b/arch/mips/include/asm/mipsregs.h
+@@ -1714,6 +1714,8 @@ do { \
+
+ #define read_c0_entryhi() __read_ulong_c0_register($10, 0)
+ #define write_c0_entryhi(val) __write_ulong_c0_register($10, 0, val)
++#define read_c0_entryhi_64() __read_64bit_c0_register($10, 0)
++#define write_c0_entryhi_64(val) __write_64bit_c0_register($10, 0, val)
+
+ #define read_c0_guestctl1() __read_32bit_c0_register($10, 4)
+ #define write_c0_guestctl1(val) __write_32bit_c0_register($10, 4, val)
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index c7fee72ea6067..d982bc3288eb7 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -208,11 +208,14 @@ static inline void set_elf_base_platform(const char *plat)
+
+ static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
+ {
+-#ifdef __NEED_VMBITS_PROBE
+- write_c0_entryhi(0x3fffffffffffe000ULL);
+- back_to_back_c0_hazard();
+- c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL);
+-#endif
++ int vmbits = 31;
++
++ if (cpu_has_64bits) {
++ write_c0_entryhi_64(0x3fffffffffffe000ULL);
++ back_to_back_c0_hazard();
++ vmbits = fls64(read_c0_entryhi_64() & 0x3fffffffffffe000ULL);
++ }
++ c->vmbits = vmbits;
+ }
+
+ static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
+diff --git a/arch/mips/kernel/cpu-r3k-probe.c b/arch/mips/kernel/cpu-r3k-probe.c
+index be93469c0e0ec..2adf95225aa7e 100644
+--- a/arch/mips/kernel/cpu-r3k-probe.c
++++ b/arch/mips/kernel/cpu-r3k-probe.c
+@@ -138,6 +138,8 @@ void cpu_probe(void)
+ else
+ cpu_set_nofpu_opts(c);
+
++ c->vmbits = 31;
++
+ reserve_exception_space(0, 0x400);
+ }
+
+--
+2.53.0
+
--- /dev/null
+From cf6f71c5bb5ee4fc40da6ce02bf583dc7a41fe2f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Apr 2026 01:51:52 +0100
+Subject: MIPS: mm: Rewrite TLB uniquification for the hidden bit feature
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 540760b77b8fc49d39d1b2b76196e5ec57711a32 upstream.
+
+Before the introduction of the EHINV feature, which lets software mark
+TLB entries invalid, certain older implementations of the MIPS ISA were
+equipped with an analogous bit, as a vendor extension, which however is
+hidden from software and only ever set at reset, and then any software
+write clears it, making the intended TLB entry valid.
+
+This feature makes it unsafe to read a TLB entry with TLBR, modify the
+page mask, and write the entry back with TLBWI, because this operation
+will implicitly clear the hidden bit and this may create a duplicate
+entry, as with the presence of the hidden bit there is no guarantee all
+the entries across the TLB are unique each.
+
+Usually the firmware has already uniquified TLB entries before handing
+control over, in which case we only need to guarantee at bootstrap no
+clash will happen with the VPN2 values chosen in local_flush_tlb_all().
+
+However with systems such as Mikrotik RB532 we get handed the TLB as at
+reset, with the hidden bit set across the entries and possibly duplicate
+entries present. This then causes a machine check exception when page
+sizes are reset in r4k_tlb_uniquify() and prevents the system from
+booting.
+
+Rewrite the algorithm used in r4k_tlb_uniquify() then such as to avoid
+the reuse of ASID/VPN values across the TLB. Get rid of global entries
+first as they may be blocking the entire address space, e.g. 16 256MiB
+pages will exhaust the whole address space of a 32-bit CPU and a single
+big page can exhaust the 32-bit compatibility space on a 64-bit CPU.
+
+Details of the algorithm chosen are given across the code itself.
+
+Fixes: 9f048fa48740 ("MIPS: mm: Prevent a TLB shutdown on initial uniquification")
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Cc: stable@vger.kernel.org # v6.18+
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 282 +++++++++++++++++++++++++++++++++--------
+ 1 file changed, 228 insertions(+), 54 deletions(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index 18ae61b6f2b17..0cb68a5316010 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -13,6 +13,7 @@
+ #include <linux/sched.h>
+ #include <linux/smp.h>
+ #include <linux/memblock.h>
++#include <linux/minmax.h>
+ #include <linux/mm.h>
+ #include <linux/hugetlb.h>
+ #include <linux/export.h>
+@@ -24,6 +25,7 @@
+ #include <asm/hazards.h>
+ #include <asm/mmu_context.h>
+ #include <asm/tlb.h>
++#include <asm/tlbdebug.h>
+ #include <asm/tlbmisc.h>
+
+ extern void build_tlb_refill_handler(void);
+@@ -509,87 +511,259 @@ static int __init set_ntlb(char *str)
+ __setup("ntlb=", set_ntlb);
+
+
+-/* Comparison function for EntryHi VPN fields. */
+-static int r4k_vpn_cmp(const void *a, const void *b)
++/* The start bit position of VPN2 and Mask in EntryHi/PageMask registers. */
++#define VPN2_SHIFT 13
++
++/* Read full EntryHi even with CONFIG_32BIT. */
++static inline unsigned long long read_c0_entryhi_native(void)
++{
++ return cpu_has_64bits ? read_c0_entryhi_64() : read_c0_entryhi();
++}
++
++/* Write full EntryHi even with CONFIG_32BIT. */
++static inline void write_c0_entryhi_native(unsigned long long v)
+ {
+- long v = *(unsigned long *)a - *(unsigned long *)b;
+- int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1: 0;
+- return s ? (v != 0) | v >> s : v;
++ if (cpu_has_64bits)
++ write_c0_entryhi_64(v);
++ else
++ write_c0_entryhi(v);
+ }
+
++/* TLB entry state for uniquification. */
++struct tlbent {
++ unsigned long long wired:1;
++ unsigned long long global:1;
++ unsigned long long asid:10;
++ unsigned long long vpn:51;
++ unsigned long long pagesz:5;
++ unsigned long long index:14;
++};
++
+ /*
+- * Initialise all TLB entries with unique values that do not clash with
+- * what we have been handed over and what we'll be using ourselves.
++ * Comparison function for TLB entry sorting. Place wired entries first,
++ * then global entries, then order by the increasing VPN/ASID and the
++ * decreasing page size. This lets us avoid clashes with wired entries
++ * easily and get entries for larger pages out of the way first.
++ *
++ * We could group bits so as to reduce the number of comparisons, but this
++ * is seldom executed and not performance-critical, so prefer legibility.
+ */
+-static void __ref r4k_tlb_uniquify(void)
++static int r4k_entry_cmp(const void *a, const void *b)
+ {
+- int tlbsize = current_cpu_data.tlbsize;
+- bool use_slab = slab_is_available();
+- int start = num_wired_entries();
+- phys_addr_t tlb_vpn_size;
+- unsigned long *tlb_vpns;
+- unsigned long vpn_mask;
+- int cnt, ent, idx, i;
+-
+- vpn_mask = GENMASK(cpu_vmbits - 1, 13);
+- vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
++ struct tlbent ea = *(struct tlbent *)a, eb = *(struct tlbent *)b;
++
++ if (ea.wired > eb.wired)
++ return -1;
++ else if (ea.wired < eb.wired)
++ return 1;
++ else if (ea.global > eb.global)
++ return -1;
++ else if (ea.global < eb.global)
++ return 1;
++ else if (ea.vpn < eb.vpn)
++ return -1;
++ else if (ea.vpn > eb.vpn)
++ return 1;
++ else if (ea.asid < eb.asid)
++ return -1;
++ else if (ea.asid > eb.asid)
++ return 1;
++ else if (ea.pagesz > eb.pagesz)
++ return -1;
++ else if (ea.pagesz < eb.pagesz)
++ return 1;
++ else
++ return 0;
++}
+
+- tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
+- tlb_vpns = (use_slab ?
+- kmalloc(tlb_vpn_size, GFP_ATOMIC) :
+- memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
+- if (WARN_ON(!tlb_vpns))
+- return; /* Pray local_flush_tlb_all() is good enough. */
++/*
++ * Fetch all the TLB entries. Mask individual VPN values retrieved with
++ * the corresponding page mask and ignoring any 1KiB extension as we'll
++ * be using 4KiB pages for uniquification.
++ */
++static void __ref r4k_tlb_uniquify_read(struct tlbent *tlb_vpns, int tlbsize)
++{
++ int start = num_wired_entries();
++ unsigned long long vpn_mask;
++ bool global;
++ int i;
+
+- htw_stop();
++ vpn_mask = GENMASK(current_cpu_data.vmbits - 1, VPN2_SHIFT);
++ vpn_mask |= cpu_has_64bits ? 3ULL << 62 : 1 << 31;
+
+- for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
+- unsigned long vpn;
++ for (i = 0; i < tlbsize; i++) {
++ unsigned long long entryhi, vpn, mask, asid;
++ unsigned int pagesz;
+
+ write_c0_index(i);
+ mtc0_tlbr_hazard();
+ tlb_read();
+ tlb_read_hazard();
+- vpn = read_c0_entryhi();
+- vpn &= vpn_mask & PAGE_MASK;
+- tlb_vpns[cnt] = vpn;
+
+- /* Prevent any large pages from overlapping regular ones. */
+- write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
+- mtc0_tlbw_hazard();
+- tlb_write_indexed();
+- tlbw_use_hazard();
++ global = !!(read_c0_entrylo0() & ENTRYLO_G);
++ entryhi = read_c0_entryhi_native();
++ mask = read_c0_pagemask();
++
++ asid = entryhi & cpu_asid_mask(&current_cpu_data);
++ vpn = (entryhi & vpn_mask & ~mask) >> VPN2_SHIFT;
++ pagesz = ilog2((mask >> VPN2_SHIFT) + 1);
++
++ tlb_vpns[i].global = global;
++ tlb_vpns[i].asid = global ? 0 : asid;
++ tlb_vpns[i].vpn = vpn;
++ tlb_vpns[i].pagesz = pagesz;
++ tlb_vpns[i].wired = i < start;
++ tlb_vpns[i].index = i;
+ }
++}
+
+- sort(tlb_vpns, cnt, sizeof(tlb_vpns[0]), r4k_vpn_cmp, NULL);
++/*
++ * Write unique values to all but the wired TLB entries each, using
++ * the 4KiB page size. This size might not be supported with R6, but
++ * EHINV is mandatory for R6, so we won't ever be called in that case.
++ *
++ * A sorted table is supplied with any wired entries at the beginning,
++ * followed by any global entries, and then finally regular entries.
++ * We start at the VPN and ASID values of zero and only assign user
++ * addresses, therefore guaranteeing no clash with addresses produced
++ * by UNIQUE_ENTRYHI. We avoid any VPN values used by wired or global
++ * entries, by increasing the VPN value beyond the span of such entry.
++ *
++ * When a VPN/ASID clash is found with a regular entry we increment the
++ * ASID instead until no VPN/ASID clash has been found or the ASID space
++ * has been exhausted, in which case we increase the VPN value beyond
++ * the span of the largest clashing entry.
++ *
++ * We do not need to be concerned about FTLB or MMID configurations as
++ * those are required to implement the EHINV feature.
++ */
++static void __ref r4k_tlb_uniquify_write(struct tlbent *tlb_vpns, int tlbsize)
++{
++ unsigned long long asid, vpn, vpn_size, pagesz;
++ int widx, gidx, idx, sidx, lidx, i;
+
+- write_c0_pagemask(PM_DEFAULT_MASK);
++ vpn_size = 1ULL << (current_cpu_data.vmbits - VPN2_SHIFT);
++ pagesz = ilog2((PM_4K >> VPN2_SHIFT) + 1);
++
++ write_c0_pagemask(PM_4K);
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+
+- idx = 0;
+- ent = tlbsize;
+- for (i = start; i < tlbsize; i++)
+- while (1) {
+- unsigned long entryhi, vpn;
++ asid = 0;
++ vpn = 0;
++ widx = 0;
++ gidx = 0;
++ for (sidx = 0; sidx < tlbsize && tlb_vpns[sidx].wired; sidx++)
++ ;
++ for (lidx = sidx; lidx < tlbsize && tlb_vpns[lidx].global; lidx++)
++ ;
++ idx = gidx = sidx + 1;
++ for (i = sidx; i < tlbsize; i++) {
++ unsigned long long entryhi, vpn_pagesz = 0;
+
+- entryhi = UNIQUE_ENTRYHI(ent);
+- vpn = entryhi & vpn_mask & PAGE_MASK;
++ while (1) {
++ if (WARN_ON(vpn >= vpn_size)) {
++ dump_tlb_all();
++ /* Pray local_flush_tlb_all() will cope. */
++ return;
++ }
+
+- if (idx >= cnt || vpn < tlb_vpns[idx]) {
+- write_c0_entryhi(entryhi);
+- write_c0_index(i);
+- mtc0_tlbw_hazard();
+- tlb_write_indexed();
+- ent++;
+- break;
+- } else if (vpn == tlb_vpns[idx]) {
+- ent++;
+- } else {
++ /* VPN must be below the next wired entry. */
++ if (widx < sidx && vpn >= tlb_vpns[widx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[widx].vpn +
++ (1ULL << tlb_vpns[widx].pagesz)));
++ asid = 0;
++ widx++;
++ continue;
++ }
++ /* VPN must be below the next global entry. */
++ if (gidx < lidx && vpn >= tlb_vpns[gidx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[gidx].vpn +
++ (1ULL << tlb_vpns[gidx].pagesz)));
++ asid = 0;
++ gidx++;
++ continue;
++ }
++ /* Try to find a free ASID so as to conserve VPNs. */
++ if (idx < tlbsize && vpn == tlb_vpns[idx].vpn &&
++ asid == tlb_vpns[idx].asid) {
++ unsigned long long idx_pagesz;
++
++ idx_pagesz = tlb_vpns[idx].pagesz;
++ vpn_pagesz = max(vpn_pagesz, idx_pagesz);
++ do
++ idx++;
++ while (idx < tlbsize &&
++ vpn == tlb_vpns[idx].vpn &&
++ asid == tlb_vpns[idx].asid);
++ asid++;
++ if (asid > cpu_asid_mask(&current_cpu_data)) {
++ vpn += vpn_pagesz;
++ asid = 0;
++ vpn_pagesz = 0;
++ }
++ continue;
++ }
++ /* VPN mustn't be above the next regular entry. */
++ if (idx < tlbsize && vpn > tlb_vpns[idx].vpn) {
++ vpn = max(vpn,
++ (tlb_vpns[idx].vpn +
++ (1ULL << tlb_vpns[idx].pagesz)));
++ asid = 0;
+ idx++;
++ continue;
+ }
++ break;
+ }
+
++ entryhi = (vpn << VPN2_SHIFT) | asid;
++ write_c0_entryhi_native(entryhi);
++ write_c0_index(tlb_vpns[i].index);
++ mtc0_tlbw_hazard();
++ tlb_write_indexed();
++
++ tlb_vpns[i].asid = asid;
++ tlb_vpns[i].vpn = vpn;
++ tlb_vpns[i].pagesz = pagesz;
++
++ asid++;
++ if (asid > cpu_asid_mask(&current_cpu_data)) {
++ vpn += 1ULL << pagesz;
++ asid = 0;
++ }
++ }
++}
++
++/*
++ * Initialise all TLB entries with unique values that do not clash with
++ * what we have been handed over and what we'll be using ourselves.
++ */
++static void __ref r4k_tlb_uniquify(void)
++{
++ int tlbsize = current_cpu_data.tlbsize;
++ bool use_slab = slab_is_available();
++ phys_addr_t tlb_vpn_size;
++ struct tlbent *tlb_vpns;
++
++ tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
++ tlb_vpns = (use_slab ?
++ kmalloc(tlb_vpn_size, GFP_ATOMIC) :
++ memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
++ if (WARN_ON(!tlb_vpns))
++ return; /* Pray local_flush_tlb_all() is good enough. */
++
++ htw_stop();
++
++ r4k_tlb_uniquify_read(tlb_vpns, tlbsize);
++
++ sort(tlb_vpns, tlbsize, sizeof(*tlb_vpns), r4k_entry_cmp, NULL);
++
++ r4k_tlb_uniquify_write(tlb_vpns, tlbsize);
++
++ write_c0_pagemask(PM_DEFAULT_MASK);
++
+ tlbw_use_hazard();
+ htw_start();
+ flush_micro_tlb();
+--
+2.53.0
+
--- /dev/null
+From d92e805d6cd497788d51d7d6a72d217c6ad6c26c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Apr 2026 01:51:51 +0100
+Subject: MIPS: mm: Suppress TLB uniquification on EHINV hardware
+
+From: Maciej W. Rozycki <macro@orcam.me.uk>
+
+commit 74283cfe216392c7b776ebf6045b5b15ed9dffcd upstream.
+
+Hardware that supports the EHINV feature, mandatory for R6 ISA and FTLB
+implementation, lets software mark TLB entries invalid, which eliminates
+the need to ensure no duplicate matching entries are ever created. This
+feature is already used by local_flush_tlb_all(), via the UNIQUE_ENTRYHI
+macro, making the preceding call to r4k_tlb_uniquify() superfluous.
+
+The next change will also modify uniquification code such that it'll
+become incompatible with the FTLB and MMID features, as well as MIPSr6
+CPUs that do not implement 4KiB pages.
+
+Therefore prevent r4k_tlb_uniquify() from being used on EHINV hardware,
+as denoted by `cpu_has_tlbinv'.
+
+Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/mm/tlb-r4k.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index 4d49ecf276103..18ae61b6f2b17 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -638,7 +638,8 @@ static void r4k_tlb_configure(void)
+ temp_tlb_entry = current_cpu_data.tlbsize - 1;
+
+ /* From this point on the ARC firmware is dead. */
+- r4k_tlb_uniquify();
++ if (!cpu_has_tlbinv)
++ r4k_tlb_uniquify();
+ local_flush_tlb_all();
+
+ /* Did I tell you that ARC SUCKS? */
+--
+2.53.0
+
--- /dev/null
+From 419e271f9f56090d84073eb4449a1650d5193625 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Apr 2026 14:01:47 +0800
+Subject: scsi: ufs: core: Fix use-after free in init error and remove paths
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: André Draszik <andre.draszik@linaro.org>
+
+[ Upstream commit f8fb2403ddebb5eea0033d90d9daae4c88749ada ]
+
+devm_blk_crypto_profile_init() registers a cleanup handler to run when
+the associated (platform-) device is being released. For UFS, the
+crypto private data and pointers are stored as part of the ufs_hba's
+data structure 'struct ufs_hba::crypto_profile'. This structure is
+allocated as part of the underlying ufshcd and therefore Scsi_host
+allocation.
+
+During driver release or during error handling in ufshcd_pltfrm_init(),
+this structure is released as part of ufshcd_dealloc_host() before the
+(platform-) device associated with the crypto call above is released.
+Once this device is released, the crypto cleanup code will run, using
+the just-released 'struct ufs_hba::crypto_profile'. This causes a
+use-after-free situation:
+
+ Call trace:
+ kfree+0x60/0x2d8 (P)
+ kvfree+0x44/0x60
+ blk_crypto_profile_destroy_callback+0x28/0x70
+ devm_action_release+0x1c/0x30
+ release_nodes+0x6c/0x108
+ devres_release_all+0x98/0x100
+ device_unbind_cleanup+0x20/0x70
+ really_probe+0x218/0x2d0
+
+In other words, the initialisation code flow is:
+
+ platform-device probe
+ ufshcd_pltfrm_init()
+ ufshcd_alloc_host()
+ scsi_host_alloc()
+ allocation of struct ufs_hba
+ creation of scsi-host devices
+ devm_blk_crypto_profile_init()
+ devm registration of cleanup handler using platform-device
+
+and during error handling of ufshcd_pltfrm_init() or during driver
+removal:
+
+ ufshcd_dealloc_host()
+ scsi_host_put()
+ put_device(scsi-host)
+ release of struct ufs_hba
+ put_device(platform-device)
+ crypto cleanup handler
+
+To fix this use-after free, change ufshcd_alloc_host() to register a
+devres action to automatically cleanup the underlying SCSI device on
+ufshcd destruction, without requiring explicit calls to
+ufshcd_dealloc_host(). This way:
+
+ * the crypto profile and all other ufs_hba-owned resources are
+ destroyed before SCSI (as they've been registered after)
+ * a memleak is plugged in tc-dwc-g210-pci.c remove() as a
+ side-effect
+ * EXPORT_SYMBOL_GPL(ufshcd_dealloc_host) can be removed fully as
+ it's not needed anymore
+ * no future drivers using ufshcd_alloc_host() could ever forget
+ adding the cleanup
+
+Fixes: cb77cb5abe1f ("blk-crypto: rename blk_keyslot_manager to blk_crypto_profile")
+Fixes: d76d9d7d1009 ("scsi: ufs: use devm_blk_ksm_init()")
+Cc: stable@vger.kernel.org
+Signed-off-by: André Draszik <andre.draszik@linaro.org>
+Link: https://lore.kernel.org/r/20250124-ufshcd-fix-v4-1-c5d0144aae59@linaro.org
+Reviewed-by: Bean Huo <beanhuo@micron.com>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Acked-by: Eric Biggers <ebiggers@kernel.org>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+[ Dropped the modifications to ufshcd_parse_operating_points() since it was
+added by commit 72208ebe181e3 ("scsi: ufs: core: Add support for parsing OPP"),
+and those in ufshcd_pltfrm_remove() since it was added by commit
+897df60c16d54 ("scsi: ufs: pltfrm: Dellocate HBA during ufshcd_pltfrm_remove()"). ]
+Signed-off-by: Robert Garcia <rob_garcia@163.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ufs/core/ufshcd.c | 31 +++++++++++++++++++++----------
+ drivers/ufs/host/ufshcd-pci.c | 2 --
+ drivers/ufs/host/ufshcd-pltfrm.c | 25 ++++++++-----------------
+ include/ufs/ufshcd.h | 1 -
+ 4 files changed, 29 insertions(+), 30 deletions(-)
+
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 2dcb0146c17e3..f6aada5150f9c 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -10475,16 +10475,6 @@ int ufshcd_system_thaw(struct device *dev)
+ EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
+ #endif /* CONFIG_PM_SLEEP */
+
+-/**
+- * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
+- * @hba: pointer to Host Bus Adapter (HBA)
+- */
+-void ufshcd_dealloc_host(struct ufs_hba *hba)
+-{
+- scsi_host_put(hba->host);
+-}
+-EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
+-
+ /**
+ * ufshcd_set_dma_mask - Set dma mask based on the controller
+ * addressing capability
+@@ -10503,12 +10493,26 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
+ return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
+ }
+
++/**
++ * ufshcd_devres_release - devres cleanup handler, invoked during release of
++ * hba->dev
++ * @host: pointer to SCSI host
++ */
++static void ufshcd_devres_release(void *host)
++{
++ scsi_host_put(host);
++}
++
+ /**
+ * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
+ * @dev: pointer to device handle
+ * @hba_handle: driver private handle
+ *
+ * Return: 0 on success, non-zero value on failure.
++ *
++ * NOTE: There is no corresponding ufshcd_dealloc_host() because this function
++ * keeps track of its allocations using devres and deallocates everything on
++ * device removal automatically.
+ */
+ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
+ {
+@@ -10530,6 +10534,13 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
+ err = -ENOMEM;
+ goto out_error;
+ }
++
++ err = devm_add_action_or_reset(dev, ufshcd_devres_release,
++ host);
++ if (err)
++ return dev_err_probe(dev, err,
++ "failed to add ufshcd dealloc action\n");
++
+ host->nr_maps = HCTX_TYPE_POLL + 1;
+ hba = shost_priv(host);
+ hba->host = host;
+diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
+index 4ecaaf52b3e95..3ff799497191f 100644
+--- a/drivers/ufs/host/ufshcd-pci.c
++++ b/drivers/ufs/host/ufshcd-pci.c
+@@ -620,7 +620,6 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
+ pm_runtime_forbid(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ ufshcd_remove(hba);
+- ufshcd_dealloc_host(hba);
+ }
+
+ /**
+@@ -665,7 +664,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ err = ufshcd_init(hba, mmio_base, pdev->irq);
+ if (err) {
+ dev_err(&pdev->dev, "Initialization failed\n");
+- ufshcd_dealloc_host(hba);
+ return err;
+ }
+
+diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
+index 797a4dfe45d90..0112ecbebe46a 100644
+--- a/drivers/ufs/host/ufshcd-pltfrm.c
++++ b/drivers/ufs/host/ufshcd-pltfrm.c
+@@ -339,21 +339,17 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
+ struct device *dev = &pdev->dev;
+
+ mmio_base = devm_platform_ioremap_resource(pdev, 0);
+- if (IS_ERR(mmio_base)) {
+- err = PTR_ERR(mmio_base);
+- goto out;
+- }
++ if (IS_ERR(mmio_base))
++ return PTR_ERR(mmio_base);
+
+ irq = platform_get_irq(pdev, 0);
+- if (irq < 0) {
+- err = irq;
+- goto out;
+- }
++ if (irq < 0)
++ return irq;
+
+ err = ufshcd_alloc_host(dev, &hba);
+ if (err) {
+ dev_err(dev, "Allocation failed\n");
+- goto out;
++ return err;
+ }
+
+ hba->vops = vops;
+@@ -362,13 +358,13 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
+ if (err) {
+ dev_err(dev, "%s: clock parse failed %d\n",
+ __func__, err);
+- goto dealloc_host;
++ return err;
+ }
+ err = ufshcd_parse_regulator_info(hba);
+ if (err) {
+ dev_err(dev, "%s: regulator init failed %d\n",
+ __func__, err);
+- goto dealloc_host;
++ return err;
+ }
+
+ ufshcd_init_lanes_per_dir(hba);
+@@ -377,18 +373,13 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
+ if (err) {
+ dev_err_probe(dev, err, "Initialization failed with error %d\n",
+ err);
+- goto dealloc_host;
++ return err;
+ }
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+-
+-dealloc_host:
+- ufshcd_dealloc_host(hba);
+-out:
+- return err;
+ }
+ EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init);
+
+diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
+index 8d2efb9e5d664..60942ed0040f1 100644
+--- a/include/ufs/ufshcd.h
++++ b/include/ufs/ufshcd.h
+@@ -1286,7 +1286,6 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
+ }
+
+ int ufshcd_alloc_host(struct device *, struct ufs_hba **);
+-void ufshcd_dealloc_host(struct ufs_hba *);
+ int ufshcd_hba_enable(struct ufs_hba *hba);
+ int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
+ int ufshcd_link_recovery(struct ufs_hba *hba);
+--
+2.53.0
+
mptcp-fix-slab-use-after-free-in-__inet_lookup_established.patch
input-uinput-fix-circular-locking-dependency-with-ff-core.patch
input-uinput-take-event-lock-when-submitting-ff-request-event.patch
+mips-always-record-segbits-in-cpu_data.vmbits.patch
+mips-mm-suppress-tlb-uniquification-on-ehinv-hardwar.patch
+mips-mm-rewrite-tlb-uniquification-for-the-hidden-bi.patch
+asoc-simple-card-utils-don-t-use-__free-device_node-.patch
+scsi-ufs-core-fix-use-after-free-in-init-error-and-r.patch