--- /dev/null
+From adb9743d6a08778b78d62d16b4230346d3508986 Mon Sep 17 00:00:00 2001
+From: Qi Zheng <zhengqi.arch@bytedance.com>
+Date: Sun, 25 Jun 2023 15:49:37 +0000
+Subject: binder: fix memory leak in binder_init()
+
+From: Qi Zheng <zhengqi.arch@bytedance.com>
+
+commit adb9743d6a08778b78d62d16b4230346d3508986 upstream.
+
+In binder_init(), the cleanup counterpart of binder_alloc_shrinker_init() is
+not performed in the error path, which will cause memory leaks. So this
+commit introduces binder_alloc_shrinker_exit() and calls it in the error
+path to fix that.
+
+Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
+Acked-by: Carlos Llamas <cmllamas@google.com>
+Fixes: f2517eb76f1f ("android: binder: Add global lru shrinker to binder")
+Cc: stable <stable@kernel.org>
+Link: https://lore.kernel.org/r/20230625154937.64316-1-qi.zheng@linux.dev
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/android/binder.c | 1 +
+ drivers/android/binder_alloc.c | 6 ++++++
+ drivers/android/binder_alloc.h | 1 +
+ 3 files changed, 8 insertions(+)
+
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -6602,6 +6602,7 @@ err_init_binder_device_failed:
+
+ err_alloc_device_names_failed:
+ debugfs_remove_recursive(binder_debugfs_dir_entry_root);
++ binder_alloc_shrinker_exit();
+
+ return ret;
+ }
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -1087,6 +1087,12 @@ int binder_alloc_shrinker_init(void)
+ return ret;
+ }
+
++void binder_alloc_shrinker_exit(void)
++{
++ unregister_shrinker(&binder_shrinker);
++ list_lru_destroy(&binder_alloc_lru);
++}
++
+ /**
+ * check_buffer() - verify that buffer/offset is safe to access
+ * @alloc: binder_alloc for this proc
+--- a/drivers/android/binder_alloc.h
++++ b/drivers/android/binder_alloc.h
+@@ -129,6 +129,7 @@ extern struct binder_buffer *binder_allo
+ int pid);
+ extern void binder_alloc_init(struct binder_alloc *alloc);
+ extern int binder_alloc_shrinker_init(void);
++extern void binder_alloc_shrinker_exit(void);
+ extern void binder_alloc_vma_close(struct binder_alloc *alloc);
+ extern struct binder_buffer *
+ binder_alloc_prepare_to_free(struct binder_alloc *alloc,
--- /dev/null
+From d5712cd22b9cf109fded1b7f178f4c1888c8b84b Mon Sep 17 00:00:00 2001
+From: Karol Herbst <kherbst@redhat.com>
+Date: Sat, 5 Aug 2023 12:18:13 +0200
+Subject: drm/nouveau/disp: Revert a NULL check inside nouveau_connector_get_modes
+
+From: Karol Herbst <kherbst@redhat.com>
+
+commit d5712cd22b9cf109fded1b7f178f4c1888c8b84b upstream.
+
+The original commit adding that check tried to protect the kernel against
+a potential invalid NULL pointer access.
+
+However, we deliberately call nouveau_connector_detect_depth() once without
+a native_mode set for non-LVDS connectors, and the added check broke DP
+support in a few cases.
+
+Cc: Olaf Skibbe <news@kravcenko.com>
+Cc: Lyude Paul <lyude@redhat.com>
+Closes: https://gitlab.freedesktop.org/drm/nouveau/-/issues/238
+Closes: https://gitlab.freedesktop.org/drm/nouveau/-/issues/245
+Fixes: 20a2ce87fbaf8 ("drm/nouveau/dp: check for NULL nv_connector->native_mode")
+Signed-off-by: Karol Herbst <kherbst@redhat.com>
+Reviewed-by: Lyude Paul <lyude@redhat.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230805101813.2603989-1-kherbst@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/nouveau/nouveau_connector.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -967,7 +967,7 @@ nouveau_connector_get_modes(struct drm_c
+ /* Determine display colour depth for everything except LVDS now,
+ * DP requires this before mode_valid() is called.
+ */
+- if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode)
++ if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
+ nouveau_connector_detect_depth(connector);
+
+ /* Find the native mode if this is a digital panel, if we didn't
--- /dev/null
+From 6bc471b6c3aeaa7b95d1b86a1bb8d91a3c341fa5 Mon Sep 17 00:00:00 2001
+From: Alisa Roman <alisa.roman@analog.com>
+Date: Wed, 14 Jun 2023 18:52:43 +0300
+Subject: iio: adc: ad7192: Fix ac excitation feature
+
+From: Alisa Roman <alisa.roman@analog.com>
+
+commit 6bc471b6c3aeaa7b95d1b86a1bb8d91a3c341fa5 upstream.
+
+The AC excitation enable feature was exposed to the user on the AD7192,
+allowing a bit which should be 0 to be set. This feature is specific to the
+AD7195 only, so the AC excitation attribute is moved accordingly.
+
+In the AD7195 documentation, the AC excitation enable bit is at position
+22 in the Configuration register. The ACX macro is changed to match the
+correct register and bit.
+
+Note that the fix tag is for the commit that moved the driver out of
+staging.
+
+Fixes: b581f748cce0 ("staging: iio: adc: ad7192: move out of staging")
+Signed-off-by: Alisa Roman <alisa.roman@analog.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Nuno Sa <nuno.sa@analog.com>
+Link: https://lore.kernel.org/r/20230614155242.160296-1-alisa.roman@analog.com
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/adc/ad7192.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/drivers/iio/adc/ad7192.c
++++ b/drivers/iio/adc/ad7192.c
+@@ -62,7 +62,6 @@
+ #define AD7192_MODE_STA_MASK BIT(20) /* Status Register transmission Mask */
+ #define AD7192_MODE_CLKSRC(x) (((x) & 0x3) << 18) /* Clock Source Select */
+ #define AD7192_MODE_SINC3 BIT(15) /* SINC3 Filter Select */
+-#define AD7192_MODE_ACX BIT(14) /* AC excitation enable(AD7195 only)*/
+ #define AD7192_MODE_ENPAR BIT(13) /* Parity Enable */
+ #define AD7192_MODE_CLKDIV BIT(12) /* Clock divide by 2 (AD7190/2 only)*/
+ #define AD7192_MODE_SCYCLE BIT(11) /* Single cycle conversion */
+@@ -91,6 +90,7 @@
+ /* Configuration Register Bit Designations (AD7192_REG_CONF) */
+
+ #define AD7192_CONF_CHOP BIT(23) /* CHOP enable */
++#define AD7192_CONF_ACX BIT(22) /* AC excitation enable(AD7195 only) */
+ #define AD7192_CONF_REFSEL BIT(20) /* REFIN1/REFIN2 Reference Select */
+ #define AD7192_CONF_CHAN(x) ((x) << 8) /* Channel select */
+ #define AD7192_CONF_CHAN_MASK (0x7FF << 8) /* Channel select mask */
+@@ -473,7 +473,7 @@ static ssize_t ad7192_show_ac_excitation
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ad7192_state *st = iio_priv(indio_dev);
+
+- return sysfs_emit(buf, "%d\n", !!(st->mode & AD7192_MODE_ACX));
++ return sysfs_emit(buf, "%d\n", !!(st->conf & AD7192_CONF_ACX));
+ }
+
+ static ssize_t ad7192_show_bridge_switch(struct device *dev,
+@@ -514,13 +514,13 @@ static ssize_t ad7192_set(struct device
+
+ ad_sd_write_reg(&st->sd, AD7192_REG_GPOCON, 1, st->gpocon);
+ break;
+- case AD7192_REG_MODE:
++ case AD7192_REG_CONF:
+ if (val)
+- st->mode |= AD7192_MODE_ACX;
++ st->conf |= AD7192_CONF_ACX;
+ else
+- st->mode &= ~AD7192_MODE_ACX;
++ st->conf &= ~AD7192_CONF_ACX;
+
+- ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
++ ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, st->conf);
+ break;
+ default:
+ ret = -EINVAL;
+@@ -580,12 +580,11 @@ static IIO_DEVICE_ATTR(bridge_switch_en,
+
+ static IIO_DEVICE_ATTR(ac_excitation_en, 0644,
+ ad7192_show_ac_excitation, ad7192_set,
+- AD7192_REG_MODE);
++ AD7192_REG_CONF);
+
+ static struct attribute *ad7192_attributes[] = {
+ &iio_dev_attr_filter_low_pass_3db_frequency_available.dev_attr.attr,
+ &iio_dev_attr_bridge_switch_en.dev_attr.attr,
+- &iio_dev_attr_ac_excitation_en.dev_attr.attr,
+ NULL
+ };
+
+@@ -596,6 +595,7 @@ static const struct attribute_group ad71
+ static struct attribute *ad7195_attributes[] = {
+ &iio_dev_attr_filter_low_pass_3db_frequency_available.dev_attr.attr,
+ &iio_dev_attr_bridge_switch_en.dev_attr.attr,
++ &iio_dev_attr_ac_excitation_en.dev_attr.attr,
+ NULL
+ };
+
--- /dev/null
+From a41e19cc0d6b6a445a4133170b90271e4a2553dc Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Alvin=20=C5=A0ipraga?= <alsi@bang-olufsen.dk>
+Date: Mon, 19 Jun 2023 16:12:39 +0200
+Subject: iio: adc: ina2xx: avoid NULL pointer dereference on OF device match
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Alvin Šipraga <alsi@bang-olufsen.dk>
+
+commit a41e19cc0d6b6a445a4133170b90271e4a2553dc upstream.
+
+The affected lines were resulting in a NULL pointer dereference on our
+platform because the device tree contained the following list of
+compatible strings:
+
+ power-sensor@40 {
+ compatible = "ti,ina232", "ti,ina231";
+ ...
+ };
+
+Since the driver doesn't declare a compatible string "ti,ina232", the OF
+matching succeeds on "ti,ina231". But the I2C device ID info is
+populated via the first compatible string, cf. modalias population in
+of_i2c_get_board_info(). Since there is no "ina232" entry in the legacy
+I2C device ID table either, the struct i2c_device_id *id pointer in the
+probe function is NULL.
+
+Fix this by using the already populated type variable instead, which
+points to the proper driver data. Since the name is also wanted, add a
+generic one to the ina2xx_config table.
+
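+For illustration, the resulting fallback in probe looks like this (the
+comment is explanatory, not part of the patch):
+
+	/* OF match succeeded on "ti,ina231", but the modalias was built from
+	 * the first compatible ("ti,ina232"), which has no i2c_device_id
+	 * entry, so 'id' may be NULL; fall back to the name in ina2xx_config.
+	 */
+	indio_dev->name = id ? id->name : chip->config->name;
+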
+Signed-off-by: Alvin Šipraga <alsi@bang-olufsen.dk>
+Fixes: c43a102e67db ("iio: ina2xx: add support for TI INA2xx Power Monitors")
+Link: https://lore.kernel.org/r/20230619141239.2257392-1-alvin@pqrs.dk
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/adc/ina2xx-adc.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/iio/adc/ina2xx-adc.c
++++ b/drivers/iio/adc/ina2xx-adc.c
+@@ -124,6 +124,7 @@ static const struct regmap_config ina2xx
+ enum ina2xx_ids { ina219, ina226 };
+
+ struct ina2xx_config {
++ const char *name;
+ u16 config_default;
+ int calibration_value;
+ int shunt_voltage_lsb; /* nV */
+@@ -155,6 +156,7 @@ struct ina2xx_chip_info {
+
+ static const struct ina2xx_config ina2xx_config[] = {
+ [ina219] = {
++ .name = "ina219",
+ .config_default = INA219_CONFIG_DEFAULT,
+ .calibration_value = 4096,
+ .shunt_voltage_lsb = 10000,
+@@ -164,6 +166,7 @@ static const struct ina2xx_config ina2xx
+ .chip_id = ina219,
+ },
+ [ina226] = {
++ .name = "ina226",
+ .config_default = INA226_CONFIG_DEFAULT,
+ .calibration_value = 2048,
+ .shunt_voltage_lsb = 2500,
+@@ -996,7 +999,7 @@ static int ina2xx_probe(struct i2c_clien
+ /* Patch the current config register with default. */
+ val = chip->config->config_default;
+
+- if (id->driver_data == ina226) {
++ if (type == ina226) {
+ ina226_set_average(chip, INA226_DEFAULT_AVG, &val);
+ ina226_set_int_time_vbus(chip, INA226_DEFAULT_IT, &val);
+ ina226_set_int_time_vshunt(chip, INA226_DEFAULT_IT, &val);
+@@ -1015,7 +1018,7 @@ static int ina2xx_probe(struct i2c_clien
+ }
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+- if (id->driver_data == ina226) {
++ if (type == ina226) {
+ indio_dev->channels = ina226_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ina226_channels);
+ indio_dev->info = &ina226_info;
+@@ -1024,7 +1027,7 @@ static int ina2xx_probe(struct i2c_clien
+ indio_dev->num_channels = ARRAY_SIZE(ina219_channels);
+ indio_dev->info = &ina219_info;
+ }
+- indio_dev->name = id->name;
++ indio_dev->name = id ? id->name : chip->config->name;
+
+ ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev,
+ &ina2xx_setup_ops);
--- /dev/null
+From b2a69969908fcaf68596dfc04369af0fe2e1d2f7 Mon Sep 17 00:00:00 2001
+From: Milan Zamazal <mzamazal@redhat.com>
+Date: Wed, 19 Jul 2023 10:32:08 +0200
+Subject: iio: core: Prevent invalid memory access when there is no parent
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Milan Zamazal <mzamazal@redhat.com>
+
+commit b2a69969908fcaf68596dfc04369af0fe2e1d2f7 upstream.
+
+Commit 813665564b3d ("iio: core: Convert to use firmware node handle
+instead of OF node") switched the kind of nodes to use for label
+retrieval in device registration. Probably an unwanted change in that
+commit was that if the device has no parent then NULL pointer is
+accessed. This is what happens in the stock IIO dummy driver when a
+new entry is created in configfs:
+
+ # mkdir /sys/kernel/config/iio/devices/dummy/foo
+ BUG: kernel NULL pointer dereference, address: ...
+ ...
+ Call Trace:
+ __iio_device_register
+ iio_dummy_probe
+
+Since there seems to be no reason to make a parent device of an IIO
+dummy device mandatory, let’s prevent the invalid memory access in
+__iio_device_register when the parent device is NULL. With this
+change, the IIO dummy driver works fine with configfs.
+
+Fixes: 813665564b3d ("iio: core: Convert to use firmware node handle instead of OF node")
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Milan Zamazal <mzamazal@redhat.com>
+Link: https://lore.kernel.org/r/20230719083208.88149-1-mzamazal@redhat.com
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/industrialio-core.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/iio/industrialio-core.c
++++ b/drivers/iio/industrialio-core.c
+@@ -1916,7 +1916,7 @@ static const struct iio_buffer_setup_ops
+ int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
+ {
+ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+- struct fwnode_handle *fwnode;
++ struct fwnode_handle *fwnode = NULL;
+ int ret;
+
+ if (!indio_dev->info)
+@@ -1927,7 +1927,8 @@ int __iio_device_register(struct iio_dev
+ /* If the calling driver did not initialize firmware node, do it here */
+ if (dev_fwnode(&indio_dev->dev))
+ fwnode = dev_fwnode(&indio_dev->dev);
+- else
++ /* The default dummy IIO device has no parent */
++ else if (indio_dev->dev.parent)
+ fwnode = dev_fwnode(indio_dev->dev.parent);
+ device_set_node(&indio_dev->dev, fwnode);
+
--- /dev/null
+From 8a4629055ef55177b5b63dab1ecce676bd8cccdd Mon Sep 17 00:00:00 2001
+From: Yiyuan Guo <yguoaz@gmail.com>
+Date: Fri, 30 Jun 2023 22:37:19 +0800
+Subject: iio: cros_ec: Fix the allocation size for cros_ec_command
+
+From: Yiyuan Guo <yguoaz@gmail.com>
+
+commit 8a4629055ef55177b5b63dab1ecce676bd8cccdd upstream.
+
+The struct cros_ec_command contains several integer fields and a
+trailing array. An allocation size neglecting the integer fields can
+lead to buffer overrun.
+
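+As a schematic illustration (field names abridged, not a definition to rely
+on), the allocation must cover the fixed header plus the payload:
+
+	struct cros_ec_command {
+		uint32_t version;
+		uint32_t command;
+		uint32_t outsize;
+		uint32_t insize;
+		uint32_t result;
+		uint8_t data[];		/* trailing payload */
+	};
+
+	/* header + payload, not just the payload size */
+	msg = devm_kzalloc(dev, sizeof(*msg) + payload_len, GFP_KERNEL);
+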
+Reviewed-by: Tzung-Bi Shih <tzungbi@kernel.org>
+Signed-off-by: Yiyuan Guo <yguoaz@gmail.com>
+Fixes: 974e6f02e27e ("iio: cros_ec_sensors_core: Add common functions for the ChromeOS EC Sensor Hub.")
+Link: https://lore.kernel.org/r/20230630143719.1513906-1-yguoaz@gmail.com
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
++++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+@@ -253,7 +253,7 @@ int cros_ec_sensors_core_init(struct pla
+ platform_set_drvdata(pdev, indio_dev);
+
+ state->ec = ec->ec_dev;
+- state->msg = devm_kzalloc(&pdev->dev,
++ state->msg = devm_kzalloc(&pdev->dev, sizeof(*state->msg) +
+ max((u16)sizeof(struct ec_params_motion_sense),
+ state->ec->max_response), GFP_KERNEL);
+ if (!state->msg)
--- /dev/null
+From 507397d19b5a296aa339f7a1bd16284f668a1906 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@linaro.org>
+Date: Tue, 18 Jul 2023 10:02:18 +0300
+Subject: iio: frequency: admv1013: propagate errors from regulator_get_voltage()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+commit 507397d19b5a296aa339f7a1bd16284f668a1906 upstream.
+
+The regulator_get_voltage() function returns negative error codes.
+The driver saves the result to an unsigned int and then does some range
+checking and, since a negative error code falls outside the valid range,
+it returns -EINVAL.
+
+Beyond the messiness, this is bad because the regulator_get_voltage()
+function can return -EPROBE_DEFER and it's important to propagate that
+back properly so it can be handled.
+
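+The pattern after the fix, schematically:
+
+	int vcm;
+
+	vcm = regulator_get_voltage(st->reg);
+	if (vcm < 0)
+		return vcm;	/* propagate the error, e.g. -EPROBE_DEFER */
+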
+Fixes: da35a7b526d9 ("iio: frequency: admv1013: add support for ADMV1013")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://lore.kernel.org/r/ce75aac3-2aba-4435-8419-02e59fdd862b@moroto.mountain
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iio/frequency/admv1013.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/iio/frequency/admv1013.c
++++ b/drivers/iio/frequency/admv1013.c
+@@ -344,9 +344,12 @@ static int admv1013_update_quad_filters(
+
+ static int admv1013_update_mixer_vgate(struct admv1013_state *st)
+ {
+- unsigned int vcm, mixer_vgate;
++ unsigned int mixer_vgate;
++ int vcm;
+
+ vcm = regulator_get_voltage(st->reg);
++ if (vcm < 0)
++ return vcm;
+
+ if (vcm < 1800000)
+ mixer_vgate = (2389 * vcm / 1000000 + 8100) / 100;
--- /dev/null
+From 101bd907b4244a726980ee67f95ed9cafab6ff7a Mon Sep 17 00:00:00 2001
+From: Ricky WU <ricky_wu@realtek.com>
+Date: Tue, 25 Jul 2023 09:10:54 +0000
+Subject: misc: rtsx: judge ASPM Mode to set PETXCFG Reg
+
+From: Ricky WU <ricky_wu@realtek.com>
+
+commit 101bd907b4244a726980ee67f95ed9cafab6ff7a upstream.
+
+If the ASPM mode is ASPM_MODE_CFG, we need to judge the value of clkreq_0
+to set PETXCFG HIGH or LOW; if the ASPM mode is ASPM_MODE_REG, always set
+PETXCFG to HIGH during initialization.
+
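+Schematically (a condensed sketch based on the hunks below, not verbatim
+driver code):
+
+	if (pcr->aspm_mode == ASPM_MODE_CFG) {
+		/* judge force_clkreq_0 to decide the PETXCFG level */
+		if (option->force_clkreq_0)
+			rtsx_pci_write_register(pcr, PETXCFG,
+				FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+		else
+			rtsx_pci_write_register(pcr, PETXCFG,
+				FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+	} else if (pcr->aspm_mode == ASPM_MODE_REG) {
+		/* always force CLKREQ# high during initialization */
+		rtsx_pci_write_register(pcr, PETXCFG,
+			FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+	}
+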
+Cc: stable@vger.kernel.org
+Signed-off-by: Ricky Wu <ricky_wu@realtek.com>
+Link: https://lore.kernel.org/r/52906c6836374c8cb068225954c5543a@realtek.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/cardreader/rts5227.c | 2 +-
+ drivers/misc/cardreader/rts5228.c | 18 ------------------
+ drivers/misc/cardreader/rts5249.c | 3 +--
+ drivers/misc/cardreader/rts5260.c | 18 ------------------
+ drivers/misc/cardreader/rts5261.c | 18 ------------------
+ drivers/misc/cardreader/rtsx_pcr.c | 5 ++++-
+ 6 files changed, 6 insertions(+), 58 deletions(-)
+
+--- a/drivers/misc/cardreader/rts5227.c
++++ b/drivers/misc/cardreader/rts5227.c
+@@ -195,7 +195,7 @@ static int rts5227_extra_init_hw(struct
+ }
+ }
+
+- if (option->force_clkreq_0)
++ if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+--- a/drivers/misc/cardreader/rts5228.c
++++ b/drivers/misc/cardreader/rts5228.c
+@@ -435,17 +435,10 @@ static void rts5228_init_from_cfg(struct
+ option->ltr_enabled = false;
+ }
+ }
+-
+- if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+- | PM_L1_1_EN | PM_L1_2_EN))
+- option->force_clkreq_0 = false;
+- else
+- option->force_clkreq_0 = true;
+ }
+
+ static int rts5228_extra_init_hw(struct rtsx_pcr *pcr)
+ {
+- struct rtsx_cr_option *option = &pcr->option;
+
+ rtsx_pci_write_register(pcr, RTS5228_AUTOLOAD_CFG1,
+ CD_RESUME_EN_MASK, CD_RESUME_EN_MASK);
+@@ -476,17 +469,6 @@ static int rts5228_extra_init_hw(struct
+ else
+ rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
+
+- /*
+- * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
+- * to drive low, and we forcibly request clock.
+- */
+- if (option->force_clkreq_0)
+- rtsx_pci_write_register(pcr, PETXCFG,
+- FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+- else
+- rtsx_pci_write_register(pcr, PETXCFG,
+- FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+-
+ rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB);
+
+ if (pcr->rtd3_en) {
+--- a/drivers/misc/cardreader/rts5249.c
++++ b/drivers/misc/cardreader/rts5249.c
+@@ -327,12 +327,11 @@ static int rts5249_extra_init_hw(struct
+ }
+ }
+
+-
+ /*
+ * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
+ * to drive low, and we forcibly request clock.
+ */
+- if (option->force_clkreq_0)
++ if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG)
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+--- a/drivers/misc/cardreader/rts5260.c
++++ b/drivers/misc/cardreader/rts5260.c
+@@ -517,17 +517,10 @@ static void rts5260_init_from_cfg(struct
+ option->ltr_enabled = false;
+ }
+ }
+-
+- if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+- | PM_L1_1_EN | PM_L1_2_EN))
+- option->force_clkreq_0 = false;
+- else
+- option->force_clkreq_0 = true;
+ }
+
+ static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
+ {
+- struct rtsx_cr_option *option = &pcr->option;
+
+ /* Set mcu_cnt to 7 to ensure data can be sampled properly */
+ rtsx_pci_write_register(pcr, 0xFC03, 0x7F, 0x07);
+@@ -546,17 +539,6 @@ static int rts5260_extra_init_hw(struct
+
+ rts5260_init_hw(pcr);
+
+- /*
+- * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
+- * to drive low, and we forcibly request clock.
+- */
+- if (option->force_clkreq_0)
+- rtsx_pci_write_register(pcr, PETXCFG,
+- FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+- else
+- rtsx_pci_write_register(pcr, PETXCFG,
+- FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+-
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00);
+
+ return 0;
+--- a/drivers/misc/cardreader/rts5261.c
++++ b/drivers/misc/cardreader/rts5261.c
+@@ -498,17 +498,10 @@ static void rts5261_init_from_cfg(struct
+ option->ltr_enabled = false;
+ }
+ }
+-
+- if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+- | PM_L1_1_EN | PM_L1_2_EN))
+- option->force_clkreq_0 = false;
+- else
+- option->force_clkreq_0 = true;
+ }
+
+ static int rts5261_extra_init_hw(struct rtsx_pcr *pcr)
+ {
+- struct rtsx_cr_option *option = &pcr->option;
+ u32 val;
+
+ rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG1,
+@@ -554,17 +547,6 @@ static int rts5261_extra_init_hw(struct
+ else
+ rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
+
+- /*
+- * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
+- * to drive low, and we forcibly request clock.
+- */
+- if (option->force_clkreq_0)
+- rtsx_pci_write_register(pcr, PETXCFG,
+- FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+- else
+- rtsx_pci_write_register(pcr, PETXCFG,
+- FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+-
+ rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB);
+
+ if (pcr->rtd3_en) {
+--- a/drivers/misc/cardreader/rtsx_pcr.c
++++ b/drivers/misc/cardreader/rtsx_pcr.c
+@@ -1326,8 +1326,11 @@ static int rtsx_pci_init_hw(struct rtsx_
+ return err;
+ }
+
+- if (pcr->aspm_mode == ASPM_MODE_REG)
++ if (pcr->aspm_mode == ASPM_MODE_REG) {
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
++ rtsx_pci_write_register(pcr, PETXCFG,
++ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
++ }
+
+ /* No CD interrupt if probing driver with card inserted.
+ * So we need to initialize pcr->card_exist here.
--- /dev/null
+From f6c383b8c31a93752a52697f8430a71dcbc46adf Mon Sep 17 00:00:00 2001
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Wed, 9 Aug 2023 14:54:23 +0200
+Subject: netfilter: nf_tables: adapt set backend to use GC transaction API
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+commit f6c383b8c31a93752a52697f8430a71dcbc46adf upstream.
+
+Use the GC transaction API to replace the old and buggy gc API and the
+busy mark approach.
+
+No set elements are removed by async garbage collection anymore; instead,
+the _DEAD bit is set on them so the set element is no longer visible from
+the lookup path. Async GC enqueues transaction work that might be aborted
+and retried later.
+
+The rbtree and pipapo set backends do not set the _DEAD bit from the sync
+GC path, since this runs in the control plane path where the mutex is held.
+In this case, set elements are deactivated, removed and then released via
+RCU callback; sync GC never fails.
+
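+A condensed sketch of the async GC flow introduced below (pseudo-code, see
+the nft_rhash_gc() hunk for the real loop):
+
+	gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL);
+	for each element {
+		if (READ_ONCE(nft_net->gc_seq) != gc_seq)
+			goto try_later;		/* ruleset changed, retry */
+		if (expired) {
+			nft_set_elem_dead(&ext);	/* hide from lookups */
+			gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
+			nft_trans_gc_elem_add(gc, elem);
+		}
+	}
+	nft_trans_gc_queue_async_done(gc);	/* removal and freeing happen
+						 * later from transaction work */
+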
+Fixes: 3c4287f62044 ("nf_tables: Add set type for arbitrary concatenation of ranges")
+Fixes: 8d8540c4f5e0 ("netfilter: nft_set_rbtree: add timeout support")
+Fixes: 9d0982927e79 ("netfilter: nft_hash: add support for timeouts")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netfilter/nf_tables_api.c | 7 -
+ net/netfilter/nft_set_hash.c | 77 +++++++++++++--------
+ net/netfilter/nft_set_pipapo.c | 48 ++++++++++---
+ net/netfilter/nft_set_rbtree.c | 146 ++++++++++++++++++++++++-----------------
+ 4 files changed, 174 insertions(+), 104 deletions(-)
+
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -6128,7 +6128,6 @@ static void nft_setelem_activate(struct
+
+ if (nft_setelem_is_catchall(set, elem)) {
+ nft_set_elem_change_active(net, set, ext);
+- nft_set_elem_clear_busy(ext);
+ } else {
+ set->ops->activate(net, set, elem);
+ }
+@@ -6143,8 +6142,7 @@ static int nft_setelem_catchall_deactiva
+
+ list_for_each_entry(catchall, &set->catchall_list, list) {
+ ext = nft_set_elem_ext(set, catchall->elem);
+- if (!nft_is_active(net, ext) ||
+- nft_set_elem_mark_busy(ext))
++ if (!nft_is_active(net, ext))
+ continue;
+
+ kfree(elem->priv);
+@@ -6847,8 +6845,7 @@ static int nft_set_catchall_flush(const
+
+ list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
+ ext = nft_set_elem_ext(set, catchall->elem);
+- if (!nft_set_elem_active(ext, genmask) ||
+- nft_set_elem_mark_busy(ext))
++ if (!nft_set_elem_active(ext, genmask))
+ continue;
+
+ elem.priv = catchall->elem;
+--- a/net/netfilter/nft_set_hash.c
++++ b/net/netfilter/nft_set_hash.c
+@@ -59,6 +59,8 @@ static inline int nft_rhash_cmp(struct r
+
+ if (memcmp(nft_set_ext_key(&he->ext), x->key, x->set->klen))
+ return 1;
++ if (nft_set_elem_is_dead(&he->ext))
++ return 1;
+ if (nft_set_elem_expired(&he->ext))
+ return 1;
+ if (!nft_set_elem_active(&he->ext, x->genmask))
+@@ -188,7 +190,6 @@ static void nft_rhash_activate(const str
+ struct nft_rhash_elem *he = elem->priv;
+
+ nft_set_elem_change_active(net, set, &he->ext);
+- nft_set_elem_clear_busy(&he->ext);
+ }
+
+ static bool nft_rhash_flush(const struct net *net,
+@@ -196,12 +197,9 @@ static bool nft_rhash_flush(const struct
+ {
+ struct nft_rhash_elem *he = priv;
+
+- if (!nft_set_elem_mark_busy(&he->ext) ||
+- !nft_is_active(net, &he->ext)) {
+- nft_set_elem_change_active(net, set, &he->ext);
+- return true;
+- }
+- return false;
++ nft_set_elem_change_active(net, set, &he->ext);
++
++ return true;
+ }
+
+ static void *nft_rhash_deactivate(const struct net *net,
+@@ -218,9 +216,8 @@ static void *nft_rhash_deactivate(const
+
+ rcu_read_lock();
+ he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
+- if (he != NULL &&
+- !nft_rhash_flush(net, set, he))
+- he = NULL;
++ if (he)
++ nft_set_elem_change_active(net, set, &he->ext);
+
+ rcu_read_unlock();
+
+@@ -312,25 +309,48 @@ static bool nft_rhash_expr_needs_gc_run(
+
+ static void nft_rhash_gc(struct work_struct *work)
+ {
++ struct nftables_pernet *nft_net;
+ struct nft_set *set;
+ struct nft_rhash_elem *he;
+ struct nft_rhash *priv;
+- struct nft_set_gc_batch *gcb = NULL;
+ struct rhashtable_iter hti;
++ struct nft_trans_gc *gc;
++ struct net *net;
++ u32 gc_seq;
+
+ priv = container_of(work, struct nft_rhash, gc_work.work);
+ set = nft_set_container_of(priv);
++ net = read_pnet(&set->net);
++ nft_net = nft_pernet(net);
++ gc_seq = READ_ONCE(nft_net->gc_seq);
++
++ gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL);
++ if (!gc)
++ goto done;
+
+ rhashtable_walk_enter(&priv->ht, &hti);
+ rhashtable_walk_start(&hti);
+
+ while ((he = rhashtable_walk_next(&hti))) {
+ if (IS_ERR(he)) {
+- if (PTR_ERR(he) != -EAGAIN)
+- break;
++ if (PTR_ERR(he) != -EAGAIN) {
++ nft_trans_gc_destroy(gc);
++ gc = NULL;
++ goto try_later;
++ }
+ continue;
+ }
+
++ /* Ruleset has been updated, try later. */
++ if (READ_ONCE(nft_net->gc_seq) != gc_seq) {
++ nft_trans_gc_destroy(gc);
++ gc = NULL;
++ goto try_later;
++ }
++
++ if (nft_set_elem_is_dead(&he->ext))
++ goto dead_elem;
++
+ if (nft_set_ext_exists(&he->ext, NFT_SET_EXT_EXPRESSIONS) &&
+ nft_rhash_expr_needs_gc_run(set, &he->ext))
+ goto needs_gc_run;
+@@ -338,26 +358,26 @@ static void nft_rhash_gc(struct work_str
+ if (!nft_set_elem_expired(&he->ext))
+ continue;
+ needs_gc_run:
+- if (nft_set_elem_mark_busy(&he->ext))
+- continue;
++ nft_set_elem_dead(&he->ext);
++dead_elem:
++ gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
++ if (!gc)
++ goto try_later;
+
+- gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
+- if (gcb == NULL)
+- break;
+- rhashtable_remove_fast(&priv->ht, &he->node, nft_rhash_params);
+- atomic_dec(&set->nelems);
+- nft_set_gc_batch_add(gcb, he);
++ nft_trans_gc_elem_add(gc, he);
+ }
++
++ gc = nft_trans_gc_catchall(gc, gc_seq);
++
++try_later:
++ /* catchall list iteration requires rcu read side lock. */
+ rhashtable_walk_stop(&hti);
+ rhashtable_walk_exit(&hti);
+
+- he = nft_set_catchall_gc(set);
+- if (he) {
+- gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
+- if (gcb)
+- nft_set_gc_batch_add(gcb, he);
+- }
+- nft_set_gc_batch_complete(gcb);
++ if (gc)
++ nft_trans_gc_queue_async_done(gc);
++
++done:
+ queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
+ nft_set_gc_interval(set));
+ }
+@@ -420,7 +440,6 @@ static void nft_rhash_destroy(const stru
+ };
+
+ cancel_delayed_work_sync(&priv->gc_work);
+- rcu_barrier();
+ rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy,
+ (void *)&rhash_ctx);
+ }
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -1537,16 +1537,34 @@ static void pipapo_drop(struct nft_pipap
+ }
+ }
+
++static void nft_pipapo_gc_deactivate(struct net *net, struct nft_set *set,
++ struct nft_pipapo_elem *e)
++
++{
++ struct nft_set_elem elem = {
++ .priv = e,
++ };
++
++ nft_setelem_data_deactivate(net, set, &elem);
++}
++
+ /**
+ * pipapo_gc() - Drop expired entries from set, destroy start and end elements
+ * @set: nftables API set representation
+ * @m: Matching data
+ */
+-static void pipapo_gc(const struct nft_set *set, struct nft_pipapo_match *m)
++static void pipapo_gc(const struct nft_set *_set, struct nft_pipapo_match *m)
+ {
++ struct nft_set *set = (struct nft_set *) _set;
+ struct nft_pipapo *priv = nft_set_priv(set);
++ struct net *net = read_pnet(&set->net);
+ int rules_f0, first_rule = 0;
+ struct nft_pipapo_elem *e;
++ struct nft_trans_gc *gc;
++
++ gc = nft_trans_gc_alloc(set, 0, GFP_KERNEL);
++ if (!gc)
++ return;
+
+ while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
+ union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
+@@ -1570,13 +1588,20 @@ static void pipapo_gc(const struct nft_s
+ f--;
+ i--;
+ e = f->mt[rulemap[i].to].e;
+- if (nft_set_elem_expired(&e->ext) &&
+- !nft_set_elem_mark_busy(&e->ext)) {
++
++ /* synchronous gc never fails, there is no need to set on
++ * NFT_SET_ELEM_DEAD_BIT.
++ */
++ if (nft_set_elem_expired(&e->ext)) {
+ priv->dirty = true;
+- pipapo_drop(m, rulemap);
+
+- rcu_barrier();
+- nft_set_elem_destroy(set, e, true);
++ gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
++ if (!gc)
++ break;
++
++ nft_pipapo_gc_deactivate(net, set, e);
++ pipapo_drop(m, rulemap);
++ nft_trans_gc_elem_add(gc, e);
+
+ /* And check again current first rule, which is now the
+ * first we haven't checked.
+@@ -1586,11 +1611,11 @@ static void pipapo_gc(const struct nft_s
+ }
+ }
+
+- e = nft_set_catchall_gc(set);
+- if (e)
+- nft_set_elem_destroy(set, e, true);
+-
+- priv->last_gc = jiffies;
++ gc = nft_trans_gc_catchall(gc, 0);
++ if (gc) {
++ nft_trans_gc_queue_sync_done(gc);
++ priv->last_gc = jiffies;
++ }
+ }
+
+ /**
+@@ -1715,7 +1740,6 @@ static void nft_pipapo_activate(const st
+ return;
+
+ nft_set_elem_change_active(net, set, &e->ext);
+- nft_set_elem_clear_busy(&e->ext);
+ }
+
+ /**
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -46,6 +46,12 @@ static int nft_rbtree_cmp(const struct n
+ set->klen);
+ }
+
++static bool nft_rbtree_elem_expired(const struct nft_rbtree_elem *rbe)
++{
++ return nft_set_elem_expired(&rbe->ext) ||
++ nft_set_elem_is_dead(&rbe->ext);
++}
++
+ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext,
+ unsigned int seq)
+@@ -80,7 +86,7 @@ static bool __nft_rbtree_lookup(const st
+ continue;
+ }
+
+- if (nft_set_elem_expired(&rbe->ext))
++ if (nft_rbtree_elem_expired(rbe))
+ return false;
+
+ if (nft_rbtree_interval_end(rbe)) {
+@@ -98,7 +104,7 @@ static bool __nft_rbtree_lookup(const st
+
+ if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
+ nft_set_elem_active(&interval->ext, genmask) &&
+- !nft_set_elem_expired(&interval->ext) &&
++ !nft_rbtree_elem_expired(interval) &&
+ nft_rbtree_interval_start(interval)) {
+ *ext = &interval->ext;
+ return true;
+@@ -215,6 +221,18 @@ static void *nft_rbtree_get(const struct
+ return rbe;
+ }
+
++static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set,
++ struct nft_rbtree *priv,
++ struct nft_rbtree_elem *rbe)
++{
++ struct nft_set_elem elem = {
++ .priv = rbe,
++ };
++
++ nft_setelem_data_deactivate(net, set, &elem);
++ rb_erase(&rbe->node, &priv->root);
++}
++
+ static int nft_rbtree_gc_elem(const struct nft_set *__set,
+ struct nft_rbtree *priv,
+ struct nft_rbtree_elem *rbe,
+@@ -222,11 +240,12 @@ static int nft_rbtree_gc_elem(const stru
+ {
+ struct nft_set *set = (struct nft_set *)__set;
+ struct rb_node *prev = rb_prev(&rbe->node);
++ struct net *net = read_pnet(&set->net);
+ struct nft_rbtree_elem *rbe_prev;
+- struct nft_set_gc_batch *gcb;
++ struct nft_trans_gc *gc;
+
+- gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
+- if (!gcb)
++ gc = nft_trans_gc_alloc(set, 0, GFP_ATOMIC);
++ if (!gc)
+ return -ENOMEM;
+
+ /* search for end interval coming before this element.
+@@ -244,17 +263,28 @@ static int nft_rbtree_gc_elem(const stru
+
+ if (prev) {
+ rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
++ nft_rbtree_gc_remove(net, set, priv, rbe_prev);
+
+- rb_erase(&rbe_prev->node, &priv->root);
+- atomic_dec(&set->nelems);
+- nft_set_gc_batch_add(gcb, rbe_prev);
++ /* There is always room in this trans gc for this element,
++ * memory allocation never actually happens, hence, the warning
++ * splat in such case. No need to set NFT_SET_ELEM_DEAD_BIT,
++ * this is synchronous gc which never fails.
++ */
++ gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
++ if (WARN_ON_ONCE(!gc))
++ return -ENOMEM;
++
++ nft_trans_gc_elem_add(gc, rbe_prev);
+ }
+
+- rb_erase(&rbe->node, &priv->root);
+- atomic_dec(&set->nelems);
++ nft_rbtree_gc_remove(net, set, priv, rbe);
++ gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
++ if (WARN_ON_ONCE(!gc))
++ return -ENOMEM;
+
+- nft_set_gc_batch_add(gcb, rbe);
+- nft_set_gc_batch_complete(gcb);
++ nft_trans_gc_elem_add(gc, rbe);
++
++ nft_trans_gc_queue_sync_done(gc);
+
+ return 0;
+ }
+@@ -482,7 +512,6 @@ static void nft_rbtree_activate(const st
+ struct nft_rbtree_elem *rbe = elem->priv;
+
+ nft_set_elem_change_active(net, set, &rbe->ext);
+- nft_set_elem_clear_busy(&rbe->ext);
+ }
+
+ static bool nft_rbtree_flush(const struct net *net,
+@@ -490,12 +519,9 @@ static bool nft_rbtree_flush(const struc
+ {
+ struct nft_rbtree_elem *rbe = priv;
+
+- if (!nft_set_elem_mark_busy(&rbe->ext) ||
+- !nft_is_active(net, &rbe->ext)) {
+- nft_set_elem_change_active(net, set, &rbe->ext);
+- return true;
+- }
+- return false;
++ nft_set_elem_change_active(net, set, &rbe->ext);
++
++ return true;
+ }
+
+ static void *nft_rbtree_deactivate(const struct net *net,
+@@ -570,26 +596,40 @@ cont:
+
+ static void nft_rbtree_gc(struct work_struct *work)
+ {
+- struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
+- struct nft_set_gc_batch *gcb = NULL;
++ struct nft_rbtree_elem *rbe, *rbe_end = NULL;
++ struct nftables_pernet *nft_net;
+ struct nft_rbtree *priv;
++ struct nft_trans_gc *gc;
+ struct rb_node *node;
+ struct nft_set *set;
++ unsigned int gc_seq;
+ struct net *net;
+- u8 genmask;
+
+ priv = container_of(work, struct nft_rbtree, gc_work.work);
+ set = nft_set_container_of(priv);
+ net = read_pnet(&set->net);
+- genmask = nft_genmask_cur(net);
++ nft_net = nft_pernet(net);
++ gc_seq = READ_ONCE(nft_net->gc_seq);
++
++ gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL);
++ if (!gc)
++ goto done;
+
+ write_lock_bh(&priv->lock);
+ write_seqcount_begin(&priv->count);
+ for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
++
++ /* Ruleset has been updated, try later. */
++ if (READ_ONCE(nft_net->gc_seq) != gc_seq) {
++ nft_trans_gc_destroy(gc);
++ gc = NULL;
++ goto try_later;
++ }
++
+ rbe = rb_entry(node, struct nft_rbtree_elem, node);
+
+- if (!nft_set_elem_active(&rbe->ext, genmask))
+- continue;
++ if (nft_set_elem_is_dead(&rbe->ext))
++ goto dead_elem;
+
+ /* elements are reversed in the rbtree for historical reasons,
+ * from highest to lowest value, that is why end element is
+@@ -602,46 +642,36 @@ static void nft_rbtree_gc(struct work_st
+ if (!nft_set_elem_expired(&rbe->ext))
+ continue;
+
+- if (nft_set_elem_mark_busy(&rbe->ext)) {
+- rbe_end = NULL;
++ nft_set_elem_dead(&rbe->ext);
++
++ if (!rbe_end)
+ continue;
+- }
+
+- if (rbe_prev) {
+- rb_erase(&rbe_prev->node, &priv->root);
+- rbe_prev = NULL;
+- }
+- gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
+- if (!gcb)
+- break;
++ nft_set_elem_dead(&rbe_end->ext);
+
+- atomic_dec(&set->nelems);
+- nft_set_gc_batch_add(gcb, rbe);
+- rbe_prev = rbe;
+-
+- if (rbe_end) {
+- atomic_dec(&set->nelems);
+- nft_set_gc_batch_add(gcb, rbe_end);
+- rb_erase(&rbe_end->node, &priv->root);
+- rbe_end = NULL;
+- }
+- node = rb_next(node);
+- if (!node)
+- break;
++ gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
++ if (!gc)
++ goto try_later;
++
++ nft_trans_gc_elem_add(gc, rbe_end);
++ rbe_end = NULL;
++dead_elem:
++ gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
++ if (!gc)
++ goto try_later;
++
++ nft_trans_gc_elem_add(gc, rbe);
+ }
+- if (rbe_prev)
+- rb_erase(&rbe_prev->node, &priv->root);
++
++ gc = nft_trans_gc_catchall(gc, gc_seq);
++
++try_later:
+ write_seqcount_end(&priv->count);
+ write_unlock_bh(&priv->lock);
+
+- rbe = nft_set_catchall_gc(set);
+- if (rbe) {
+- gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
+- if (gcb)
+- nft_set_gc_batch_add(gcb, rbe);
+- }
+- nft_set_gc_batch_complete(gcb);
+-
++ if (gc)
++ nft_trans_gc_queue_async_done(gc);
++done:
+ queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
+ nft_set_gc_interval(set));
+ }
--- /dev/null
+From 24138933b97b055d486e8064b4a1721702442a9b Mon Sep 17 00:00:00 2001
+From: Florian Westphal <fw@strlen.de>
+Date: Wed, 9 Aug 2023 14:31:15 +0200
+Subject: netfilter: nf_tables: don't skip expired elements during walk
+
+From: Florian Westphal <fw@strlen.de>
+
+commit 24138933b97b055d486e8064b4a1721702442a9b upstream.
+
+There is an asymmetry between commit/abort and preparation phase if the
+following conditions are met:
+
+1. set is a verdict map ("1.2.3.4 : jump foo")
+2. timeouts are enabled
+
+In this case, the following sequence is problematic:
+
+1. element E in set S refers to chain C
+2. userspace requests removal of set S
+3. kernel does a set walk to decrement chain->use count for all elements
+ from preparation phase
+4. kernel does another set walk to remove elements from the commit phase
+ (or another walk to do a chain->use increment for all elements from
+ abort phase)
+
+If E has already expired in 1), it will be ignored during list walk, so its use count
+won't have been changed.
+
+Then, when set is culled, ->destroy callback will zap the element via
+nf_tables_set_elem_destroy(), but this function is only safe for
+elements that have been deactivated earlier from the preparation phase:
+lack of earlier deactivate removes the element but leaks the chain use
+count, which results in a WARN splat when the chain gets removed later,
+plus a leak of the nft_chain structure.
+
+Update pipapo_get() not to skip expired elements, otherwise flush
+command reports bogus ENOENT errors.
+
+Fixes: 3c4287f62044 ("nf_tables: Add set type for arbitrary concatenation of ranges")
+Fixes: 8d8540c4f5e0 ("netfilter: nft_set_rbtree: add timeout support")
+Fixes: 9d0982927e79 ("netfilter: nft_hash: add support for timeouts")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netfilter/nf_tables_api.c | 4 ++++
+ net/netfilter/nft_set_hash.c | 2 --
+ net/netfilter/nft_set_pipapo.c | 18 ++++++++++++------
+ net/netfilter/nft_set_rbtree.c | 2 --
+ 4 files changed, 16 insertions(+), 10 deletions(-)
+
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -5371,8 +5371,12 @@ static int nf_tables_dump_setelem(const
+ const struct nft_set_iter *iter,
+ struct nft_set_elem *elem)
+ {
++ const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+ struct nft_set_dump_args *args;
+
++ if (nft_set_elem_expired(ext))
++ return 0;
++
+ args = container_of(iter, struct nft_set_dump_args, iter);
+ return nf_tables_fill_setelem(args->skb, set, elem);
+ }
+--- a/net/netfilter/nft_set_hash.c
++++ b/net/netfilter/nft_set_hash.c
+@@ -278,8 +278,6 @@ static void nft_rhash_walk(const struct
+
+ if (iter->count < iter->skip)
+ goto cont;
+- if (nft_set_elem_expired(&he->ext))
+- goto cont;
+ if (!nft_set_elem_active(&he->ext, iter->genmask))
+ goto cont;
+
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -566,8 +566,7 @@ next_match:
+ goto out;
+
+ if (last) {
+- if (nft_set_elem_expired(&f->mt[b].e->ext) ||
+- (genmask &&
++ if ((genmask &&
+ !nft_set_elem_active(&f->mt[b].e->ext, genmask)))
+ goto next_match;
+
+@@ -601,8 +600,17 @@ out:
+ static void *nft_pipapo_get(const struct net *net, const struct nft_set *set,
+ const struct nft_set_elem *elem, unsigned int flags)
+ {
+- return pipapo_get(net, set, (const u8 *)elem->key.val.data,
+- nft_genmask_cur(net));
++ struct nft_pipapo_elem *ret;
++
++ ret = pipapo_get(net, set, (const u8 *)elem->key.val.data,
++ nft_genmask_cur(net));
++ if (IS_ERR(ret))
++ return ret;
++
++ if (nft_set_elem_expired(&ret->ext))
++ return ERR_PTR(-ENOENT);
++
++ return ret;
+ }
+
+ /**
+@@ -2006,8 +2014,6 @@ static void nft_pipapo_walk(const struct
+ goto cont;
+
+ e = f->mt[r].e;
+- if (nft_set_elem_expired(&e->ext))
+- goto cont;
+
+ elem.priv = e;
+
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -552,8 +552,6 @@ static void nft_rbtree_walk(const struct
+
+ if (iter->count < iter->skip)
+ goto cont;
+- if (nft_set_elem_expired(&rbe->ext))
+- goto cont;
+ if (!nft_set_elem_active(&rbe->ext, iter->genmask))
+ goto cont;
+
--- /dev/null
+From c92db3030492b8ad1d0faace7a93bbcf53850d0c Mon Sep 17 00:00:00 2001
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Wed, 9 Aug 2023 15:00:06 +0200
+Subject: netfilter: nft_set_hash: mark set element as dead when deleting from packet path
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+commit c92db3030492b8ad1d0faace7a93bbcf53850d0c upstream.
+
+Set the NFT_SET_ELEM_DEAD_BIT flag on this element, instead of performing
+element removal, which might race with an ongoing transaction. Enable GC
+when the dynamic flag is set, since dynset deletion requires garbage
+collection after this patch.
+
+Fixes: d0a8d877da97 ("netfilter: nft_dynset: support for element deletion")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netfilter/nft_set_hash.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/net/netfilter/nft_set_hash.c
++++ b/net/netfilter/nft_set_hash.c
+@@ -249,7 +249,9 @@ static bool nft_rhash_delete(const struc
+ if (he == NULL)
+ return false;
+
+- return rhashtable_remove_fast(&priv->ht, &he->node, nft_rhash_params) == 0;
++ nft_set_elem_dead(&he->ext);
++
++ return true;
+ }
+
+ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
+@@ -412,7 +414,7 @@ static int nft_rhash_init(const struct n
+ return err;
+
+ INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rhash_gc);
+- if (set->flags & NFT_SET_TIMEOUT)
++ if (set->flags & (NFT_SET_TIMEOUT | NFT_SET_EVAL))
+ nft_rhash_gc_init(set);
+
+ return 0;
drm-amd-display-fix-the-build-when-drm_amd_dc_dcn-is-not-set.patch
drm-amd-display-trigger-timing-sync-only-if-tg-is-running.patch
io_uring-correct-check-for-o_tmpfile.patch
+iio-cros_ec-fix-the-allocation-size-for-cros_ec_command.patch
+iio-frequency-admv1013-propagate-errors-from-regulator_get_voltage.patch
+iio-adc-ad7192-fix-ac-excitation-feature.patch
+iio-adc-ina2xx-avoid-null-pointer-dereference-on-of-device-match.patch
+binder-fix-memory-leak-in-binder_init.patch
+misc-rtsx-judge-aspm-mode-to-set-petxcfg-reg.patch
+usb-storage-alauda-fix-uninit-value-in-alauda_check_media.patch
+usb-dwc3-properly-handle-processing-of-pending-events.patch
+usb-gadget-core-help-prevent-panic-during-uvc-unconfigure.patch
+usb-common-usb-conn-gpio-prevent-bailing-out-if-initial-role-is-none.patch
+usb-typec-tcpm-fix-response-to-vsafe0v-event.patch
+usb-typec-altmodes-displayport-signal-hpd-when-configuring-pin-assignment.patch
+x86-srso-fix-build-breakage-with-the-llvm-linker.patch
+x86-cpu-amd-enable-zenbleed-fix-for-amd-custom-apu-0405.patch
+x86-mm-fix-vdso-and-vvar-placement-on-5-level-paging-machines.patch
+x86-sev-do-not-try-to-parse-for-the-cc-blob-on-non-amd-hardware.patch
+x86-speculation-add-cpu_show_gds-prototype.patch
+x86-move-gds_ucode_mitigated-declaration-to-header.patch
+drm-nouveau-disp-revert-a-null-check-inside-nouveau_connector_get_modes.patch
+netfilter-nf_tables-don-t-skip-expired-elements-during-walk.patch
+netfilter-nf_tables-adapt-set-backend-to-use-gc-transaction-api.patch
+netfilter-nft_set_hash-mark-set-element-as-dead-when-deleting-from-packet-path.patch
+iio-core-prevent-invalid-memory-access-when-there-is-no-parent.patch
--- /dev/null
+From 8e21a620c7e6e00347ade1a6ed4967b359eada5a Mon Sep 17 00:00:00 2001
+From: Prashanth K <quic_prashk@quicinc.com>
+Date: Tue, 1 Aug 2023 14:33:52 +0530
+Subject: usb: common: usb-conn-gpio: Prevent bailing out if initial role is none
+
+From: Prashanth K <quic_prashk@quicinc.com>
+
+commit 8e21a620c7e6e00347ade1a6ed4967b359eada5a upstream.
+
+Currently, if we boot up a device without a cable connected, usb-conn-gpio
+won't call set_role() because last_role is the same as the current role.
+This happens since last_role gets initialised to zero during probe.
+
+To avoid this, add a new flag initial_detection into struct
+usb_conn_info, which prevents bailing out during initial
+detection.
+
+Cc: <stable@vger.kernel.org> # 5.4
+Fixes: 4602f3bff266 ("usb: common: add USB GPIO based connection detection driver")
+Signed-off-by: Prashanth K <quic_prashk@quicinc.com>
+Tested-by: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Reviewed-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Link: https://lore.kernel.org/r/1690880632-12588-1-git-send-email-quic_prashk@quicinc.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/common/usb-conn-gpio.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/usb/common/usb-conn-gpio.c
++++ b/drivers/usb/common/usb-conn-gpio.c
+@@ -42,6 +42,7 @@ struct usb_conn_info {
+
+ struct power_supply_desc desc;
+ struct power_supply *charger;
++ bool initial_detection;
+ };
+
+ /*
+@@ -86,11 +87,13 @@ static void usb_conn_detect_cable(struct
+ dev_dbg(info->dev, "role %s -> %s, gpios: id %d, vbus %d\n",
+ usb_role_string(info->last_role), usb_role_string(role), id, vbus);
+
+- if (info->last_role == role) {
++ if (!info->initial_detection && info->last_role == role) {
+ dev_warn(info->dev, "repeated role: %s\n", usb_role_string(role));
+ return;
+ }
+
++ info->initial_detection = false;
++
+ if (info->last_role == USB_ROLE_HOST && info->vbus)
+ regulator_disable(info->vbus);
+
+@@ -258,6 +261,7 @@ static int usb_conn_probe(struct platfor
+ device_set_wakeup_capable(&pdev->dev, true);
+
+ /* Perform initial detection */
++ info->initial_detection = true;
+ usb_conn_queue_dwork(info, 0);
+
+ return 0;
--- /dev/null
+From 3ddaa6a274578e23745b7466346fc2650df8f959 Mon Sep 17 00:00:00 2001
+From: Elson Roy Serrao <quic_eserrao@quicinc.com>
+Date: Tue, 1 Aug 2023 12:26:58 -0700
+Subject: usb: dwc3: Properly handle processing of pending events
+
+From: Elson Roy Serrao <quic_eserrao@quicinc.com>
+
+commit 3ddaa6a274578e23745b7466346fc2650df8f959 upstream.
+
+If dwc3 is runtime suspended, we defer processing the event buffer
+until resume by setting the pending_events flag. Set this flag before
+triggering resume to avoid a race with the runtime resume callback.
+
+While handling the pending events, in addition to checking the event
+buffer we also need to process it. Handle this by explicitly calling
+dwc3_thread_interrupt(). Also balance the runtime pm get() operation
+that triggered this processing.
+
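+Both sides of the change, condensed from the hunks below:
+
+	/* IRQ handler, controller runtime-suspended */
+	dwc->pending_events = true;	/* set before waking the device */
+	pm_runtime_get(dwc->dev);	/* triggers runtime resume */
+
+	/* resume path: dwc3_gadget_process_pending_events() */
+	dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);		/* check buffer */
+	dwc3_thread_interrupt(dwc->irq_gadget, dwc->ev_buf);	/* process it */
+	pm_runtime_put(dwc->dev);	/* balance the get() above */
+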
+Cc: stable@vger.kernel.org
+Fixes: fc8bb91bc83e ("usb: dwc3: implement runtime PM")
+Signed-off-by: Elson Roy Serrao <quic_eserrao@quicinc.com>
+Acked-by: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+Reviewed-by: Roger Quadros <rogerq@kernel.org>
+Link: https://lore.kernel.org/r/20230801192658.19275-1-quic_eserrao@quicinc.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/dwc3/gadget.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -4342,9 +4342,14 @@ static irqreturn_t dwc3_check_event_buf(
+ u32 count;
+
+ if (pm_runtime_suspended(dwc->dev)) {
++ dwc->pending_events = true;
++ /*
++ * Trigger runtime resume. The get() function will be balanced
++ * after processing the pending events in dwc3_process_pending
++ * events().
++ */
+ pm_runtime_get(dwc->dev);
+ disable_irq_nosync(dwc->irq_gadget);
+- dwc->pending_events = true;
+ return IRQ_HANDLED;
+ }
+
+@@ -4609,6 +4614,8 @@ void dwc3_gadget_process_pending_events(
+ {
+ if (dwc->pending_events) {
+ dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
++ dwc3_thread_interrupt(dwc->irq_gadget, dwc->ev_buf);
++ pm_runtime_put(dwc->dev);
+ dwc->pending_events = false;
+ enable_irq(dwc->irq_gadget);
+ }
--- /dev/null
+From 65dadb2beeb7360232b09ebc4585b54475dfee06 Mon Sep 17 00:00:00 2001
+From: Alan Stern <stern@rowland.harvard.edu>
+Date: Sat, 29 Jul 2023 10:59:38 -0400
+Subject: USB: Gadget: core: Help prevent panic during UVC unconfigure
+
+From: Alan Stern <stern@rowland.harvard.edu>
+
+commit 65dadb2beeb7360232b09ebc4585b54475dfee06 upstream.
+
+Avichal Rakesh reported a kernel panic that occurred when the UVC
+gadget driver was removed from a gadget's configuration. The panic
+involves a somewhat complicated interaction between the kernel driver
+and a userspace component (as described in the Link tag below), but
+the analysis did make one thing clear: The Gadget core should
+accommodate gadget drivers calling usb_gadget_deactivate() as part of
+their unbind procedure.
+
+Currently this doesn't work. gadget_unbind_driver() calls
+driver->unbind() while holding the udc->connect_lock mutex, and
+usb_gadget_deactivate() attempts to acquire that mutex, which will
+result in a deadlock.
+
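+The offending call chain, schematically:
+
+	gadget_unbind_driver()
+	    mutex_lock(&udc->connect_lock);
+	    driver->unbind(gadget);
+	        usb_gadget_deactivate()
+	            mutex_lock(&udc->connect_lock);	/* deadlock: already held */
+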
+The simple fix is for gadget_unbind_driver() to release the mutex when
+invoking the ->unbind() callback. There is no particular reason for
+it to be holding the mutex at that time, and the mutex isn't held
+while the ->bind() callback is invoked. So we'll drop the mutex
+before performing the unbind callback and reacquire it afterward.
+
+We'll also add a couple of comments to usb_gadget_activate() and
+usb_gadget_deactivate(). Because they run in process context they
+must not be called from a gadget driver's ->disconnect() callback,
+which (according to the kerneldoc for struct usb_gadget_driver in
+include/linux/usb/gadget.h) may run in interrupt context. This may
+help prevent similar bugs from arising in the future.
+
+Reported-and-tested-by: Avichal Rakesh <arakesh@google.com>
+Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
+Fixes: 286d9975a838 ("usb: gadget: udc: core: Prevent soft_connect_store() race")
+Link: https://lore.kernel.org/linux-usb/4d7aa3f4-22d9-9f5a-3d70-1bd7148ff4ba@google.com/
+Cc: Badhri Jagan Sridharan <badhri@google.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/48b2f1f1-0639-46bf-bbfc-98cb05a24914@rowland.harvard.edu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/gadget/udc/core.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -795,6 +795,9 @@ EXPORT_SYMBOL_GPL(usb_gadget_disconnect)
+ * usb_gadget_activate() is called. For example, user mode components may
+ * need to be activated before the system can talk to hosts.
+ *
++ * This routine may sleep; it must not be called in interrupt context
++ * (such as from within a gadget driver's disconnect() callback).
++ *
+ * Returns zero on success, else negative errno.
+ */
+ int usb_gadget_deactivate(struct usb_gadget *gadget)
+@@ -833,6 +836,8 @@ EXPORT_SYMBOL_GPL(usb_gadget_deactivate)
+ * This routine activates gadget which was previously deactivated with
+ * usb_gadget_deactivate() call. It calls usb_gadget_connect() if needed.
+ *
++ * This routine may sleep; it must not be called in interrupt context.
++ *
+ * Returns zero on success, else negative errno.
+ */
+ int usb_gadget_activate(struct usb_gadget *gadget)
+@@ -1611,7 +1616,11 @@ static void gadget_unbind_driver(struct
+ usb_gadget_disable_async_callbacks(udc);
+ if (gadget->irq)
+ synchronize_irq(gadget->irq);
++ mutex_unlock(&udc->connect_lock);
++
+ udc->driver->unbind(gadget);
++
++ mutex_lock(&udc->connect_lock);
+ usb_gadget_udc_stop_locked(udc);
+ mutex_unlock(&udc->connect_lock);
+
--- /dev/null
+From a6ff6e7a9dd69364547751db0f626a10a6d628d2 Mon Sep 17 00:00:00 2001
+From: Alan Stern <stern@rowland.harvard.edu>
+Date: Wed, 2 Aug 2023 13:49:02 -0400
+Subject: usb-storage: alauda: Fix uninit-value in alauda_check_media()
+
+From: Alan Stern <stern@rowland.harvard.edu>
+
+commit a6ff6e7a9dd69364547751db0f626a10a6d628d2 upstream.
+
+Syzbot got KMSAN to complain about access to an uninitialized value in
+the alauda subdriver of usb-storage:
+
+BUG: KMSAN: uninit-value in alauda_transport+0x462/0x57f0
+drivers/usb/storage/alauda.c:1137
+CPU: 0 PID: 12279 Comm: usb-storage Not tainted 5.3.0-rc7+ #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS
+Google 01/01/2011
+Call Trace:
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack+0x191/0x1f0 lib/dump_stack.c:113
+ kmsan_report+0x13a/0x2b0 mm/kmsan/kmsan_report.c:108
+ __msan_warning+0x73/0xe0 mm/kmsan/kmsan_instr.c:250
+ alauda_check_media+0x344/0x3310 drivers/usb/storage/alauda.c:460
+
+The problem is that alauda_check_media() doesn't verify that its USB
+transfer succeeded before trying to use the received data. What
+should happen if the transfer fails isn't entirely clear, but a
+reasonably conservative approach is to pretend that no media is
+present.
+
+A similar problem exists in a usb_stor_dbg() call in
+alauda_get_media_status(). In this case, when an error occurs the
+call is redundant, because usb_stor_ctrl_transfer() already will print
+a debugging message.
+
+Finally, unrelated to the uninitialized memory access, is the fact
+that alauda_check_media() performs DMA to a buffer on the stack.
+Fortunately usb-storage provides a general purpose DMA-able buffer for
+uses like this. We'll use it instead.
+
+Reported-and-tested-by: syzbot+e7d46eb426883fb97efd@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/0000000000007d25ff059457342d@google.com/T/
+Suggested-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
+Fixes: e80b0fade09e ("[PATCH] USB Storage: add alauda support")
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/693d5d5e-f09b-42d0-8ed9-1f96cd30bcce@rowland.harvard.edu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/storage/alauda.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/drivers/usb/storage/alauda.c
++++ b/drivers/usb/storage/alauda.c
+@@ -318,7 +318,8 @@ static int alauda_get_media_status(struc
+ rc = usb_stor_ctrl_transfer(us, us->recv_ctrl_pipe,
+ command, 0xc0, 0, 1, data, 2);
+
+- usb_stor_dbg(us, "Media status %02X %02X\n", data[0], data[1]);
++ if (rc == USB_STOR_XFER_GOOD)
++ usb_stor_dbg(us, "Media status %02X %02X\n", data[0], data[1]);
+
+ return rc;
+ }
+@@ -454,9 +455,14 @@ static int alauda_init_media(struct us_d
+ static int alauda_check_media(struct us_data *us)
+ {
+ struct alauda_info *info = (struct alauda_info *) us->extra;
+- unsigned char status[2];
++ unsigned char *status = us->iobuf;
++ int rc;
+
+- alauda_get_media_status(us, status);
++ rc = alauda_get_media_status(us, status);
++ if (rc != USB_STOR_XFER_GOOD) {
++ status[0] = 0xF0; /* Pretend there's no media */
++ status[1] = 0;
++ }
+
+ /* Check for no media or door open */
+ if ((status[0] & 0x80) || ((status[0] & 0x1F) == 0x10)
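+
+As a side note, the defensive pattern above can be illustrated with a small
+standalone C sketch (not kernel code): the output buffer is consumed only if
+the transfer reported success, and a well-defined "no media" value is
+substituted otherwise. The fake_transfer() helper and its return codes are
+invented for this example.
+
+  #include <stdio.h>
+  #include <string.h>
+
+  #define XFER_GOOD  0
+  #define XFER_ERROR 1
+
+  /* Pretend transfer: on error the output buffer is left untouched. */
+  static int fake_transfer(unsigned char *buf, size_t len, int fail)
+  {
+          if (fail)
+                  return XFER_ERROR;
+          memset(buf, 0x11, len); /* "media present" status bytes */
+          return XFER_GOOD;
+  }
+
+  static void check_media(int fail)
+  {
+          unsigned char status[2];
+          int rc = fake_transfer(status, sizeof(status), fail);
+
+          if (rc != XFER_GOOD) {
+                  status[0] = 0xF0; /* pretend there is no media */
+                  status[1] = 0;
+          }
+          printf("status %02X %02X\n", status[0], status[1]);
+  }
+
+  int main(void)
+  {
+          check_media(0);
+          check_media(1); /* without the rc check this would read uninitialized bytes */
+          return 0;
+  }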
--- /dev/null
+From 5a5ccd61cfd76156cb3e0373c300c509d05448ce Mon Sep 17 00:00:00 2001
+From: RD Babiera <rdbabiera@google.com>
+Date: Wed, 26 Jul 2023 02:09:02 +0000
+Subject: usb: typec: altmodes/displayport: Signal hpd when configuring pin assignment
+
+From: RD Babiera <rdbabiera@google.com>
+
+commit 5a5ccd61cfd76156cb3e0373c300c509d05448ce upstream.
+
+When connecting to some DisplayPort partners, the initial status update
+after entering DisplayPort Alt Mode notifies that the DFP_D/UFP_D is not in
+the connected state. This leads to sending a configure message that keeps
+the device in USB mode. The port partner then sets DFP_D/UFP_D to the
+connected state and HPD to high in the same Attention message. Currently,
+the HPD signal is dropped in order to handle configuration.
+
+This patch saves changes to the HPD signal when the device chooses to
+configure during dp_altmode_status_update, and invokes sysfs_notify for
+HPD after configuring, if necessary.
+
+Fixes: 0e3bb7d6894d ("usb: typec: Add driver for DisplayPort alternate mode")
+Cc: stable@vger.kernel.org
+Signed-off-by: RD Babiera <rdbabiera@google.com>
+Acked-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Link: https://lore.kernel.org/r/20230726020903.1409072-1-rdbabiera@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/typec/altmodes/displayport.c | 18 +++++++++++++++++-
+ 1 file changed, 17 insertions(+), 1 deletion(-)
+
+--- a/drivers/usb/typec/altmodes/displayport.c
++++ b/drivers/usb/typec/altmodes/displayport.c
+@@ -60,6 +60,7 @@ struct dp_altmode {
+
+ enum dp_state state;
+ bool hpd;
++ bool pending_hpd;
+
+ struct mutex lock; /* device lock */
+ struct work_struct work;
+@@ -144,8 +145,13 @@ static int dp_altmode_status_update(stru
+ dp->state = DP_STATE_EXIT;
+ } else if (!(con & DP_CONF_CURRENTLY(dp->data.conf))) {
+ ret = dp_altmode_configure(dp, con);
+- if (!ret)
++ if (!ret) {
+ dp->state = DP_STATE_CONFIGURE;
++ if (dp->hpd != hpd) {
++ dp->hpd = hpd;
++ dp->pending_hpd = true;
++ }
++ }
+ } else {
+ if (dp->hpd != hpd) {
+ drm_connector_oob_hotplug_event(dp->connector_fwnode);
+@@ -160,6 +166,16 @@ static int dp_altmode_configured(struct
+ {
+ sysfs_notify(&dp->alt->dev.kobj, "displayport", "configuration");
+ sysfs_notify(&dp->alt->dev.kobj, "displayport", "pin_assignment");
++ /*
++ * If the DFP_D/UFP_D sends a change in HPD when first notifying the
++ * DisplayPort driver that it is connected, then we wait until
++ * configuration is complete to signal HPD.
++ */
++ if (dp->pending_hpd) {
++ drm_connector_oob_hotplug_event(dp->connector_fwnode);
++ sysfs_notify(&dp->alt->dev.kobj, "displayport", "hpd");
++ dp->pending_hpd = false;
++ }
+
+ return dp_altmode_notify(dp);
+ }
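+
+The "latch now, signal later" idea behind pending_hpd can be sketched in a
+few lines of standalone C (names are illustrative, not the driver's API): an
+HPD change that arrives while a configure is still outstanding is remembered
+and delivered only once configuration completes, instead of being dropped.
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  struct dp_state {
+          bool hpd;
+          bool pending_hpd;
+  };
+
+  static void notify_hpd(void) { printf("HPD event delivered\n"); }
+
+  /* Status update that may arrive before the partner is configured. */
+  static void status_update(struct dp_state *dp, bool hpd, bool need_configure)
+  {
+          if (need_configure) {
+                  if (dp->hpd != hpd) {
+                          dp->hpd = hpd;
+                          dp->pending_hpd = true; /* defer, do not drop */
+                  }
+          } else if (dp->hpd != hpd) {
+                  dp->hpd = hpd;
+                  notify_hpd();                   /* deliver immediately */
+          }
+  }
+
+  static void configured(struct dp_state *dp)
+  {
+          if (dp->pending_hpd) {
+                  notify_hpd();                   /* deliver the deferred event */
+                  dp->pending_hpd = false;
+          }
+  }
+
+  int main(void)
+  {
+          struct dp_state dp = { 0 };
+
+          status_update(&dp, true, true); /* HPD goes high while a configure is needed */
+          configured(&dp);                /* the event is signalled only now */
+          return 0;
+  }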
--- /dev/null
+From 4270d2b4845e820b274702bfc2a7140f69e4d19d Mon Sep 17 00:00:00 2001
+From: Badhri Jagan Sridharan <badhri@google.com>
+Date: Wed, 12 Jul 2023 08:57:22 +0000
+Subject: usb: typec: tcpm: Fix response to vsafe0V event
+
+From: Badhri Jagan Sridharan <badhri@google.com>
+
+commit 4270d2b4845e820b274702bfc2a7140f69e4d19d upstream.
+
+Do not transition to SNK_UNATTACHED state when receiving vsafe0v event
+while in SNK_HARD_RESET_WAIT_VBUS. Ignore VBUS off events as well,
+since on some platforms VBUS off can be signalled more than once.
+
+[143515.364753] Requesting mux state 1, usb-role 2, orientation 2
+[143515.365520] pending state change SNK_HARD_RESET_SINK_OFF -> SNK_HARD_RESET_SINK_ON @ 650 ms [rev3 HARD_RESET]
+[143515.632281] CC1: 0 -> 0, CC2: 3 -> 0 [state SNK_HARD_RESET_SINK_OFF, polarity 1, disconnected]
+[143515.637214] VBUS on
+[143515.664985] VBUS off
+[143515.664992] state change SNK_HARD_RESET_SINK_OFF -> SNK_HARD_RESET_WAIT_VBUS [rev3 HARD_RESET]
+[143515.665564] VBUS VSAFE0V
+[143515.665566] state change SNK_HARD_RESET_WAIT_VBUS -> SNK_UNATTACHED [rev3 HARD_RESET]
+
+Fixes: 28b43d3d746b ("usb: typec: tcpm: Introduce vsafe0v for vbus")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Badhri Jagan Sridharan <badhri@google.com>
+Acked-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Link: https://lore.kernel.org/r/20230712085722.1414743-1-badhri@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/typec/tcpm/tcpm.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -5322,6 +5322,10 @@ static void _tcpm_pd_vbus_off(struct tcp
+ /* Do nothing, vbus drop expected */
+ break;
+
++ case SNK_HARD_RESET_WAIT_VBUS:
++ /* Do nothing, its OK to receive vbus off events */
++ break;
++
+ default:
+ if (port->pwr_role == TYPEC_SINK && port->attached)
+ tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
+@@ -5368,6 +5372,9 @@ static void _tcpm_pd_vbus_vsafe0v(struct
+ case SNK_DEBOUNCED:
+ /*Do nothing, still waiting for VSAFE5V for connect */
+ break;
++ case SNK_HARD_RESET_WAIT_VBUS:
++ /* Do nothing, its OK to receive vbus off events */
++ break;
+ default:
+ if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
+ tcpm_set_state(port, SNK_UNATTACHED, 0);
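+
+The shape of the fix is a classic state-machine tweak: give the event an
+explicit "do nothing" case for the state in question instead of letting it
+fall into the default branch that detaches the port. A minimal standalone
+sketch (not tcpm itself, states reduced to three) follows.
+
+  #include <stdio.h>
+
+  enum state { SNK_READY, SNK_HARD_RESET_WAIT_VBUS, SNK_UNATTACHED };
+
+  static enum state handle_vbus_off(enum state cur)
+  {
+          switch (cur) {
+          case SNK_HARD_RESET_WAIT_VBUS:
+                  /* Do nothing: duplicate vbus-off/vsafe0v events are expected here. */
+                  return cur;
+          default:
+                  return SNK_UNATTACHED; /* usual reaction: treat as detach */
+          }
+  }
+
+  int main(void)
+  {
+          printf("%d\n", handle_vbus_off(SNK_HARD_RESET_WAIT_VBUS)); /* stays in state 1 */
+          printf("%d\n", handle_vbus_off(SNK_READY));                /* moves to state 2 */
+          return 0;
+  }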
--- /dev/null
+From 6dbef74aeb090d6bee7d64ef3fa82ae6fa53f271 Mon Sep 17 00:00:00 2001
+From: Cristian Ciocaltea <cristian.ciocaltea@collabora.com>
+Date: Fri, 11 Aug 2023 23:37:05 +0300
+Subject: x86/cpu/amd: Enable Zenbleed fix for AMD Custom APU 0405
+
+From: Cristian Ciocaltea <cristian.ciocaltea@collabora.com>
+
+commit 6dbef74aeb090d6bee7d64ef3fa82ae6fa53f271 upstream.
+
+Commit
+
+ 522b1d69219d ("x86/cpu/amd: Add a Zenbleed fix")
+
+provided a fix for the Zen2 VZEROUPPER data corruption bug affecting
+a range of CPU models, but the AMD Custom APU 0405 found on SteamDeck
+was not listed, although it is clearly affected by the vulnerability.
+
+Add this CPU variant to the Zenbleed erratum list, in order to
+unconditionally enable the fallback fix until a proper microcode update
+is available.
+
+Fixes: 522b1d69219d ("x86/cpu/amd: Add a Zenbleed fix")
+Signed-off-by: Cristian Ciocaltea <cristian.ciocaltea@collabora.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20230811203705.1699914-1-cristian.ciocaltea@collabora.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/amd.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -73,6 +73,7 @@ static const int amd_erratum_1054[] =
+ static const int amd_zenbleed[] =
+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf),
+ AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
++ AMD_MODEL_RANGE(0x17, 0x90, 0x0, 0x91, 0xf),
+ AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));
+
+ static const int amd_div0[] =
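+
+Conceptually, the erratum table works as a family/model range match; the fix
+is enabled when the running CPU falls inside any listed range. The standalone
+sketch below (steppings omitted, helper simplified, not the kernel's
+implementation) mirrors the Zenbleed list, including the 0x90-0x91 entry
+added by this patch.
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  struct range {
+          unsigned int family;
+          unsigned int model_lo, model_hi;
+  };
+
+  static const struct range zenbleed[] = {
+          { 0x17, 0x30, 0x4f },
+          { 0x17, 0x60, 0x7f },
+          { 0x17, 0x90, 0x91 }, /* AMD Custom APU 0405 */
+          { 0x17, 0xa0, 0xaf },
+  };
+
+  static bool cpu_has_erratum(unsigned int family, unsigned int model)
+  {
+          for (size_t i = 0; i < sizeof(zenbleed) / sizeof(zenbleed[0]); i++)
+                  if (family == zenbleed[i].family &&
+                      model >= zenbleed[i].model_lo && model <= zenbleed[i].model_hi)
+                          return true;
+          return false;
+  }
+
+  int main(void)
+  {
+          printf("0x17/0x90: %d\n", cpu_has_erratum(0x17, 0x90)); /* 1 with this patch */
+          printf("0x17/0x50: %d\n", cpu_has_erratum(0x17, 0x50)); /* 0 */
+          return 0;
+  }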
--- /dev/null
+From 1b8b1aa90c9c0e825b181b98b8d9e249dc395470 Mon Sep 17 00:00:00 2001
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Thu, 3 Aug 2023 18:16:09 +0300
+Subject: x86/mm: Fix VDSO and VVAR placement on 5-level paging machines
+
+From: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+
+commit 1b8b1aa90c9c0e825b181b98b8d9e249dc395470 upstream.
+
+Yingcong has noticed that on the 5-level paging machine, VDSO and VVAR
+VMAs are placed above the 47-bit border:
+
+8000001a9000-8000001ad000 r--p 00000000 00:00 0 [vvar]
+8000001ad000-8000001af000 r-xp 00000000 00:00 0 [vdso]
+
+This might confuse users who are not aware of 5-level paging and expect
+all userspace addresses to be under the 47-bit border.
+
+So far the problem has only been triggered with ASLR disabled, although
+it may also occur with ASLR enabled if the layout is randomized in just
+the right way.
+
+The problem happens due to custom placement for the VMAs in the VDSO
+code: vdso_addr() tries to place them above the stack and checks the
+result against TASK_SIZE_MAX, which is wrong. TASK_SIZE_MAX is set to
+the 56-bit border on 5-level paging machines. Use DEFAULT_MAP_WINDOW
+instead.
+
+Fixes: b569bab78d8d ("x86/mm: Prepare to expose larger address space to userspace")
+Reported-by: Yingcong Wu <yingcong.wu@intel.com>
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/all/20230803151609.22141-1-kirill.shutemov%40linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/entry/vdso/vma.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/entry/vdso/vma.c
++++ b/arch/x86/entry/vdso/vma.c
+@@ -322,8 +322,8 @@ static unsigned long vdso_addr(unsigned
+
+ /* Round the lowest possible end address up to a PMD boundary. */
+ end = (start + len + PMD_SIZE - 1) & PMD_MASK;
+- if (end >= TASK_SIZE_MAX)
+- end = TASK_SIZE_MAX;
++ if (end >= DEFAULT_MAP_WINDOW)
++ end = DEFAULT_MAP_WINDOW;
+ end -= len;
+
+ if (end > start) {
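+
+The arithmetic being changed is easy to check in isolation: round the
+candidate end address up to a 2 MiB (PMD) boundary, then clamp it to the map
+window so the mapping can never land above the 47-bit border. The standalone
+sketch below uses illustrative constants, not the kernel headers.
+
+  #include <stdio.h>
+
+  #define PMD_SIZE           (2UL << 20)                 /* 2 MiB */
+  #define PMD_MASK           (~(PMD_SIZE - 1))
+  #define DEFAULT_MAP_WINDOW ((1UL << 47) - 4096)        /* 47-bit user window */
+
+  static unsigned long place_end(unsigned long start, unsigned long len)
+  {
+          unsigned long end = (start + len + PMD_SIZE - 1) & PMD_MASK;
+
+          if (end >= DEFAULT_MAP_WINDOW)  /* clamp here, not at TASK_SIZE_MAX */
+                  end = DEFAULT_MAP_WINDOW;
+          return end - len;
+  }
+
+  int main(void)
+  {
+          /* A candidate above the 47-bit border gets pulled back below it. */
+          printf("%#lx\n", place_end(0xffffff000000UL, 0x4000)); /* 0x7fffffffb000 */
+          return 0;
+  }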
--- /dev/null
+From eb3515dc99c7c85f4170b50838136b2a193f8012 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Wed, 9 Aug 2023 15:05:00 +0200
+Subject: x86: Move gds_ucode_mitigated() declaration to header
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit eb3515dc99c7c85f4170b50838136b2a193f8012 upstream.
+
+The declaration got placed in the .c file of the caller, but that
+causes a warning for the definition:
+
+arch/x86/kernel/cpu/bugs.c:682:6: error: no previous prototype for 'gds_ucode_mitigated' [-Werror=missing-prototypes]
+
+Move it to a header where both sides can observe it instead.
+
+Fixes: 81ac7e5d74174 ("KVM: Add GDS_NO support to KVM")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Tested-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Cc: stable@kernel.org
+Link: https://lore.kernel.org/all/20230809130530.1913368-2-arnd%40kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/processor.h | 2 ++
+ arch/x86/kvm/x86.c | 2 --
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -867,4 +867,6 @@ bool arch_is_platform_page(u64 paddr);
+ #define arch_is_platform_page arch_is_platform_page
+ #endif
+
++extern bool gds_ucode_mitigated(void);
++
+ #endif /* _ASM_X86_PROCESSOR_H */
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -311,8 +311,6 @@ u64 __read_mostly host_xcr0;
+
+ static struct kmem_cache *x86_emulator_cache;
+
+-extern bool gds_ucode_mitigated(void);
+-
+ /*
+ * When called, it means the previous get/set msr reached an invalid msr.
+ * Return true if we want to ignore/silent this failed msr access.
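+
+The underlying -Wmissing-prototypes rule is that a non-static definition must
+have a prototype in scope, and the cleanest way to satisfy both the definition
+and its callers is a single declaration in a shared header. A single-file
+sketch (the file names in the comments are only illustrative) of that idea:
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  /* Would live in a shared header (here: asm/processor.h): the one prototype. */
+  extern bool gds_ucode_mitigated(void);
+
+  /* Would live in cpu/bugs.c: the definition now sees a prior prototype. */
+  bool gds_ucode_mitigated(void)
+  {
+          return true;
+  }
+
+  /* Would live in kvm/x86.c: the caller includes the header, no local extern. */
+  int main(void)
+  {
+          printf("mitigated: %d\n", gds_ucode_mitigated());
+          return 0;
+  }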
--- /dev/null
+From bee6cf1a80b54548a039e224c651bb15b644a480 Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+Date: Sun, 16 Jul 2023 20:22:20 +0200
+Subject: x86/sev: Do not try to parse for the CC blob on non-AMD hardware
+
+From: Borislav Petkov (AMD) <bp@alien8.de>
+
+commit bee6cf1a80b54548a039e224c651bb15b644a480 upstream.
+
+Tao Liu reported a boot hang on an Intel Atom machine due to an unmapped
+EFI config table. The reason is that the CC blob, which contains the
+CPUID page for AMD SNP guests, is searched for before even checking
+whether the machine runs on AMD hardware.
+
+Usually that's not a problem on !AMD hw - it simply won't find the CC
+blob's GUID and will return. However, if any part of the config table
+pointer array is not mapped, the kernel will #PF very early in the
+decompressor stage without any opportunity to recover.
+
+Therefore, do a superficial CPUID check before poking for the CC blob.
+This will fix the current issue on real hardware. It would also work as
+a guest on a non-lying hypervisor.
+
+For the lying hypervisor, the check is done again, *after* parsing the
+CC blob as the real CPUID page will be present then.
+
+Clear the #VC handler in case SEV-{ES,SNP} hasn't been detected, as
+a precaution.
+
+Fixes: c01fce9cef84 ("x86/compressed: Add SEV-SNP feature detection/setup")
+Reported-by: Tao Liu <ltao@redhat.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Acked-by: Tom Lendacky <thomas.lendacky@amd.com>
+Tested-by: Tao Liu <ltao@redhat.com>
+Cc: <stable@kernel.org>
+Link: https://lore.kernel.org/r/20230601072043.24439-1-ltao@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/boot/compressed/idt_64.c | 9 ++++++++-
+ arch/x86/boot/compressed/sev.c | 37 +++++++++++++++++++++++++++++++++++--
+ 2 files changed, 43 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/boot/compressed/idt_64.c
++++ b/arch/x86/boot/compressed/idt_64.c
+@@ -63,7 +63,14 @@ void load_stage2_idt(void)
+ set_idt_entry(X86_TRAP_PF, boot_page_fault);
+
+ #ifdef CONFIG_AMD_MEM_ENCRYPT
+- set_idt_entry(X86_TRAP_VC, boot_stage2_vc);
++ /*
++ * Clear the second stage #VC handler in case guest types
++ * needing #VC have not been detected.
++ */
++ if (sev_status & BIT(1))
++ set_idt_entry(X86_TRAP_VC, boot_stage2_vc);
++ else
++ set_idt_entry(X86_TRAP_VC, NULL);
+ #endif
+
+ load_boot_idt(&boot_idt_desc);
+--- a/arch/x86/boot/compressed/sev.c
++++ b/arch/x86/boot/compressed/sev.c
+@@ -355,12 +355,45 @@ void sev_enable(struct boot_params *bp)
+ bp->cc_blob_address = 0;
+
+ /*
++ * Do an initial SEV capability check before snp_init() which
++ * loads the CPUID page and the same checks afterwards are done
++ * without the hypervisor and are trustworthy.
++ *
++ * If the HV fakes SEV support, the guest will crash'n'burn
++ * which is good enough.
++ */
++
++ /* Check for the SME/SEV support leaf */
++ eax = 0x80000000;
++ ecx = 0;
++ native_cpuid(&eax, &ebx, &ecx, &edx);
++ if (eax < 0x8000001f)
++ return;
++
++ /*
++ * Check for the SME/SEV feature:
++ * CPUID Fn8000_001F[EAX]
++ * - Bit 0 - Secure Memory Encryption support
++ * - Bit 1 - Secure Encrypted Virtualization support
++ * CPUID Fn8000_001F[EBX]
++ * - Bits 5:0 - Pagetable bit position used to indicate encryption
++ */
++ eax = 0x8000001f;
++ ecx = 0;
++ native_cpuid(&eax, &ebx, &ecx, &edx);
++ /* Check whether SEV is supported */
++ if (!(eax & BIT(1)))
++ return;
++
++ /*
+ * Setup/preliminary detection of SNP. This will be sanity-checked
+ * against CPUID/MSR values later.
+ */
+ snp = snp_init(bp);
+
+- /* Check for the SME/SEV support leaf */
++ /* Now repeat the checks with the SNP CPUID table. */
++
++ /* Recheck the SME/SEV support leaf */
+ eax = 0x80000000;
+ ecx = 0;
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+@@ -368,7 +401,7 @@ void sev_enable(struct boot_params *bp)
+ return;
+
+ /*
+- * Check for the SME/SEV feature:
++ * Recheck for the SME/SEV feature:
+ * CPUID Fn8000_001F[EAX]
+ * - Bit 0 - Secure Memory Encryption support
+ * - Bit 1 - Secure Encrypted Virtualization support
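+
+The added CPUID probing can be reproduced from userspace with the compiler's
+<cpuid.h> helpers (which is not what the decompressor uses - it has its own
+native_cpuid()): first make sure leaf 0x8000001f exists at all, then test
+EAX bit 1 for SEV. A small sketch:
+
+  #include <cpuid.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+          unsigned int eax, ebx, ecx, edx;
+
+          /* __get_cpuid() checks the maximum supported extended leaf for us. */
+          if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx)) {
+                  printf("no 0x8000001f leaf - not SME/SEV-capable hardware\n");
+                  return 0;
+          }
+
+          /* Fn8000_001F[EAX]: bit 0 = SME, bit 1 = SEV. */
+          printf("SME: %u, SEV: %u\n", eax & 1, (eax >> 1) & 1);
+          return 0;
+  }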
--- /dev/null
+From a57c27c7ad85c420b7de44c6ee56692d51709dda Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Wed, 9 Aug 2023 15:04:59 +0200
+Subject: x86/speculation: Add cpu_show_gds() prototype
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit a57c27c7ad85c420b7de44c6ee56692d51709dda upstream.
+
+The newly added function has two definitions but no prototypes:
+
+drivers/base/cpu.c:605:16: error: no previous prototype for 'cpu_show_gds' [-Werror=missing-prototypes]
+
+Add a declaration next to the other ones for this file to avoid the
+warning.
+
+Fixes: 8974eb588283b ("x86/speculation: Add Gather Data Sampling mitigation")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Tested-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
+Cc: stable@kernel.org
+Link: https://lore.kernel.org/all/20230809130530.1913368-1-arnd%40kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/cpu.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -72,6 +72,8 @@ extern ssize_t cpu_show_retbleed(struct
+ struct device_attribute *attr, char *buf);
+ extern ssize_t cpu_show_spec_rstack_overflow(struct device *dev,
+ struct device_attribute *attr, char *buf);
++extern ssize_t cpu_show_gds(struct device *dev,
++ struct device_attribute *attr, char *buf);
+
+ extern __printf(4, 5)
+ struct device *cpu_device_create(struct device *parent, void *drvdata,
--- /dev/null
+From cbe8ded48b939b9d55d2c5589ab56caa7b530709 Mon Sep 17 00:00:00 2001
+From: Nick Desaulniers <ndesaulniers@google.com>
+Date: Wed, 9 Aug 2023 09:40:26 -0700
+Subject: x86/srso: Fix build breakage with the LLVM linker
+
+From: Nick Desaulniers <ndesaulniers@google.com>
+
+commit cbe8ded48b939b9d55d2c5589ab56caa7b530709 upstream.
+
+The assertion added to verify the difference in bits set of the
+addresses of srso_untrain_ret_alias() and srso_safe_ret_alias() would
+cause the link to fail with LLVM's ld.lld linker, with the following error:
+
+ ld.lld: error: ./arch/x86/kernel/vmlinux.lds:210: at least one side of
+ the expression must be absolute
+ ld.lld: error: ./arch/x86/kernel/vmlinux.lds:211: at least one side of
+ the expression must be absolute
+
+Use ABSOLUTE to evaluate the expression referring to at least one of the
+symbols so that LLD can evaluate the linker script.
+
+Also, add linker version info to the comment about XOR being unsupported
+in either ld.bfd or ld.lld until somewhat recently.
+
+Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation")
+Closes: https://lore.kernel.org/llvm/CA+G9fYsdUeNu-gwbs0+T6XHi4hYYk=Y9725-wFhZ7gJMspLDRA@mail.gmail.com/
+Reported-by: Nathan Chancellor <nathan@kernel.org>
+Reported-by: Daniel Kolesa <daniel@octaforge.org>
+Reported-by: Naresh Kamboju <naresh.kamboju@linaro.org>
+Suggested-by: Sven Volkinsfeld <thyrc@gmx.net>
+Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://github.com/ClangBuiltLinux/linux/issues/1907
+Link: https://lore.kernel.org/r/20230809-gds-v1-1-eaac90b0cbcc@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/vmlinux.lds.S | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -514,11 +514,17 @@ INIT_PER_CPU(irq_stack_backing_store);
+
+ #ifdef CONFIG_CPU_SRSO
+ /*
+- * GNU ld cannot do XOR so do: (A | B) - (A & B) in order to compute the XOR
++ * GNU ld cannot do XOR until 2.41.
++ * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1
++ *
++ * LLVM lld cannot do XOR until lld-17.
++ * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb
++ *
++ * Instead do: (A | B) - (A & B) in order to compute the XOR
+ * of the two function addresses:
+ */
+-. = ASSERT(((srso_untrain_ret_alias | srso_safe_ret_alias) -
+- (srso_untrain_ret_alias & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
++. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) -
++ (ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
+ "SRSO function pair won't alias");
+ #endif
+
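+
+The linker-script trick relies on the identity A ^ B == (A | B) - (A & B),
+which old ld.bfd and ld.lld can evaluate even though they cannot evaluate ^
+directly. A quick standalone check (the addresses below are made up so that
+they differ exactly in bits 2, 8, 14 and 20, as the assertion expects):
+
+  #include <assert.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+          unsigned long a = 0xffffffff89000000UL; /* made-up symbol addresses */
+          unsigned long b = 0xffffffff89104104UL;
+
+          /* (A | B) - (A & B) is exactly A ^ B, for any A and B. */
+          assert(((a | b) - (a & b)) == (a ^ b));
+          assert((a ^ b) == ((1UL << 2) | (1UL << 8) | (1UL << 14) | (1UL << 20)));
+          printf("xor = %#lx\n", a ^ b);
+          return 0;
+  }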