--- /dev/null
+From 096f58507374e1293a9e9cff8a1ccd5f37780a20 Mon Sep 17 00:00:00 2001
+From: Jonathan Bakker <xc-racer2@live.ca>
+Date: Sun, 27 Mar 2022 11:08:50 -0700
+Subject: ARM: dts: s5pv210: Remove spi-cs-high on panel in Aries
+
+From: Jonathan Bakker <xc-racer2@live.ca>
+
+commit 096f58507374e1293a9e9cff8a1ccd5f37780a20 upstream.
+
+Since commit 766c6b63aa04 ("spi: fix client driver breakages when using
+GPIO descriptors"), the panel has been blank due to an inverted CS GPIO.
+In order to correct this, drop the spi-cs-high from the panel SPI device.
+
+Fixes: 766c6b63aa04 ("spi: fix client driver breakages when using GPIO descriptors")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Jonathan Bakker <xc-racer2@live.ca>
+Link: https://lore.kernel.org/r/CY4PR04MB05670C771062570E911AF3B4CB1C9@CY4PR04MB0567.namprd04.prod.outlook.com
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/boot/dts/s5pv210-aries.dtsi | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/arch/arm/boot/dts/s5pv210-aries.dtsi
++++ b/arch/arm/boot/dts/s5pv210-aries.dtsi
+@@ -564,7 +564,6 @@
+ reset-gpios = <&mp05 5 GPIO_ACTIVE_LOW>;
+ vdd3-supply = <&ldo7_reg>;
+ vci-supply = <&ldo17_reg>;
+- spi-cs-high;
+ spi-max-frequency = <1200000>;
+
+ pinctrl-names = "default";
--- /dev/null
+From 2672a4bff6c03a20d5ae460a091f67ee782c3eff Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Wed, 11 Sep 2019 22:31:51 +0200
+Subject: ARM: pxa: maybe fix gpio lookup tables
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit 2672a4bff6c03a20d5ae460a091f67ee782c3eff upstream.
+
+From inspection I found a couple of GPIO lookups that are
+listed with device "gpio-pxa", but actually have a number
+from a different gpio controller.
+
+Try to rectify that here, with a guess of what the actual
+device name is.
+
+Acked-by: Robert Jarzmik <robert.jarzmik@free.fr>
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/mach-pxa/cm-x300.c | 8 ++++----
+ arch/arm/mach-pxa/magician.c | 2 +-
+ arch/arm/mach-pxa/tosa.c | 4 ++--
+ 3 files changed, 7 insertions(+), 7 deletions(-)
+
+--- a/arch/arm/mach-pxa/cm-x300.c
++++ b/arch/arm/mach-pxa/cm-x300.c
+@@ -354,13 +354,13 @@ static struct platform_device cm_x300_sp
+ static struct gpiod_lookup_table cm_x300_spi_gpiod_table = {
+ .dev_id = "spi_gpio",
+ .table = {
+- GPIO_LOOKUP("gpio-pxa", GPIO_LCD_SCL,
++ GPIO_LOOKUP("pca9555.1", GPIO_LCD_SCL - GPIO_LCD_BASE,
+ "sck", GPIO_ACTIVE_HIGH),
+- GPIO_LOOKUP("gpio-pxa", GPIO_LCD_DIN,
++ GPIO_LOOKUP("pca9555.1", GPIO_LCD_DIN - GPIO_LCD_BASE,
+ "mosi", GPIO_ACTIVE_HIGH),
+- GPIO_LOOKUP("gpio-pxa", GPIO_LCD_DOUT,
++ GPIO_LOOKUP("pca9555.1", GPIO_LCD_DOUT - GPIO_LCD_BASE,
+ "miso", GPIO_ACTIVE_HIGH),
+- GPIO_LOOKUP("gpio-pxa", GPIO_LCD_CS,
++ GPIO_LOOKUP("pca9555.1", GPIO_LCD_CS - GPIO_LCD_BASE,
+ "cs", GPIO_ACTIVE_HIGH),
+ { },
+ },
+--- a/arch/arm/mach-pxa/magician.c
++++ b/arch/arm/mach-pxa/magician.c
+@@ -681,7 +681,7 @@ static struct platform_device bq24022 =
+ static struct gpiod_lookup_table bq24022_gpiod_table = {
+ .dev_id = "gpio-regulator",
+ .table = {
+- GPIO_LOOKUP("gpio-pxa", EGPIO_MAGICIAN_BQ24022_ISET2,
++ GPIO_LOOKUP("htc-egpio-0", EGPIO_MAGICIAN_BQ24022_ISET2 - MAGICIAN_EGPIO_BASE,
+ NULL, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio-pxa", GPIO30_MAGICIAN_BQ24022_nCHARGE_EN,
+ "enable", GPIO_ACTIVE_LOW),
+--- a/arch/arm/mach-pxa/tosa.c
++++ b/arch/arm/mach-pxa/tosa.c
+@@ -296,9 +296,9 @@ static struct gpiod_lookup_table tosa_mc
+ .table = {
+ GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_nSD_DETECT,
+ "cd", GPIO_ACTIVE_LOW),
+- GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_SD_WP,
++ GPIO_LOOKUP("sharp-scoop.0", TOSA_GPIO_SD_WP - TOSA_SCOOP_GPIO_BASE,
+ "wp", GPIO_ACTIVE_LOW),
+- GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_PWR_ON,
++ GPIO_LOOKUP("sharp-scoop.0", TOSA_GPIO_PWR_ON - TOSA_SCOOP_GPIO_BASE,
+ "power", GPIO_ACTIVE_HIGH),
+ { },
+ },
--- /dev/null
+From f607dd767f5d6800ffbdce5b99ba81763b023781 Mon Sep 17 00:00:00 2001
+From: Kathiravan T <quic_kathirav@quicinc.com>
+Date: Fri, 11 Feb 2022 17:44:15 +0530
+Subject: arm64: dts: qcom: ipq8074: fix the sleep clock frequency
+
+From: Kathiravan T <quic_kathirav@quicinc.com>
+
+commit f607dd767f5d6800ffbdce5b99ba81763b023781 upstream.
+
+Sleep clock frequency should be 32768Hz. Lets fix it.
+
+Cc: stable@vger.kernel.org
+Fixes: 41dac73e243d ("arm64: dts: Add ipq8074 SoC and HK01 board support")
+Link: https://lore.kernel.org/all/e2a447f8-6024-0369-f698-2027b6edcf9e@codeaurora.org/
+Signed-off-by: Kathiravan T <quic_kathirav@quicinc.com>
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Link: https://lore.kernel.org/r/1644581655-11568-1-git-send-email-quic_kathirav@quicinc.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/qcom/ipq8074.dtsi | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+@@ -13,7 +13,7 @@
+ clocks {
+ sleep_clk: sleep_clk {
+ compatible = "fixed-clock";
+- clock-frequency = <32000>;
++ clock-frequency = <32768>;
+ #clock-cells = <0>;
+ };
+
--- /dev/null
+From 0017f2c856e21bb900be88469e15dac4f41f4065 Mon Sep 17 00:00:00 2001
+From: Diogo Ivo <diogo.ivo@tecnico.ulisboa.pt>
+Date: Fri, 29 Apr 2022 13:58:43 +0100
+Subject: arm64: tegra: Add missing DFLL reset on Tegra210
+
+From: Diogo Ivo <diogo.ivo@tecnico.ulisboa.pt>
+
+commit 0017f2c856e21bb900be88469e15dac4f41f4065 upstream.
+
+Commit 4782c0a5dd88 ("clk: tegra: Don't deassert reset on enabling
+clocks") removed deassertion of reset lines when enabling peripheral
+clocks. This breaks the initialization of the DFLL driver which relied
+on this behaviour.
+
+In order to be able to fix this, add the corresponding reset to the DT.
+Tested on Google Pixel C.
+
+Cc: stable@vger.kernel.org
+Fixes: 4782c0a5dd88 ("clk: tegra: Don't deassert reset on enabling clocks")
+Signed-off-by: Diogo Ivo <diogo.ivo@tecnico.ulisboa.pt>
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/nvidia/tegra210.dtsi | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+@@ -1355,8 +1355,9 @@
+ <&tegra_car TEGRA210_CLK_DFLL_REF>,
+ <&tegra_car TEGRA210_CLK_I2C5>;
+ clock-names = "soc", "ref", "i2c";
+- resets = <&tegra_car TEGRA210_RST_DFLL_DVCO>;
+- reset-names = "dvco";
++ resets = <&tegra_car TEGRA210_RST_DFLL_DVCO>,
++ <&tegra_car 155>;
++ reset-names = "dvco", "dfll";
+ #clock-cells = <0>;
+ clock-output-names = "dfllCPU_out";
+ status = "disabled";
--- /dev/null
+From 4213ff556740bb45e2d9ff0f50d056c4e7dd0921 Mon Sep 17 00:00:00 2001
+From: Mark Brown <broonie@kernel.org>
+Date: Thu, 28 Apr 2022 17:24:44 +0100
+Subject: ASoC: rt5514: Fix event generation for "DSP Voice Wake Up" control
+
+From: Mark Brown <broonie@kernel.org>
+
+commit 4213ff556740bb45e2d9ff0f50d056c4e7dd0921 upstream.
+
+The driver has a custom put function for "DSP Voice Wake Up" which does
+not generate event notifications on change, instead returning 0. Since we
+already exit early in the case that there is no change this can be fixed
+by unconditionally returning 1 at the end of the function.
+
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20220428162444.3883147-1-broonie@kernel.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/codecs/rt5514.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/sound/soc/codecs/rt5514.c
++++ b/sound/soc/codecs/rt5514.c
+@@ -419,7 +419,7 @@ static int rt5514_dsp_voice_wake_up_put(
+ }
+ }
+
+- return 0;
++ return 1;
+ }
+
+ static const struct snd_kcontrol_new rt5514_snd_controls[] = {
--- /dev/null
+From 32feee36c30ea06e38ccb8ae6e5c44c6eec790a6 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Tue, 24 May 2022 18:23:36 +0800
+Subject: bcache: avoid journal no-space deadlock by reserving 1 journal bucket
+
+From: Coly Li <colyli@suse.de>
+
+commit 32feee36c30ea06e38ccb8ae6e5c44c6eec790a6 upstream.
+
+The journal no-space deadlock was reported time to time. Such deadlock
+can happen in the following situation.
+
+When all journal buckets are fully filled by active jset with heavy
+write I/O load, the cache set registration (after a reboot) will load
+all active jsets and inserting them into the btree again (which is
+called journal replay). If a journaled bkey is inserted into a btree
+node and results btree node split, new journal request might be
+triggered. For example, the btree grows one more level after the node
+split, then the root node record in cache device super block will be
+upgrade by bch_journal_meta() from bch_btree_set_root(). But there is no
+space in journal buckets, the journal replay has to wait for new journal
+bucket to be reclaimed after at least one journal bucket replayed. This
+is one example that how the journal no-space deadlock happens.
+
+The solution to avoid the deadlock is to reserve 1 journal bucket in
+run time, and only permit the reserved journal bucket to be used during
+cache set registration procedure for things like journal replay. Then
+the journal space will never be fully filled, there is no chance for
+journal no-space deadlock to happen anymore.
+
+This patch adds a new member "bool do_reserve" in struct journal, it is
+initialized to 0 (false) when struct journal is allocated, and set to
+1 (true) by bch_journal_space_reserve() when all initialization done in
+run_cache_set(). In the run time when journal_reclaim() tries to
+allocate a new journal bucket, free_journal_buckets() is called to check
+whether there are enough free journal buckets to use. If there is only
+1 free journal bucket and journal->do_reserve is 1 (true), the last
+bucket is reserved and free_journal_buckets() will return 0 to indicate
+no free journal bucket. Then journal_reclaim() will give up, and try
+next time to see whether there is free journal bucket to allocate. By
+this method, there is always 1 journal bucket reserved in run time.
+
+During the cache set registration, journal->do_reserve is 0 (false), so
+the reserved journal bucket can be used to avoid the no-space deadlock.
+
+Reported-by: Nikhil Kshirsagar <nkshirsagar@gmail.com>
+Signed-off-by: Coly Li <colyli@suse.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20220524102336.10684-5-colyli@suse.de
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/bcache/journal.c | 31 ++++++++++++++++++++++++++-----
+ drivers/md/bcache/journal.h | 2 ++
+ drivers/md/bcache/super.c | 1 +
+ 3 files changed, 29 insertions(+), 5 deletions(-)
+
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -407,6 +407,11 @@ err:
+ return ret;
+ }
+
++void bch_journal_space_reserve(struct journal *j)
++{
++ j->do_reserve = true;
++}
++
+ /* Journalling */
+
+ static void btree_flush_write(struct cache_set *c)
+@@ -625,12 +630,30 @@ static void do_journal_discard(struct ca
+ }
+ }
+
++static unsigned int free_journal_buckets(struct cache_set *c)
++{
++ struct journal *j = &c->journal;
++ struct cache *ca = c->cache;
++ struct journal_device *ja = &c->cache->journal;
++ unsigned int n;
++
++ /* In case njournal_buckets is not power of 2 */
++ if (ja->cur_idx >= ja->discard_idx)
++ n = ca->sb.njournal_buckets + ja->discard_idx - ja->cur_idx;
++ else
++ n = ja->discard_idx - ja->cur_idx;
++
++ if (n > (1 + j->do_reserve))
++ return n - (1 + j->do_reserve);
++
++ return 0;
++}
++
+ static void journal_reclaim(struct cache_set *c)
+ {
+ struct bkey *k = &c->journal.key;
+ struct cache *ca = c->cache;
+ uint64_t last_seq;
+- unsigned int next;
+ struct journal_device *ja = &ca->journal;
+ atomic_t p __maybe_unused;
+
+@@ -653,12 +676,10 @@ static void journal_reclaim(struct cache
+ if (c->journal.blocks_free)
+ goto out;
+
+- next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
+- /* No space available on this device */
+- if (next == ja->discard_idx)
++ if (!free_journal_buckets(c))
+ goto out;
+
+- ja->cur_idx = next;
++ ja->cur_idx = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
+ k->ptr[0] = MAKE_PTR(0,
+ bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
+ ca->sb.nr_this_dev);
+--- a/drivers/md/bcache/journal.h
++++ b/drivers/md/bcache/journal.h
+@@ -105,6 +105,7 @@ struct journal {
+ spinlock_t lock;
+ spinlock_t flush_write_lock;
+ bool btree_flushing;
++ bool do_reserve;
+ /* used when waiting because the journal was full */
+ struct closure_waitlist wait;
+ struct closure io;
+@@ -182,5 +183,6 @@ int bch_journal_replay(struct cache_set
+
+ void bch_journal_free(struct cache_set *c);
+ int bch_journal_alloc(struct cache_set *c);
++void bch_journal_space_reserve(struct journal *j);
+
+ #endif /* _BCACHE_JOURNAL_H */
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -2131,6 +2131,7 @@ static int run_cache_set(struct cache_se
+
+ flash_devs_run(c);
+
++ bch_journal_space_reserve(&c->journal);
+ set_bit(CACHE_SET_RUNNING, &c->flags);
+ return 0;
+ err:
--- /dev/null
+From 622536443b6731ec82c563aae7807165adbe9178 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Tue, 24 May 2022 18:23:33 +0800
+Subject: bcache: improve multithreaded bch_btree_check()
+
+From: Coly Li <colyli@suse.de>
+
+commit 622536443b6731ec82c563aae7807165adbe9178 upstream.
+
+Commit 8e7102273f59 ("bcache: make bch_btree_check() to be
+multithreaded") makes bch_btree_check() to be much faster when checking
+all btree nodes during cache device registration. But it isn't in ideal
+shape yet, still can be improved.
+
+This patch does the following thing to improve current parallel btree
+nodes check by multiple threads in bch_btree_check(),
+- Add read lock to root node while checking all the btree nodes with
+ multiple threads. Although currently it is not mandatory but it is
+ good to have a read lock in code logic.
+- Remove local variable 'char name[32]', and generate kernel thread name
+ string directly when calling kthread_run().
+- Allocate local variable "struct btree_check_state check_state" on the
+ stack and avoid unnecessary dynamic memory allocation for it.
+- Reduce BCH_BTR_CHKTHREAD_MAX from 64 to 12 which is enough indeed.
+- Increase check_state->started to count created kernel thread after it
+ succeeds to create.
+- When wait for all checking kernel threads to finish, use wait_event()
+ to replace wait_event_interruptible().
+
+With this change, the code is more clear, and some potential error
+conditions are avoided.
+
+Fixes: 8e7102273f59 ("bcache: make bch_btree_check() to be multithreaded")
+Signed-off-by: Coly Li <colyli@suse.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20220524102336.10684-2-colyli@suse.de
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/bcache/btree.c | 58 ++++++++++++++++++++--------------------------
+ drivers/md/bcache/btree.h | 2 -
+ 2 files changed, 27 insertions(+), 33 deletions(-)
+
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -2006,8 +2006,7 @@ int bch_btree_check(struct cache_set *c)
+ int i;
+ struct bkey *k = NULL;
+ struct btree_iter iter;
+- struct btree_check_state *check_state;
+- char name[32];
++ struct btree_check_state check_state;
+
+ /* check and mark root node keys */
+ for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
+@@ -2018,63 +2017,58 @@ int bch_btree_check(struct cache_set *c)
+ if (c->root->level == 0)
+ return 0;
+
+- check_state = kzalloc(sizeof(struct btree_check_state), GFP_KERNEL);
+- if (!check_state)
+- return -ENOMEM;
+-
+- check_state->c = c;
+- check_state->total_threads = bch_btree_chkthread_nr();
+- check_state->key_idx = 0;
+- spin_lock_init(&check_state->idx_lock);
+- atomic_set(&check_state->started, 0);
+- atomic_set(&check_state->enough, 0);
+- init_waitqueue_head(&check_state->wait);
++ check_state.c = c;
++ check_state.total_threads = bch_btree_chkthread_nr();
++ check_state.key_idx = 0;
++ spin_lock_init(&check_state.idx_lock);
++ atomic_set(&check_state.started, 0);
++ atomic_set(&check_state.enough, 0);
++ init_waitqueue_head(&check_state.wait);
+
++ rw_lock(0, c->root, c->root->level);
+ /*
+ * Run multiple threads to check btree nodes in parallel,
+- * if check_state->enough is non-zero, it means current
++ * if check_state.enough is non-zero, it means current
+ * running check threads are enough, unncessary to create
+ * more.
+ */
+- for (i = 0; i < check_state->total_threads; i++) {
+- /* fetch latest check_state->enough earlier */
++ for (i = 0; i < check_state.total_threads; i++) {
++ /* fetch latest check_state.enough earlier */
+ smp_mb__before_atomic();
+- if (atomic_read(&check_state->enough))
++ if (atomic_read(&check_state.enough))
+ break;
+
+- check_state->infos[i].result = 0;
+- check_state->infos[i].state = check_state;
+- snprintf(name, sizeof(name), "bch_btrchk[%u]", i);
+- atomic_inc(&check_state->started);
++ check_state.infos[i].result = 0;
++ check_state.infos[i].state = &check_state;
+
+- check_state->infos[i].thread =
++ check_state.infos[i].thread =
+ kthread_run(bch_btree_check_thread,
+- &check_state->infos[i],
+- name);
+- if (IS_ERR(check_state->infos[i].thread)) {
++ &check_state.infos[i],
++ "bch_btrchk[%d]", i);
++ if (IS_ERR(check_state.infos[i].thread)) {
+ pr_err("fails to run thread bch_btrchk[%d]\n", i);
+ for (--i; i >= 0; i--)
+- kthread_stop(check_state->infos[i].thread);
++ kthread_stop(check_state.infos[i].thread);
+ ret = -ENOMEM;
+ goto out;
+ }
++ atomic_inc(&check_state.started);
+ }
+
+ /*
+ * Must wait for all threads to stop.
+ */
+- wait_event_interruptible(check_state->wait,
+- atomic_read(&check_state->started) == 0);
++ wait_event(check_state.wait, atomic_read(&check_state.started) == 0);
+
+- for (i = 0; i < check_state->total_threads; i++) {
+- if (check_state->infos[i].result) {
+- ret = check_state->infos[i].result;
++ for (i = 0; i < check_state.total_threads; i++) {
++ if (check_state.infos[i].result) {
++ ret = check_state.infos[i].result;
+ goto out;
+ }
+ }
+
+ out:
+- kfree(check_state);
++ rw_unlock(0, c->root);
+ return ret;
+ }
+
+--- a/drivers/md/bcache/btree.h
++++ b/drivers/md/bcache/btree.h
+@@ -226,7 +226,7 @@ struct btree_check_info {
+ int result;
+ };
+
+-#define BCH_BTR_CHKTHREAD_MAX 64
++#define BCH_BTR_CHKTHREAD_MAX 12
+ struct btree_check_state {
+ struct cache_set *c;
+ int total_threads;
--- /dev/null
+From 4dc34ae1b45fe26e772a44379f936c72623dd407 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Tue, 24 May 2022 18:23:34 +0800
+Subject: bcache: improve multithreaded bch_sectors_dirty_init()
+
+From: Coly Li <colyli@suse.de>
+
+commit 4dc34ae1b45fe26e772a44379f936c72623dd407 upstream.
+
+Commit b144e45fc576 ("bcache: make bch_sectors_dirty_init() to be
+multithreaded") makes bch_sectors_dirty_init() to be much faster
+when counting dirty sectors by iterating all dirty keys in the btree.
+But it isn't in ideal shape yet, still can be improved.
+
+This patch does the following changes to improve current parallel dirty
+keys iteration on the btree,
+- Add read lock to root node when multiple threads iterating the btree,
+ to prevent the root node gets split by I/Os from other registered
+ bcache devices.
+- Remove local variable "char name[32]" and generate kernel thread name
+ string directly when calling kthread_run().
+- Allocate "struct bch_dirty_init_state state" directly on stack and
+ avoid the unnecessary dynamic memory allocation for it.
+- Decrease BCH_DIRTY_INIT_THRD_MAX from 64 to 12 which is enough indeed.
+- Increase &state->started to count created kernel thread after it
+ succeeds to create.
+- When wait for all dirty key counting threads to finish, use
+ wait_event() to replace wait_event_interruptible().
+
+With the above changes, the code is more clear, and some potential error
+conditions are avoided.
+
+Fixes: b144e45fc576 ("bcache: make bch_sectors_dirty_init() to be multithreaded")
+Signed-off-by: Coly Li <colyli@suse.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20220524102336.10684-3-colyli@suse.de
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/bcache/writeback.c | 60 ++++++++++++++++--------------------------
+ drivers/md/bcache/writeback.h | 2 -
+ 2 files changed, 25 insertions(+), 37 deletions(-)
+
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -945,10 +945,10 @@ void bch_sectors_dirty_init(struct bcach
+ struct btree_iter iter;
+ struct sectors_dirty_init op;
+ struct cache_set *c = d->c;
+- struct bch_dirty_init_state *state;
+- char name[32];
++ struct bch_dirty_init_state state;
+
+ /* Just count root keys if no leaf node */
++ rw_lock(0, c->root, c->root->level);
+ if (c->root->level == 0) {
+ bch_btree_op_init(&op.op, -1);
+ op.inode = d->id;
+@@ -958,54 +958,42 @@ void bch_sectors_dirty_init(struct bcach
+ for_each_key_filter(&c->root->keys,
+ k, &iter, bch_ptr_invalid)
+ sectors_dirty_init_fn(&op.op, c->root, k);
++ rw_unlock(0, c->root);
+ return;
+ }
+
+- state = kzalloc(sizeof(struct bch_dirty_init_state), GFP_KERNEL);
+- if (!state) {
+- pr_warn("sectors dirty init failed: cannot allocate memory\n");
+- return;
+- }
++ state.c = c;
++ state.d = d;
++ state.total_threads = bch_btre_dirty_init_thread_nr();
++ state.key_idx = 0;
++ spin_lock_init(&state.idx_lock);
++ atomic_set(&state.started, 0);
++ atomic_set(&state.enough, 0);
++ init_waitqueue_head(&state.wait);
+
+- state->c = c;
+- state->d = d;
+- state->total_threads = bch_btre_dirty_init_thread_nr();
+- state->key_idx = 0;
+- spin_lock_init(&state->idx_lock);
+- atomic_set(&state->started, 0);
+- atomic_set(&state->enough, 0);
+- init_waitqueue_head(&state->wait);
+-
+- for (i = 0; i < state->total_threads; i++) {
+- /* Fetch latest state->enough earlier */
++ for (i = 0; i < state.total_threads; i++) {
++ /* Fetch latest state.enough earlier */
+ smp_mb__before_atomic();
+- if (atomic_read(&state->enough))
++ if (atomic_read(&state.enough))
+ break;
+
+- state->infos[i].state = state;
+- atomic_inc(&state->started);
+- snprintf(name, sizeof(name), "bch_dirty_init[%d]", i);
+-
+- state->infos[i].thread =
+- kthread_run(bch_dirty_init_thread,
+- &state->infos[i],
+- name);
+- if (IS_ERR(state->infos[i].thread)) {
++ state.infos[i].state = &state;
++ state.infos[i].thread =
++ kthread_run(bch_dirty_init_thread, &state.infos[i],
++ "bch_dirtcnt[%d]", i);
++ if (IS_ERR(state.infos[i].thread)) {
+ pr_err("fails to run thread bch_dirty_init[%d]\n", i);
+ for (--i; i >= 0; i--)
+- kthread_stop(state->infos[i].thread);
++ kthread_stop(state.infos[i].thread);
+ goto out;
+ }
++ atomic_inc(&state.started);
+ }
+
+- /*
+- * Must wait for all threads to stop.
+- */
+- wait_event_interruptible(state->wait,
+- atomic_read(&state->started) == 0);
+-
+ out:
+- kfree(state);
++ /* Must wait for all threads to stop. */
++ wait_event(state.wait, atomic_read(&state.started) == 0);
++ rw_unlock(0, c->root);
+ }
+
+ void bch_cached_dev_writeback_init(struct cached_dev *dc)
+--- a/drivers/md/bcache/writeback.h
++++ b/drivers/md/bcache/writeback.h
+@@ -20,7 +20,7 @@
+ #define BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID 57
+ #define BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH 64
+
+-#define BCH_DIRTY_INIT_THRD_MAX 64
++#define BCH_DIRTY_INIT_THRD_MAX 12
+ /*
+ * 14 (16384ths) is chosen here as something that each backing device
+ * should be a reasonable fraction of the share, and not to blow up
--- /dev/null
+From 80db4e4707e78cb22287da7d058d7274bd4cb370 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Tue, 24 May 2022 18:23:35 +0800
+Subject: bcache: remove incremental dirty sector counting for bch_sectors_dirty_init()
+
+From: Coly Li <colyli@suse.de>
+
+commit 80db4e4707e78cb22287da7d058d7274bd4cb370 upstream.
+
+After making bch_sectors_dirty_init() being multithreaded, the existing
+incremental dirty sector counting in bch_root_node_dirty_init() doesn't
+release btree occupation after iterating 500000 (INIT_KEYS_EACH_TIME)
+bkeys. Because a read lock is added on btree root node to prevent the
+btree to be split during the dirty sectors counting, other I/O requester
+has no chance to gain the write lock even restart bcache_btree().
+
+That is to say, the incremental dirty sectors counting is incompatible
+to the multithreaded bch_sectors_dirty_init(). We have to choose one and
+drop another one.
+
+In my testing, with 512 bytes random writes, I generate 1.2T dirty data
+and a btree with 400K nodes. With single thread and incremental dirty
+sectors counting, it takes 30+ minites to register the backing device.
+And with multithreaded dirty sectors counting, the backing device
+registration can be accomplished within 2 minutes.
+
+The 30+ minutes V.S. 2- minutes difference makes me decide to keep
+multithreaded bch_sectors_dirty_init() and drop the incremental dirty
+sectors counting. This is what this patch does.
+
+But INIT_KEYS_EACH_TIME is kept, in sectors_dirty_init_fn() the CPU
+will be released by cond_resched() after every INIT_KEYS_EACH_TIME keys
+iterated. This is to avoid the watchdog reports a bogus soft lockup
+warning.
+
+Fixes: b144e45fc576 ("bcache: make bch_sectors_dirty_init() to be multithreaded")
+Signed-off-by: Coly Li <colyli@suse.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20220524102336.10684-4-colyli@suse.de
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/bcache/writeback.c | 39 ++++++++++++---------------------------
+ 1 file changed, 12 insertions(+), 27 deletions(-)
+
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -802,13 +802,11 @@ static int bch_writeback_thread(void *ar
+
+ /* Init */
+ #define INIT_KEYS_EACH_TIME 500000
+-#define INIT_KEYS_SLEEP_MS 100
+
+ struct sectors_dirty_init {
+ struct btree_op op;
+ unsigned int inode;
+ size_t count;
+- struct bkey start;
+ };
+
+ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
+@@ -824,11 +822,8 @@ static int sectors_dirty_init_fn(struct
+ KEY_START(k), KEY_SIZE(k));
+
+ op->count++;
+- if (atomic_read(&b->c->search_inflight) &&
+- !(op->count % INIT_KEYS_EACH_TIME)) {
+- bkey_copy_key(&op->start, k);
+- return -EAGAIN;
+- }
++ if (!(op->count % INIT_KEYS_EACH_TIME))
++ cond_resched();
+
+ return MAP_CONTINUE;
+ }
+@@ -843,24 +838,16 @@ static int bch_root_node_dirty_init(stru
+ bch_btree_op_init(&op.op, -1);
+ op.inode = d->id;
+ op.count = 0;
+- op.start = KEY(op.inode, 0, 0);
+
+- do {
+- ret = bcache_btree(map_keys_recurse,
+- k,
+- c->root,
+- &op.op,
+- &op.start,
+- sectors_dirty_init_fn,
+- 0);
+- if (ret == -EAGAIN)
+- schedule_timeout_interruptible(
+- msecs_to_jiffies(INIT_KEYS_SLEEP_MS));
+- else if (ret < 0) {
+- pr_warn("sectors dirty init failed, ret=%d!\n", ret);
+- break;
+- }
+- } while (ret == -EAGAIN);
++ ret = bcache_btree(map_keys_recurse,
++ k,
++ c->root,
++ &op.op,
++ &KEY(op.inode, 0, 0),
++ sectors_dirty_init_fn,
++ 0);
++ if (ret < 0)
++ pr_warn("sectors dirty init failed, ret=%d!\n", ret);
+
+ return ret;
+ }
+@@ -904,7 +891,6 @@ static int bch_dirty_init_thread(void *a
+ goto out;
+ }
+ skip_nr--;
+- cond_resched();
+ }
+
+ if (p) {
+@@ -914,7 +900,6 @@ static int bch_dirty_init_thread(void *a
+
+ p = NULL;
+ prev_idx = cur_idx;
+- cond_resched();
+ }
+
+ out:
+@@ -953,11 +938,11 @@ void bch_sectors_dirty_init(struct bcach
+ bch_btree_op_init(&op.op, -1);
+ op.inode = d->id;
+ op.count = 0;
+- op.start = KEY(op.inode, 0, 0);
+
+ for_each_key_filter(&c->root->keys,
+ k, &iter, bch_ptr_invalid)
+ sectors_dirty_init_fn(&op.op, c->root, k);
++
+ rw_unlock(0, c->root);
+ return;
+ }
--- /dev/null
+From 8a177a36da6c54c98b8685d4f914cb3637d53c0d Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Fri, 13 May 2022 20:55:45 -1000
+Subject: blk-iolatency: Fix inflight count imbalances and IO hangs on offline
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 8a177a36da6c54c98b8685d4f914cb3637d53c0d upstream.
+
+iolatency needs to track the number of inflight IOs per cgroup. As this
+tracking can be expensive, it is disabled when no cgroup has iolatency
+configured for the device. To ensure that the inflight counters stay
+balanced, iolatency_set_limit() freezes the request_queue while manipulating
+the enabled counter, which ensures that no IO is in flight and thus all
+counters are zero.
+
+Unfortunately, iolatency_set_limit() isn't the only place where the enabled
+counter is manipulated. iolatency_pd_offline() can also dec the counter and
+trigger disabling. As this disabling happens without freezing the q, this
+can easily happen while some IOs are in flight and thus leak the counts.
+
+This can be easily demonstrated by turning on iolatency on an one empty
+cgroup while IOs are in flight in other cgroups and then removing the
+cgroup. Note that iolatency shouldn't have been enabled elsewhere in the
+system to ensure that removing the cgroup disables iolatency for the whole
+device.
+
+The following keeps flipping on and off iolatency on sda:
+
+ echo +io > /sys/fs/cgroup/cgroup.subtree_control
+ while true; do
+ mkdir -p /sys/fs/cgroup/test
+ echo '8:0 target=100000' > /sys/fs/cgroup/test/io.latency
+ sleep 1
+ rmdir /sys/fs/cgroup/test
+ sleep 1
+ done
+
+and there's concurrent fio generating direct rand reads:
+
+ fio --name test --filename=/dev/sda --direct=1 --rw=randread \
+ --runtime=600 --time_based --iodepth=256 --numjobs=4 --bs=4k
+
+while monitoring with the following drgn script:
+
+ while True:
+ for css in css_for_each_descendant_pre(prog['blkcg_root'].css.address_of_()):
+ for pos in hlist_for_each(container_of(css, 'struct blkcg', 'css').blkg_list):
+ blkg = container_of(pos, 'struct blkcg_gq', 'blkcg_node')
+ pd = blkg.pd[prog['blkcg_policy_iolatency'].plid]
+ if pd.value_() == 0:
+ continue
+ iolat = container_of(pd, 'struct iolatency_grp', 'pd')
+ inflight = iolat.rq_wait.inflight.counter.value_()
+ if inflight:
+ print(f'inflight={inflight} {disk_name(blkg.q.disk).decode("utf-8")} '
+ f'{cgroup_path(css.cgroup).decode("utf-8")}')
+ time.sleep(1)
+
+The monitoring output looks like the following:
+
+ inflight=1 sda /user.slice
+ inflight=1 sda /user.slice
+ ...
+ inflight=14 sda /user.slice
+ inflight=13 sda /user.slice
+ inflight=17 sda /user.slice
+ inflight=15 sda /user.slice
+ inflight=18 sda /user.slice
+ inflight=17 sda /user.slice
+ inflight=20 sda /user.slice
+ inflight=19 sda /user.slice <- fio stopped, inflight stuck at 19
+ inflight=19 sda /user.slice
+ inflight=19 sda /user.slice
+
+If a cgroup with stuck inflight ends up getting throttled, the throttled IOs
+will never get issued as there's no completion event to wake it up leading
+to an indefinite hang.
+
+This patch fixes the bug by unifying enable handling into a work item which
+is automatically kicked off from iolatency_set_min_lat_nsec() which is
+called from both iolatency_set_limit() and iolatency_pd_offline() paths.
+Punting to a work item is necessary as iolatency_pd_offline() is called
+under spinlocks while freezing a request_queue requires a sleepable context.
+
+This also simplifies the code reducing LOC sans the comments and avoids the
+unnecessary freezes which were happening whenever a cgroup's latency target
+is newly set or cleared.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Cc: Josef Bacik <josef@toxicpanda.com>
+Cc: Liu Bo <bo.liu@linux.alibaba.com>
+Fixes: 8c772a9bfc7c ("blk-iolatency: fix IO hang due to negative inflight counter")
+Cc: stable@vger.kernel.org # v5.0+
+Link: https://lore.kernel.org/r/Yn9ScX6Nx2qIiQQi@slm.duckdns.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-iolatency.c | 122 ++++++++++++++++++++++++++------------------------
+ 1 file changed, 64 insertions(+), 58 deletions(-)
+
+--- a/block/blk-iolatency.c
++++ b/block/blk-iolatency.c
+@@ -86,7 +86,17 @@ struct iolatency_grp;
+ struct blk_iolatency {
+ struct rq_qos rqos;
+ struct timer_list timer;
+- atomic_t enabled;
++
++ /*
++ * ->enabled is the master enable switch gating the throttling logic and
++ * inflight tracking. The number of cgroups which have iolat enabled is
++ * tracked in ->enable_cnt, and ->enable is flipped on/off accordingly
++ * from ->enable_work with the request_queue frozen. For details, See
++ * blkiolatency_enable_work_fn().
++ */
++ bool enabled;
++ atomic_t enable_cnt;
++ struct work_struct enable_work;
+ };
+
+ static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
+@@ -94,11 +104,6 @@ static inline struct blk_iolatency *BLKI
+ return container_of(rqos, struct blk_iolatency, rqos);
+ }
+
+-static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
+-{
+- return atomic_read(&blkiolat->enabled) > 0;
+-}
+-
+ struct child_latency_info {
+ spinlock_t lock;
+
+@@ -463,7 +468,7 @@ static void blkcg_iolatency_throttle(str
+ struct blkcg_gq *blkg = bio->bi_blkg;
+ bool issue_as_root = bio_issue_as_root_blkg(bio);
+
+- if (!blk_iolatency_enabled(blkiolat))
++ if (!blkiolat->enabled)
+ return;
+
+ while (blkg && blkg->parent) {
+@@ -593,7 +598,6 @@ static void blkcg_iolatency_done_bio(str
+ u64 window_start;
+ u64 now;
+ bool issue_as_root = bio_issue_as_root_blkg(bio);
+- bool enabled = false;
+ int inflight = 0;
+
+ blkg = bio->bi_blkg;
+@@ -604,8 +608,7 @@ static void blkcg_iolatency_done_bio(str
+ if (!iolat)
+ return;
+
+- enabled = blk_iolatency_enabled(iolat->blkiolat);
+- if (!enabled)
++ if (!iolat->blkiolat->enabled)
+ return;
+
+ now = ktime_to_ns(ktime_get());
+@@ -644,6 +647,7 @@ static void blkcg_iolatency_exit(struct
+ struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
+
+ del_timer_sync(&blkiolat->timer);
++ flush_work(&blkiolat->enable_work);
+ blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
+ kfree(blkiolat);
+ }
+@@ -715,6 +719,44 @@ next:
+ rcu_read_unlock();
+ }
+
++/**
++ * blkiolatency_enable_work_fn - Enable or disable iolatency on the device
++ * @work: enable_work of the blk_iolatency of interest
++ *
++ * iolatency needs to keep track of the number of in-flight IOs per cgroup. This
++ * is relatively expensive as it involves walking up the hierarchy twice for
++ * every IO. Thus, if iolatency is not enabled in any cgroup for the device, we
++ * want to disable the in-flight tracking.
++ *
++ * We have to make sure that the counting is balanced - we don't want to leak
++ * the in-flight counts by disabling accounting in the completion path while IOs
++ * are in flight. This is achieved by ensuring that no IO is in flight by
++ * freezing the queue while flipping ->enabled. As this requires a sleepable
++ * context, ->enabled flipping is punted to this work function.
++ */
++static void blkiolatency_enable_work_fn(struct work_struct *work)
++{
++ struct blk_iolatency *blkiolat = container_of(work, struct blk_iolatency,
++ enable_work);
++ bool enabled;
++
++ /*
++ * There can only be one instance of this function running for @blkiolat
++ * and it's guaranteed to be executed at least once after the latest
++ * ->enabled_cnt modification. Acting on the latest ->enable_cnt is
++ * sufficient.
++ *
++ * Also, we know @blkiolat is safe to access as ->enable_work is flushed
++ * in blkcg_iolatency_exit().
++ */
++ enabled = atomic_read(&blkiolat->enable_cnt);
++ if (enabled != blkiolat->enabled) {
++ blk_mq_freeze_queue(blkiolat->rqos.q);
++ blkiolat->enabled = enabled;
++ blk_mq_unfreeze_queue(blkiolat->rqos.q);
++ }
++}
++
+ int blk_iolatency_init(struct request_queue *q)
+ {
+ struct blk_iolatency *blkiolat;
+@@ -740,17 +782,15 @@ int blk_iolatency_init(struct request_qu
+ }
+
+ timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);
++ INIT_WORK(&blkiolat->enable_work, blkiolatency_enable_work_fn);
+
+ return 0;
+ }
+
+-/*
+- * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
+- * return 0.
+- */
+-static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
++static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
+ {
+ struct iolatency_grp *iolat = blkg_to_lat(blkg);
++ struct blk_iolatency *blkiolat = iolat->blkiolat;
+ u64 oldval = iolat->min_lat_nsec;
+
+ iolat->min_lat_nsec = val;
+@@ -758,13 +798,15 @@ static int iolatency_set_min_lat_nsec(st
+ iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
+ BLKIOLATENCY_MAX_WIN_SIZE);
+
+- if (!oldval && val)
+- return 1;
++ if (!oldval && val) {
++ if (atomic_inc_return(&blkiolat->enable_cnt) == 1)
++ schedule_work(&blkiolat->enable_work);
++ }
+ if (oldval && !val) {
+ blkcg_clear_delay(blkg);
+- return -1;
++ if (atomic_dec_return(&blkiolat->enable_cnt) == 0)
++ schedule_work(&blkiolat->enable_work);
+ }
+- return 0;
+ }
+
+ static void iolatency_clear_scaling(struct blkcg_gq *blkg)
+@@ -796,7 +838,6 @@ static ssize_t iolatency_set_limit(struc
+ u64 lat_val = 0;
+ u64 oldval;
+ int ret;
+- int enable = 0;
+
+ ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
+ if (ret)
+@@ -831,41 +872,12 @@ static ssize_t iolatency_set_limit(struc
+ blkg = ctx.blkg;
+ oldval = iolat->min_lat_nsec;
+
+- enable = iolatency_set_min_lat_nsec(blkg, lat_val);
+- if (enable) {
+- if (!blk_get_queue(blkg->q)) {
+- ret = -ENODEV;
+- goto out;
+- }
+-
+- blkg_get(blkg);
+- }
+-
+- if (oldval != iolat->min_lat_nsec) {
++ iolatency_set_min_lat_nsec(blkg, lat_val);
++ if (oldval != iolat->min_lat_nsec)
+ iolatency_clear_scaling(blkg);
+- }
+-
+ ret = 0;
+ out:
+ blkg_conf_finish(&ctx);
+- if (ret == 0 && enable) {
+- struct iolatency_grp *tmp = blkg_to_lat(blkg);
+- struct blk_iolatency *blkiolat = tmp->blkiolat;
+-
+- blk_mq_freeze_queue(blkg->q);
+-
+- if (enable == 1)
+- atomic_inc(&blkiolat->enabled);
+- else if (enable == -1)
+- atomic_dec(&blkiolat->enabled);
+- else
+- WARN_ON_ONCE(1);
+-
+- blk_mq_unfreeze_queue(blkg->q);
+-
+- blkg_put(blkg);
+- blk_put_queue(blkg->q);
+- }
+ return ret ?: nbytes;
+ }
+
+@@ -1006,14 +1018,8 @@ static void iolatency_pd_offline(struct
+ {
+ struct iolatency_grp *iolat = pd_to_lat(pd);
+ struct blkcg_gq *blkg = lat_to_blkg(iolat);
+- struct blk_iolatency *blkiolat = iolat->blkiolat;
+- int ret;
+
+- ret = iolatency_set_min_lat_nsec(blkg, 0);
+- if (ret == 1)
+- atomic_inc(&blkiolat->enabled);
+- if (ret == -1)
+- atomic_dec(&blkiolat->enabled);
++ iolatency_set_min_lat_nsec(blkg, 0);
+ iolatency_clear_scaling(blkg);
+ }
+
--- /dev/null
+From 54a6f29522da3c914da30e50721dedf51046449a Mon Sep 17 00:00:00 2001
+From: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+Date: Mon, 28 Mar 2022 20:28:20 +0800
+Subject: carl9170: tx: fix an incorrect use of list iterator
+
+From: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+
+commit 54a6f29522da3c914da30e50721dedf51046449a upstream.
+
+If the previous list_for_each_entry_continue_rcu() doesn't exit early
+(no goto hit inside the loop), the iterator 'cvif' after the loop
+will be a bogus pointer to an invalid structure object containing
+the HEAD (&ar->vif_list). As a result, the use of 'cvif' after that
+will lead to an invalid memory access (i.e., 'cvif->id': the invalid
+pointer dereference when return back to/after the callsite in the
+carl9170_update_beacon()).
+
+The original intention should have been to return the valid 'cvif'
+when found in list, NULL otherwise. So just return NULL when no
+entry found, to fix this bug.
+
+Cc: stable@vger.kernel.org
+Fixes: 1f1d9654e183c ("carl9170: refactor carl9170_update_beacon")
+Signed-off-by: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+Acked-by: Christian Lamparter <chunkeey@gmail.com>
+Signed-off-by: Kalle Valo <quic_kvalo@quicinc.com>
+Link: https://lore.kernel.org/r/20220328122820.1004-1-xiam0nd.tong@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/ath/carl9170/tx.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/net/wireless/ath/carl9170/tx.c
++++ b/drivers/net/wireless/ath/carl9170/tx.c
+@@ -1558,6 +1558,9 @@ static struct carl9170_vif_info *carl917
+ goto out;
+ }
+ } while (ar->beacon_enabled && i--);
++
++ /* no entry found in list */
++ return NULL;
+ }
+
+ out:
--- /dev/null
+From 7bc7981eeebe1b8e603ad2ffc5e84f4df76920dd Mon Sep 17 00:00:00 2001
+From: Dimitri John Ledkov <dimitri.ledkov@canonical.com>
+Date: Thu, 14 Apr 2022 13:50:03 +0100
+Subject: cfg80211: declare MODULE_FIRMWARE for regulatory.db
+
+From: Dimitri John Ledkov <dimitri.ledkov@canonical.com>
+
+commit 7bc7981eeebe1b8e603ad2ffc5e84f4df76920dd upstream.
+
+Add MODULE_FIRMWARE declarations for regulatory.db and
+regulatory.db.p7s such that userspace tooling can discover and include
+these files.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Dimitri John Ledkov <dimitri.ledkov@canonical.com>
+Link: https://lore.kernel.org/r/20220414125004.267819-1-dimitri.ledkov@canonical.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/wireless/reg.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -806,6 +806,8 @@ static int __init load_builtin_regdb_key
+ return 0;
+ }
+
++MODULE_FIRMWARE("regulatory.db.p7s");
++
+ static bool regdb_has_valid_signature(const u8 *data, unsigned int size)
+ {
+ const struct firmware *sig;
+@@ -1077,6 +1079,8 @@ static void regdb_fw_cb(const struct fir
+ release_firmware(fw);
+ }
+
++MODULE_FIRMWARE("regulatory.db");
++
+ static int query_regdb_file(const char *alpha2)
+ {
+ ASSERT_RTNL();
--- /dev/null
+From 23a43cc437e747473d5f8f98b4fe189fb5c433b7 Mon Sep 17 00:00:00 2001
+From: Diogo Ivo <diogo.ivo@tecnico.ulisboa.pt>
+Date: Fri, 29 Apr 2022 13:58:43 +0100
+Subject: clk: tegra: Add missing reset deassertion
+
+From: Diogo Ivo <diogo.ivo@tecnico.ulisboa.pt>
+
+commit 23a43cc437e747473d5f8f98b4fe189fb5c433b7 upstream.
+
+Commit 4782c0a5dd88 ("clk: tegra: Don't deassert reset on enabling
+clocks") removed deassertion of reset lines when enabling peripheral
+clocks. This breaks the initialization of the DFLL driver which relied
+on this behaviour.
+
+Fix this problem by adding explicit deassert/assert requests to the
+driver. Tested on Google Pixel C.
+
+Cc: stable@vger.kernel.org
+Fixes: 4782c0a5dd88 ("clk: tegra: Don't deassert reset on enabling clocks")
+Signed-off-by: Diogo Ivo <diogo.ivo@tecnico.ulisboa.pt>
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/clk/tegra/clk-dfll.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/drivers/clk/tegra/clk-dfll.c
++++ b/drivers/clk/tegra/clk-dfll.c
+@@ -271,6 +271,7 @@ struct tegra_dfll {
+ struct clk *ref_clk;
+ struct clk *i2c_clk;
+ struct clk *dfll_clk;
++ struct reset_control *dfll_rst;
+ struct reset_control *dvco_rst;
+ unsigned long ref_rate;
+ unsigned long i2c_clk_rate;
+@@ -1464,6 +1465,7 @@ static int dfll_init(struct tegra_dfll *
+ return -EINVAL;
+ }
+
++ reset_control_deassert(td->dfll_rst);
+ reset_control_deassert(td->dvco_rst);
+
+ ret = clk_prepare(td->ref_clk);
+@@ -1509,6 +1511,7 @@ di_err1:
+ clk_unprepare(td->ref_clk);
+
+ reset_control_assert(td->dvco_rst);
++ reset_control_assert(td->dfll_rst);
+
+ return ret;
+ }
+@@ -1530,6 +1533,7 @@ int tegra_dfll_suspend(struct device *de
+ }
+
+ reset_control_assert(td->dvco_rst);
++ reset_control_assert(td->dfll_rst);
+
+ return 0;
+ }
+@@ -1548,6 +1552,7 @@ int tegra_dfll_resume(struct device *dev
+ {
+ struct tegra_dfll *td = dev_get_drvdata(dev);
+
++ reset_control_deassert(td->dfll_rst);
+ reset_control_deassert(td->dvco_rst);
+
+ pm_runtime_get_sync(td->dev);
+@@ -1951,6 +1956,12 @@ int tegra_dfll_register(struct platform_
+
+ td->soc = soc;
+
++ td->dfll_rst = devm_reset_control_get_optional(td->dev, "dfll");
++ if (IS_ERR(td->dfll_rst)) {
++ dev_err(td->dev, "couldn't get dfll reset\n");
++ return PTR_ERR(td->dfll_rst);
++ }
++
+ td->dvco_rst = devm_reset_control_get(td->dev, "dvco");
+ if (IS_ERR(td->dvco_rst)) {
+ dev_err(td->dev, "couldn't get dvco reset\n");
+@@ -2087,6 +2098,7 @@ struct tegra_dfll_soc_data *tegra_dfll_u
+ clk_unprepare(td->i2c_clk);
+
+ reset_control_assert(td->dvco_rst);
++ reset_control_assert(td->dfll_rst);
+
+ return td->soc;
+ }
--- /dev/null
+From 8c1d3f79d9ca48e406b78e90e94cf09a8c076bf2 Mon Sep 17 00:00:00 2001
+From: Mao Jinlong <quic_jinlmao@quicinc.com>
+Date: Wed, 9 Mar 2022 06:22:06 -0800
+Subject: coresight: core: Fix coresight device probe failure issue
+
+From: Mao Jinlong <quic_jinlmao@quicinc.com>
+
+commit 8c1d3f79d9ca48e406b78e90e94cf09a8c076bf2 upstream.
+
+It is possible that a probe failure issue happens when the device
+and its child_device's probe happens at the same time.
+In coresight_make_links, has_conns_grp is true for parent, but
+has_conns_grp is false for child device as has_conns_grp is set
+to true in coresight_create_conns_sysfs_group. The probe of parent
+device will fail at this condition. Add has_conns_grp check for
+child device before make the links and make the process from
+device_register to connection_create be atomic to avoid this
+probe failure issue.
+
+Cc: stable@vger.kernel.org
+Suggested-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Suggested-by: Mike Leach <mike.leach@linaro.org>
+Signed-off-by: Mao Jinlong <quic_jinlmao@quicinc.com>
+Link: https://lore.kernel.org/r/20220309142206.15632-1-quic_jinlmao@quicinc.com
+[ Added Cc stable ]
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hwtracing/coresight/coresight-core.c | 33 ++++++++++++++++++---------
+ 1 file changed, 22 insertions(+), 11 deletions(-)
+
+--- a/drivers/hwtracing/coresight/coresight-core.c
++++ b/drivers/hwtracing/coresight/coresight-core.c
+@@ -1382,7 +1382,7 @@ static int coresight_fixup_device_conns(
+ continue;
+ conn->child_dev =
+ coresight_find_csdev_by_fwnode(conn->child_fwnode);
+- if (conn->child_dev) {
++ if (conn->child_dev && conn->child_dev->has_conns_grp) {
+ ret = coresight_make_links(csdev, conn,
+ conn->child_dev);
+ if (ret)
+@@ -1574,6 +1574,7 @@ struct coresight_device *coresight_regis
+ int nr_refcnts = 1;
+ atomic_t *refcnts = NULL;
+ struct coresight_device *csdev;
++ bool registered = false;
+
+ csdev = kzalloc(sizeof(*csdev), GFP_KERNEL);
+ if (!csdev) {
+@@ -1594,7 +1595,8 @@ struct coresight_device *coresight_regis
+ refcnts = kcalloc(nr_refcnts, sizeof(*refcnts), GFP_KERNEL);
+ if (!refcnts) {
+ ret = -ENOMEM;
+- goto err_free_csdev;
++ kfree(csdev);
++ goto err_out;
+ }
+
+ csdev->refcnt = refcnts;
+@@ -1619,6 +1621,13 @@ struct coresight_device *coresight_regis
+ csdev->dev.fwnode = fwnode_handle_get(dev_fwnode(desc->dev));
+ dev_set_name(&csdev->dev, "%s", desc->name);
+
++ /*
++ * Make sure the device registration and the connection fixup
++ * are synchronised, so that we don't see uninitialised devices
++ * on the coresight bus while trying to resolve the connections.
++ */
++ mutex_lock(&coresight_mutex);
++
+ ret = device_register(&csdev->dev);
+ if (ret) {
+ put_device(&csdev->dev);
+@@ -1626,7 +1635,7 @@ struct coresight_device *coresight_regis
+ * All resources are free'd explicitly via
+ * coresight_device_release(), triggered from put_device().
+ */
+- goto err_out;
++ goto out_unlock;
+ }
+
+ if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+@@ -1641,11 +1650,11 @@ struct coresight_device *coresight_regis
+ * from put_device(), which is in turn called from
+ * function device_unregister().
+ */
+- goto err_out;
++ goto out_unlock;
+ }
+ }
+-
+- mutex_lock(&coresight_mutex);
++ /* Device is now registered */
++ registered = true;
+
+ ret = coresight_create_conns_sysfs_group(csdev);
+ if (!ret)
+@@ -1655,16 +1664,18 @@ struct coresight_device *coresight_regis
+ if (!ret && cti_assoc_ops && cti_assoc_ops->add)
+ cti_assoc_ops->add(csdev);
+
++out_unlock:
+ mutex_unlock(&coresight_mutex);
+- if (ret) {
++ /* Success */
++ if (!ret)
++ return csdev;
++
++ /* Unregister the device if needed */
++ if (registered) {
+ coresight_unregister(csdev);
+ return ERR_PTR(ret);
+ }
+
+- return csdev;
+-
+-err_free_csdev:
+- kfree(csdev);
+ err_out:
+ /* Cleanup the connection information */
+ coresight_release_platform_data(NULL, desc->pdata);
--- /dev/null
+From 8c4d16471e2babe9bdfe41d6ef724526629696cb Mon Sep 17 00:00:00 2001
+From: Guo Ren <guoren@linux.alibaba.com>
+Date: Wed, 6 Apr 2022 22:28:43 +0800
+Subject: csky: patch_text: Fixup last cpu should be master
+
+From: Guo Ren <guoren@linux.alibaba.com>
+
+commit 8c4d16471e2babe9bdfe41d6ef724526629696cb upstream.
+
+These patch_text implementations are using stop_machine_cpuslocked
+infrastructure with atomic cpu_count. The original idea: When the
+master CPU patch_text, the others should wait for it. But current
+implementation is using the first CPU as master, which couldn't
+guarantee the remaining CPUs are waiting. This patch changes the
+last CPU as the master to solve the potential risk.
+
+Fixes: 33e53ae1ce41 ("csky: Add kprobes supported")
+Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
+Signed-off-by: Guo Ren <guoren@kernel.org>
+Reviewed-by: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/csky/kernel/probes/kprobes.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/csky/kernel/probes/kprobes.c
++++ b/arch/csky/kernel/probes/kprobes.c
+@@ -28,7 +28,7 @@ static int __kprobes patch_text_cb(void
+ struct csky_insn_patch *param = priv;
+ unsigned int addr = (unsigned int)param->addr;
+
+- if (atomic_inc_return(&param->cpu_count) == 1) {
++ if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
+ *(u16 *) addr = cpu_to_le16(param->opcode);
+ dcache_wb_range(addr, addr + 2);
+ atomic_inc(&param->cpu_count);
--- /dev/null
+From 627f01eab93d8671d4e4afee9b148f9998d20e7c Mon Sep 17 00:00:00 2001
+From: Akira Yokosawa <akiyks@gmail.com>
+Date: Wed, 1 Jun 2022 23:34:06 +0900
+Subject: docs/conf.py: Cope with removal of language=None in Sphinx 5.0.0
+
+From: Akira Yokosawa <akiyks@gmail.com>
+
+commit 627f01eab93d8671d4e4afee9b148f9998d20e7c upstream.
+
+One of the changes in Sphinx 5.0.0 [1] says [sic]:
+
+ 5.0.0 final
+
+ - #10474: language does not accept None as it value.
+ The default value of language becomes to 'en' now.
+
+[1]: https://www.sphinx-doc.org/en/master/changes.html#release-5-0-0-released-may-30-2022
+
+It results in a new warning from Sphinx 5.0.0 [sic]:
+
+ WARNING: Invalid configuration value found: 'language = None'.
+ Update your configuration to a valid langauge code. Falling
+ back to 'en' (English).
+
+Silence the warning by using 'en'.
+It works with all the Sphinx versions required for building
+kernel documentation (1.7.9 or later).
+
+Signed-off-by: Akira Yokosawa <akiyks@gmail.com>
+Link: https://lore.kernel.org/r/bd0c2ddc-2401-03cb-4526-79ca664e1cbe@gmail.com
+Cc: stable@vger.kernel.org
+Signed-off-by: Jonathan Corbet <corbet@lwn.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/conf.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/Documentation/conf.py
++++ b/Documentation/conf.py
+@@ -161,7 +161,7 @@ finally:
+ #
+ # This is also used if you do content translation via gettext catalogs.
+ # Usually you set "language" from the command line for these cases.
+-language = None
++language = 'en'
+
+ # There are two options for replacing |today|: either, you set today to some
+ # non-false value, then it is used:
--- /dev/null
+From 3a21c3ac93aff7b4522b152399df8f6a041df56d Mon Sep 17 00:00:00 2001
+From: Dinh Nguyen <dinguyen@kernel.org>
+Date: Wed, 11 May 2022 12:54:46 -0500
+Subject: dt-bindings: gpio: altera: correct interrupt-cells
+
+From: Dinh Nguyen <dinguyen@kernel.org>
+
+commit 3a21c3ac93aff7b4522b152399df8f6a041df56d upstream.
+
+update documentation to correctly state the interrupt-cells to be 2.
+
+Cc: stable@vger.kernel.org
+Fixes: 4fd9bbc6e071 ("drivers/gpio: Altera soft IP GPIO driver devicetree binding")
+Signed-off-by: Dinh Nguyen <dinguyen@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/devicetree/bindings/gpio/gpio-altera.txt | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/Documentation/devicetree/bindings/gpio/gpio-altera.txt
++++ b/Documentation/devicetree/bindings/gpio/gpio-altera.txt
+@@ -9,8 +9,9 @@ Required properties:
+ - The second cell is reserved and is currently unused.
+ - gpio-controller : Marks the device node as a GPIO controller.
+ - interrupt-controller: Mark the device node as an interrupt controller
+-- #interrupt-cells : Should be 1. The interrupt type is fixed in the hardware.
++- #interrupt-cells : Should be 2. The interrupt type is fixed in the hardware.
+ - The first cell is the GPIO offset number within the GPIO controller.
++ - The second cell is the interrupt trigger type and level flags.
+ - interrupts: Specify the interrupt.
+ - altr,interrupt-type: Specifies the interrupt trigger type the GPIO
+ hardware is synthesized. This field is required if the Altera GPIO controller
+@@ -38,6 +39,6 @@ gpio_altr: gpio@ff200000 {
+ altr,interrupt-type = <IRQ_TYPE_EDGE_RISING>;
+ #gpio-cells = <2>;
+ gpio-controller;
+- #interrupt-cells = <1>;
++ #interrupt-cells = <2>;
+ interrupt-controller;
+ };
--- /dev/null
+From 7d54c15cb89a29a5f59e5ffc9ee62e6591769ef1 Mon Sep 17 00:00:00 2001
+From: Song Liu <song@kernel.org>
+Date: Tue, 24 May 2022 10:08:39 -0700
+Subject: ftrace: Clean up hash direct_functions on register failures
+
+From: Song Liu <song@kernel.org>
+
+commit 7d54c15cb89a29a5f59e5ffc9ee62e6591769ef1 upstream.
+
+We see the following GPF when register_ftrace_direct fails:
+
+[ ] general protection fault, probably for non-canonical address \
+ 0x200000000000010: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC PTI
+[...]
+[ ] RIP: 0010:ftrace_find_rec_direct+0x53/0x70
+[ ] Code: 48 c1 e0 03 48 03 42 08 48 8b 10 31 c0 48 85 d2 74 [...]
+[ ] RSP: 0018:ffffc9000138bc10 EFLAGS: 00010206
+[ ] RAX: 0000000000000000 RBX: ffffffff813e0df0 RCX: 000000000000003b
+[ ] RDX: 0200000000000000 RSI: 000000000000000c RDI: ffffffff813e0df0
+[ ] RBP: ffffffffa00a3000 R08: ffffffff81180ce0 R09: 0000000000000001
+[ ] R10: ffffc9000138bc18 R11: 0000000000000001 R12: ffffffff813e0df0
+[ ] R13: ffffffff813e0df0 R14: ffff888171b56400 R15: 0000000000000000
+[ ] FS: 00007fa9420c7780(0000) GS:ffff888ff6a00000(0000) knlGS:000000000
+[ ] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ ] CR2: 000000000770d000 CR3: 0000000107d50003 CR4: 0000000000370ee0
+[ ] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ ] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[ ] Call Trace:
+[ ] <TASK>
+[ ] register_ftrace_direct+0x54/0x290
+[ ] ? render_sigset_t+0xa0/0xa0
+[ ] bpf_trampoline_update+0x3f5/0x4a0
+[ ] ? 0xffffffffa00a3000
+[ ] bpf_trampoline_link_prog+0xa9/0x140
+[ ] bpf_tracing_prog_attach+0x1dc/0x450
+[ ] bpf_raw_tracepoint_open+0x9a/0x1e0
+[ ] ? find_held_lock+0x2d/0x90
+[ ] ? lock_release+0x150/0x430
+[ ] __sys_bpf+0xbd6/0x2700
+[ ] ? lock_is_held_type+0xd8/0x130
+[ ] __x64_sys_bpf+0x1c/0x20
+[ ] do_syscall_64+0x3a/0x80
+[ ] entry_SYSCALL_64_after_hwframe+0x44/0xae
+[ ] RIP: 0033:0x7fa9421defa9
+[ ] Code: 00 c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 9 f8 [...]
+[ ] RSP: 002b:00007ffed743bd78 EFLAGS: 00000246 ORIG_RAX: 0000000000000141
+[ ] RAX: ffffffffffffffda RBX: 00000000069d2480 RCX: 00007fa9421defa9
+[ ] RDX: 0000000000000078 RSI: 00007ffed743bd80 RDI: 0000000000000011
+[ ] RBP: 00007ffed743be00 R08: 0000000000bb7270 R09: 0000000000000000
+[ ] R10: 00000000069da210 R11: 0000000000000246 R12: 0000000000000001
+[ ] R13: 00007ffed743c4b0 R14: 00000000069d2480 R15: 0000000000000001
+[ ] </TASK>
+[ ] Modules linked in: klp_vm(OK)
+[ ] ---[ end trace 0000000000000000 ]---
+
+One way to trigger this is:
+ 1. load a livepatch that patches kernel function xxx;
+ 2. run bpftrace -e 'kfunc:xxx {}', this will fail (expected for now);
+ 3. repeat #2 => gpf.
+
+This is because the entry is added to direct_functions, but not removed.
+Fix this by remove the entry from direct_functions when
+register_ftrace_direct fails.
+
+Also remove the last trailing space from ftrace.c, so we don't have to
+worry about it anymore.
+
+Link: https://lkml.kernel.org/r/20220524170839.900849-1-song@kernel.org
+
+Cc: stable@vger.kernel.org
+Fixes: 763e34e74bb7 ("ftrace: Add register_ftrace_direct()")
+Signed-off-by: Song Liu <song@kernel.org>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/ftrace.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -4420,7 +4420,7 @@ int ftrace_func_mapper_add_ip(struct ftr
+ * @ip: The instruction pointer address to remove the data from
+ *
+ * Returns the data if it is found, otherwise NULL.
+- * Note, if the data pointer is used as the data itself, (see
++ * Note, if the data pointer is used as the data itself, (see
+ * ftrace_func_mapper_find_ip(), then the return value may be meaningless,
+ * if the data pointer was set to zero.
+ */
+@@ -5146,8 +5146,6 @@ int register_ftrace_direct(unsigned long
+ __add_hash_entry(direct_functions, entry);
+
+ ret = ftrace_set_filter_ip(&direct_ops, ip, 0, 0);
+- if (ret)
+- remove_hash_entry(direct_functions, entry);
+
+ if (!ret && !(direct_ops.flags & FTRACE_OPS_FL_ENABLED)) {
+ ret = register_ftrace_function(&direct_ops);
+@@ -5156,6 +5154,7 @@ int register_ftrace_direct(unsigned long
+ }
+
+ if (ret) {
++ remove_hash_entry(direct_functions, entry);
+ kfree(entry);
+ if (!direct->count) {
+ list_del_rcu(&direct->next);
--- /dev/null
+From bdef417d84536715145f6dc9cc3275c46f26295a Mon Sep 17 00:00:00 2001
+From: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+Date: Sun, 27 Mar 2022 13:20:28 +0800
+Subject: gma500: fix an incorrect NULL check on list iterator
+
+From: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+
+commit bdef417d84536715145f6dc9cc3275c46f26295a upstream.
+
+The bug is here:
+ return crtc;
+
+The list iterator value 'crtc' will *always* be set and non-NULL by
+list_for_each_entry(), so it is incorrect to assume that the iterator
+value will be NULL if the list is empty or no element is found.
+
+To fix the bug, return 'crtc' when found, otherwise return NULL.
+
+Cc: stable@vger.kernel.org
+fixes: 89c78134cc54d ("gma500: Add Poulsbo support")
+Signed-off-by: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+Signed-off-by: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220327052028.2013-1-xiam0nd.tong@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/gma500/psb_intel_display.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/gma500/psb_intel_display.c
++++ b/drivers/gpu/drm/gma500/psb_intel_display.c
+@@ -536,14 +536,15 @@ void psb_intel_crtc_init(struct drm_devi
+
+ struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
+ {
+- struct drm_crtc *crtc = NULL;
++ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
++
+ if (gma_crtc->pipe == pipe)
+- break;
++ return crtc;
+ }
+- return crtc;
++ return NULL;
+ }
+
+ int gma_connector_clones(struct drm_device *dev, int type_mask)
--- /dev/null
+From 48381273f8734d28ef56a5bdf1966dd8530111bc Mon Sep 17 00:00:00 2001
+From: Mike Kravetz <mike.kravetz@oracle.com>
+Date: Tue, 24 May 2022 13:50:03 -0700
+Subject: hugetlb: fix huge_pmd_unshare address update
+
+From: Mike Kravetz <mike.kravetz@oracle.com>
+
+commit 48381273f8734d28ef56a5bdf1966dd8530111bc upstream.
+
+The routine huge_pmd_unshare() is passed a pointer to an address
+associated with an area which may be unshared. If unshare is successful
+this address is updated to 'optimize' callers iterating over huge page
+addresses. For the optimization to work correctly, address should be
+updated to the last huge page in the unmapped/unshared area. However, in
+the common case where the passed address is PUD_SIZE aligned, the address
+is incorrectly updated to the address of the preceding huge page. That
+wastes CPU cycles as the unmapped/unshared range is scanned twice.
+
+Link: https://lkml.kernel.org/r/20220524205003.126184-1-mike.kravetz@oracle.com
+Fixes: 39dde65c9940 ("shared page table for hugetlb page")
+Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
+Acked-by: Muchun Song <songmuchun@bytedance.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/hugetlb.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -6060,7 +6060,14 @@ int huge_pmd_unshare(struct mm_struct *m
+ pud_clear(pud);
+ put_page(virt_to_page(ptep));
+ mm_dec_nr_pmds(mm);
+- *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
++ /*
++ * This update of passed address optimizes loops sequentially
++ * processing addresses in increments of huge page size (PMD_SIZE
++ * in this case). By clearing the pud, a PUD_SIZE area is unmapped.
++ * Update address to the 'last page' in the cleared area so that
++ * calling loop can move to first page past this area.
++ */
++ *addr |= PUD_SIZE - PMD_SIZE;
+ return 1;
+ }
+
--- /dev/null
+From 891163adf180bc369b2f11c9dfce6d2758d2a5bd Mon Sep 17 00:00:00 2001
+From: GUO Zihua <guozihua@huawei.com>
+Date: Thu, 7 Apr 2022 10:16:19 +0800
+Subject: ima: remove the IMA_TEMPLATE Kconfig option
+
+From: GUO Zihua <guozihua@huawei.com>
+
+commit 891163adf180bc369b2f11c9dfce6d2758d2a5bd upstream.
+
+The original 'ima' measurement list template contains a hash, defined
+as 20 bytes, and a null terminated pathname, limited to 255
+characters. Other measurement list templates permit both larger hashes
+and longer pathnames. When the "ima" template is configured as the
+default, a new measurement list template (ima_template=) must be
+specified before specifying a larger hash algorithm (ima_hash=) on the
+boot command line.
+
+To avoid this boot command line ordering issue, remove the legacy "ima"
+template configuration option, allowing it to still be specified on the
+boot command line.
+
+The root cause of this issue is that during the processing of ima_hash,
+we would try to check whether the hash algorithm is compatible with the
+template. If the template is not set at the moment we do the check, we
+check the algorithm against the configured default template. If the
+default template is "ima", then we reject any hash algorithm other than
+sha1 and md5.
+
+For example, if the compiled default template is "ima", and the default
+algorithm is sha1 (which is the current default). In the cmdline, we put
+in "ima_hash=sha256 ima_template=ima-ng". The expected behavior would be
+that ima starts with ima-ng as the template and sha256 as the hash
+algorithm. However, during the processing of "ima_hash=",
+"ima_template=" has not been processed yet, and hash_setup would check
+the configured hash algorithm against the compiled default: ima, and
+reject sha256. So at the end, the hash algorithm that is actually used
+will be sha1.
+
+With template "ima" removed from the configured default, we ensure that
+the default template would at least be "ima-ng" which allows for
+basically any hash algorithm.
+
+This change would not break the algorithm compatibility checks for IMA.
+
+Fixes: 4286587dccd43 ("ima: add Kconfig default measurement list template")
+Signed-off-by: GUO Zihua <guozihua@huawei.com>
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ security/integrity/ima/Kconfig | 14 ++++++--------
+ 1 file changed, 6 insertions(+), 8 deletions(-)
+
+--- a/security/integrity/ima/Kconfig
++++ b/security/integrity/ima/Kconfig
+@@ -69,10 +69,9 @@ choice
+ hash, defined as 20 bytes, and a null terminated pathname,
+ limited to 255 characters. The 'ima-ng' measurement list
+ template permits both larger hash digests and longer
+- pathnames.
++ pathnames. The configured default template can be replaced
++ by specifying "ima_template=" on the boot command line.
+
+- config IMA_TEMPLATE
+- bool "ima"
+ config IMA_NG_TEMPLATE
+ bool "ima-ng (default)"
+ config IMA_SIG_TEMPLATE
+@@ -82,7 +81,6 @@ endchoice
+ config IMA_DEFAULT_TEMPLATE
+ string
+ depends on IMA
+- default "ima" if IMA_TEMPLATE
+ default "ima-ng" if IMA_NG_TEMPLATE
+ default "ima-sig" if IMA_SIG_TEMPLATE
+
+@@ -102,19 +100,19 @@ choice
+
+ config IMA_DEFAULT_HASH_SHA256
+ bool "SHA256"
+- depends on CRYPTO_SHA256=y && !IMA_TEMPLATE
++ depends on CRYPTO_SHA256=y
+
+ config IMA_DEFAULT_HASH_SHA512
+ bool "SHA512"
+- depends on CRYPTO_SHA512=y && !IMA_TEMPLATE
++ depends on CRYPTO_SHA512=y
+
+ config IMA_DEFAULT_HASH_WP512
+ bool "WP512"
+- depends on CRYPTO_WP512=y && !IMA_TEMPLATE
++ depends on CRYPTO_WP512=y
+
+ config IMA_DEFAULT_HASH_SM3
+ bool "SM3"
+- depends on CRYPTO_SM3=y && !IMA_TEMPLATE
++ depends on CRYPTO_SM3=y
+ endchoice
+
+ config IMA_DEFAULT_HASH
--- /dev/null
+From a3884774d731f03d3a3dd4fb70ec2d9341ceb39d Mon Sep 17 00:00:00 2001
+From: Yunfei Wang <yf.wang@mediatek.com>
+Date: Sat, 7 May 2022 16:52:03 +0800
+Subject: iommu/dma: Fix iova map result check bug
+
+From: Yunfei Wang <yf.wang@mediatek.com>
+
+commit a3884774d731f03d3a3dd4fb70ec2d9341ceb39d upstream.
+
+The data type of the return value of the iommu_map_sg_atomic
+is ssize_t, but the data type of iova size is size_t,
+e.g. one is int while the other is unsigned int.
+
+When iommu_map_sg_atomic return value is compared with iova size,
+it will force the signed int to be converted to unsigned int, if
+iova map fails and iommu_map_sg_atomic return error code is less
+than 0, then (ret < iova_len) is false, which causes the iova not
+to be freed, and the master can still successfully get the iova
+of the failed map, which is not expected.
+
+Therefore, we need to check the return value of iommu_map_sg_atomic
+in two cases according to whether it is less than 0.
+
+Fixes: ad8f36e4b6b1 ("iommu: return full error code from iommu_map_sg[_atomic]()")
+Signed-off-by: Yunfei Wang <yf.wang@mediatek.com>
+Cc: <stable@vger.kernel.org> # 5.15.*
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Reviewed-by: Miles Chen <miles.chen@mediatek.com>
+Link: https://lore.kernel.org/r/20220507085204.16914-1-yf.wang@mediatek.com
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iommu/dma-iommu.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/iommu/dma-iommu.c
++++ b/drivers/iommu/dma-iommu.c
+@@ -619,6 +619,7 @@ static struct page **__iommu_dma_alloc_n
+ unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
+ struct page **pages;
+ dma_addr_t iova;
++ ssize_t ret;
+
+ if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
+ iommu_deferred_attach(dev, domain))
+@@ -656,8 +657,8 @@ static struct page **__iommu_dma_alloc_n
+ arch_dma_prep_coherent(sg_page(sg), sg->length);
+ }
+
+- if (iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot)
+- < size)
++ ret = iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot);
++ if (ret < 0 || ret < size)
+ goto out_free_sg;
+
+ sgt->sgl->dma_address = iova;
+@@ -1054,7 +1055,7 @@ static int iommu_dma_map_sg(struct devic
+ * implementation - it knows better than we do.
+ */
+ ret = iommu_map_sg_atomic(domain, iova, sg, nents, prot);
+- if (ret < iova_len)
++ if (ret < 0 || ret < iova_len)
+ goto out_free_iova;
+
+ return __finalise_sg(dev, sg, nents, iova);
--- /dev/null
+From 8b9ad480bd1dd25f4ff4854af5685fa334a2f57a Mon Sep 17 00:00:00 2001
+From: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+Date: Sun, 1 May 2022 21:28:23 +0800
+Subject: iommu/msm: Fix an incorrect NULL check on list iterator
+
+From: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+
+commit 8b9ad480bd1dd25f4ff4854af5685fa334a2f57a upstream.
+
+The bug is here:
+ if (!iommu || iommu->dev->of_node != spec->np) {
+
+The list iterator value 'iommu' will *always* be set and non-NULL by
+list_for_each_entry(), so it is incorrect to assume that the iterator
+value will be NULL if the list is empty or no element is found (in fact,
+it will point to an invalid structure object containing HEAD).
+
+To fix the bug, use a new value 'iter' as the list iterator, while use
+the old value 'iommu' as a dedicated variable to point to the found one,
+and remove the unneeded check for 'iommu->dev->of_node != spec->np'
+outside the loop.
+
+Cc: stable@vger.kernel.org
+Fixes: f78ebca8ff3d6 ("iommu/msm: Add support for generic master bindings")
+Signed-off-by: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+Link: https://lore.kernel.org/r/20220501132823.12714-1-xiam0nd.tong@gmail.com
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/iommu/msm_iommu.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/drivers/iommu/msm_iommu.c
++++ b/drivers/iommu/msm_iommu.c
+@@ -615,16 +615,19 @@ static void insert_iommu_master(struct d
+ static int qcom_iommu_of_xlate(struct device *dev,
+ struct of_phandle_args *spec)
+ {
+- struct msm_iommu_dev *iommu;
++ struct msm_iommu_dev *iommu = NULL, *iter;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&msm_iommu_lock, flags);
+- list_for_each_entry(iommu, &qcom_iommu_devices, dev_node)
+- if (iommu->dev->of_node == spec->np)
++ list_for_each_entry(iter, &qcom_iommu_devices, dev_node) {
++ if (iter->dev->of_node == spec->np) {
++ iommu = iter;
+ break;
++ }
++ }
+
+- if (!iommu || iommu->dev->of_node != spec->np) {
++ if (!iommu) {
+ ret = -ENODEV;
+ goto fail;
+ }
--- /dev/null
+From a3d66a76348daf559873f19afc912a2a7c2ccdaf Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org>
+Date: Mon, 25 Apr 2022 13:37:05 +0200
+Subject: irqchip/armada-370-xp: Do not touch Performance Counter Overflow on A375, A38x, A39x
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Pali Rohár <pali@kernel.org>
+
+commit a3d66a76348daf559873f19afc912a2a7c2ccdaf upstream.
+
+Register ARMADA_370_XP_INT_FABRIC_MASK_OFFS is Armada 370 and XP specific
+and on new Armada platforms it has different meaning. It does not configure
+Performance Counter Overflow interrupt masking. So do not touch this
+register on non-A370/XP platforms (A375, A38x and A39x).
+
+Signed-off-by: Pali Rohár <pali@kernel.org>
+Cc: stable@vger.kernel.org
+Fixes: 28da06dfd9e4 ("irqchip: armada-370-xp: Enable the PMU interrupts")
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20220425113706.29310-1-pali@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/irqchip/irq-armada-370-xp.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/drivers/irqchip/irq-armada-370-xp.c
++++ b/drivers/irqchip/irq-armada-370-xp.c
+@@ -308,7 +308,16 @@ static inline int armada_370_xp_msi_init
+
+ static void armada_xp_mpic_perf_init(void)
+ {
+- unsigned long cpuid = cpu_logical_map(smp_processor_id());
++ unsigned long cpuid;
++
++ /*
++ * This Performance Counter Overflow interrupt is specific for
++ * Armada 370 and XP. It is not available on Armada 375, 38x and 39x.
++ */
++ if (!of_machine_is_compatible("marvell,armada-370-xp"))
++ return;
++
++ cpuid = cpu_logical_map(smp_processor_id());
+
+ /* Enable Performance Counter Overflow interrupts */
+ writel(ARMADA_370_XP_INT_CAUSE_PERF(cpuid),
--- /dev/null
+From a255ee29252066d621df5d6b420bf534c6ba5bc0 Mon Sep 17 00:00:00 2001
+From: Max Filippov <jcmvbkbc@gmail.com>
+Date: Tue, 26 Apr 2022 09:01:18 -0700
+Subject: irqchip: irq-xtensa-mx: fix initial IRQ affinity
+
+From: Max Filippov <jcmvbkbc@gmail.com>
+
+commit a255ee29252066d621df5d6b420bf534c6ba5bc0 upstream.
+
+When irq-xtensa-mx chip is used in non-SMP configuration its
+irq_set_affinity callback is not called leaving IRQ affinity set empty.
+As a result IRQ delivery does not work in that configuration.
+Initialize IRQ affinity of the xtensa MX interrupt distributor to CPU 0
+for all external IRQ lines.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/irqchip/irq-xtensa-mx.c | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+--- a/drivers/irqchip/irq-xtensa-mx.c
++++ b/drivers/irqchip/irq-xtensa-mx.c
+@@ -151,14 +151,25 @@ static struct irq_chip xtensa_mx_irq_chi
+ .irq_set_affinity = xtensa_mx_irq_set_affinity,
+ };
+
++static void __init xtensa_mx_init_common(struct irq_domain *root_domain)
++{
++ unsigned int i;
++
++ irq_set_default_host(root_domain);
++ secondary_init_irq();
++
++ /* Initialize default IRQ routing to CPU 0 */
++ for (i = 0; i < XCHAL_NUM_EXTINTERRUPTS; ++i)
++ set_er(1, MIROUT(i));
++}
++
+ int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
+ {
+ struct irq_domain *root_domain =
+ irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
+ &xtensa_mx_irq_domain_ops,
+ &xtensa_mx_irq_chip);
+- irq_set_default_host(root_domain);
+- secondary_init_irq();
++ xtensa_mx_init_common(root_domain);
+ return 0;
+ }
+
+@@ -168,8 +179,7 @@ static int __init xtensa_mx_init(struct
+ struct irq_domain *root_domain =
+ irq_domain_add_linear(np, NR_IRQS, &xtensa_mx_irq_domain_ops,
+ &xtensa_mx_irq_chip);
+- irq_set_default_host(root_domain);
+- secondary_init_irq();
++ xtensa_mx_init_common(root_domain);
+ return 0;
+ }
+ IRQCHIP_DECLARE(xtensa_mx_irq_chip, "cdns,xtensa-mx", xtensa_mx_init);
--- /dev/null
+From 1aa0e8b144b6474c4914439d232d15bfe883636b Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Wed, 2 Feb 2022 00:49:41 +0000
+Subject: Kconfig: Add option for asm goto w/ tied outputs to workaround clang-13 bug
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 1aa0e8b144b6474c4914439d232d15bfe883636b upstream.
+
+Add a config option to guard (future) usage of asm_volatile_goto() that
+includes "tied outputs", i.e. "+" constraints that specify both an input
+and output parameter. clang-13 has a bug[1] that causes compilation of
+such inline asm to fail, and KVM wants to use a "+m" constraint to
+implement a uaccess form of CMPXCHG[2]. E.g. the test code fails with
+
+ <stdin>:1:29: error: invalid operand in inline asm: '.long (${1:l}) - .'
+ int foo(int *x) { asm goto (".long (%l[bar]) - .\n": "+m"(*x) ::: bar); return *x; bar: return 0; }
+ ^
+ <stdin>:1:29: error: unknown token in expression
+ <inline asm>:1:9: note: instantiated into assembly here
+ .long () - .
+ ^
+ 2 errors generated.
+
+on clang-13, but passes on gcc (with appropriate asm goto support). The
+bug is fixed in clang-14, but won't be backported to clang-13 as the
+changes are too invasive/risky.
+
+gcc also had a similar bug[3], fixed in gcc-11, where gcc failed to
+account for its behavior of assigning two numbers to tied outputs (one
+for input, one for output) when evaluating symbolic references.
+
+[1] https://github.com/ClangBuiltLinux/linux/issues/1512
+[2] https://lore.kernel.org/all/YfMruK8%2F1izZ2VHS@google.com
+[3] https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98096
+
+Suggested-by: Nick Desaulniers <ndesaulniers@google.com>
+Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20220202004945.2540433-2-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ init/Kconfig | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -77,6 +77,11 @@ config CC_HAS_ASM_GOTO_OUTPUT
+ depends on CC_HAS_ASM_GOTO
+ def_bool $(success,echo 'int foo(int x) { asm goto ("": "=r"(x) ::: bar); return x; bar: return 0; }' | $(CC) -x c - -c -o /dev/null)
+
++config CC_HAS_ASM_GOTO_TIED_OUTPUT
++ depends on CC_HAS_ASM_GOTO_OUTPUT
++ # Detect buggy gcc and clang, fixed in gcc-11 clang-14.
++	def_bool $(success,echo 'int foo(int *x) { asm goto (".long (%l[bar]) - .\n": "+m"(*x) ::: bar); return *x; bar: return 0; }' | $(CC) -x c - -c -o /dev/null)
++
+ config TOOLS_SUPPORT_RELR
+ def_bool $(success,env "CC=$(CC)" "LD=$(LD)" "NM=$(NM)" "OBJCOPY=$(OBJCOPY)" $(srctree)/scripts/tools-support-relr.sh)
+
--- /dev/null
+From 3e35142ef99fe6b4fe5d834ad43ee13cca10a2dc Mon Sep 17 00:00:00 2001
+From: "Naveen N. Rao" <naveen.n.rao@linux.vnet.ibm.com>
+Date: Thu, 19 May 2022 14:42:37 +0530
+Subject: kexec_file: drop weak attribute from arch_kexec_apply_relocations[_add]
+
+From: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+
+commit 3e35142ef99fe6b4fe5d834ad43ee13cca10a2dc upstream.
+
+Since commit d1bcae833b32f1 ("ELF: Don't generate unused section
+symbols") [1], binutils (v2.36+) started dropping section symbols that
+it thought were unused. This isn't an issue in general, but with
+kexec_file.c, gcc is placing kexec_arch_apply_relocations[_add] into a
+separate .text.unlikely section and the section symbol ".text.unlikely"
+is being dropped. Due to this, recordmcount is unable to find a non-weak
+symbol in .text.unlikely to generate a relocation record against.
+
+Address this by dropping the weak attribute from these functions.
+Instead, follow the existing pattern of having architectures #define the
+name of the function they want to override in their headers.
+
+[1] https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=d1bcae833b32f1
+
+[akpm@linux-foundation.org: arch/s390/include/asm/kexec.h needs linux/module.h]
+Link: https://lkml.kernel.org/r/20220519091237.676736-1-naveen.n.rao@linux.vnet.ibm.com
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+Cc: "Eric W. Biederman" <ebiederm@xmission.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/include/asm/kexec.h | 10 +++++++++
+ arch/x86/include/asm/kexec.h | 8 +++++++
+ include/linux/kexec.h | 46 ++++++++++++++++++++++++++++++++++--------
+ kernel/kexec_file.c | 34 -------------------------------
+ 4 files changed, 56 insertions(+), 42 deletions(-)
+
+--- a/arch/s390/include/asm/kexec.h
++++ b/arch/s390/include/asm/kexec.h
+@@ -9,6 +9,8 @@
+ #ifndef _S390_KEXEC_H
+ #define _S390_KEXEC_H
+
++#include <linux/module.h>
++
+ #include <asm/processor.h>
+ #include <asm/page.h>
+ #include <asm/setup.h>
+@@ -83,4 +85,12 @@ struct kimage_arch {
+ extern const struct kexec_file_ops s390_kexec_image_ops;
+ extern const struct kexec_file_ops s390_kexec_elf_ops;
+
++#ifdef CONFIG_KEXEC_FILE
++struct purgatory_info;
++int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
++ Elf_Shdr *section,
++ const Elf_Shdr *relsec,
++ const Elf_Shdr *symtab);
++#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
++#endif
+ #endif /*_S390_KEXEC_H */
+--- a/arch/x86/include/asm/kexec.h
++++ b/arch/x86/include/asm/kexec.h
+@@ -186,6 +186,14 @@ extern int arch_kexec_post_alloc_pages(v
+ extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
+ #define arch_kexec_pre_free_pages arch_kexec_pre_free_pages
+
++#ifdef CONFIG_KEXEC_FILE
++struct purgatory_info;
++int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
++ Elf_Shdr *section,
++ const Elf_Shdr *relsec,
++ const Elf_Shdr *symtab);
++#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
++#endif
+ #endif
+
+ typedef void crash_vmclear_fn(void);
+--- a/include/linux/kexec.h
++++ b/include/linux/kexec.h
+@@ -187,14 +187,6 @@ void *kexec_purgatory_get_symbol_addr(st
+ int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
+ unsigned long buf_len);
+ void *arch_kexec_kernel_image_load(struct kimage *image);
+-int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+- Elf_Shdr *section,
+- const Elf_Shdr *relsec,
+- const Elf_Shdr *symtab);
+-int arch_kexec_apply_relocations(struct purgatory_info *pi,
+- Elf_Shdr *section,
+- const Elf_Shdr *relsec,
+- const Elf_Shdr *symtab);
+ int arch_kimage_file_post_load_cleanup(struct kimage *image);
+ #ifdef CONFIG_KEXEC_SIG
+ int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
+@@ -223,6 +215,44 @@ extern int crash_exclude_mem_range(struc
+ unsigned long long mend);
+ extern int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
+ void **addr, unsigned long *sz);
++
++#ifndef arch_kexec_apply_relocations_add
++/*
++ * arch_kexec_apply_relocations_add - apply relocations of type RELA
++ * @pi: Purgatory to be relocated.
++ * @section: Section relocations applying to.
++ * @relsec: Section containing RELAs.
++ * @symtab: Corresponding symtab.
++ *
++ * Return: 0 on success, negative errno on error.
++ */
++static inline int
++arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section,
++ const Elf_Shdr *relsec, const Elf_Shdr *symtab)
++{
++ pr_err("RELA relocation unsupported.\n");
++ return -ENOEXEC;
++}
++#endif
++
++#ifndef arch_kexec_apply_relocations
++/*
++ * arch_kexec_apply_relocations - apply relocations of type REL
++ * @pi: Purgatory to be relocated.
++ * @section: Section relocations applying to.
++ * @relsec: Section containing RELs.
++ * @symtab: Corresponding symtab.
++ *
++ * Return: 0 on success, negative errno on error.
++ */
++static inline int
++arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section,
++ const Elf_Shdr *relsec, const Elf_Shdr *symtab)
++{
++ pr_err("REL relocation unsupported.\n");
++ return -ENOEXEC;
++}
++#endif
+ #endif /* CONFIG_KEXEC_FILE */
+
+ #ifdef CONFIG_KEXEC_ELF
+--- a/kernel/kexec_file.c
++++ b/kernel/kexec_file.c
+@@ -109,40 +109,6 @@ int __weak arch_kexec_kernel_verify_sig(
+ #endif
+
+ /*
+- * arch_kexec_apply_relocations_add - apply relocations of type RELA
+- * @pi: Purgatory to be relocated.
+- * @section: Section relocations applying to.
+- * @relsec: Section containing RELAs.
+- * @symtab: Corresponding symtab.
+- *
+- * Return: 0 on success, negative errno on error.
+- */
+-int __weak
+-arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section,
+- const Elf_Shdr *relsec, const Elf_Shdr *symtab)
+-{
+- pr_err("RELA relocation unsupported.\n");
+- return -ENOEXEC;
+-}
+-
+-/*
+- * arch_kexec_apply_relocations - apply relocations of type REL
+- * @pi: Purgatory to be relocated.
+- * @section: Section relocations applying to.
+- * @relsec: Section containing RELs.
+- * @symtab: Corresponding symtab.
+- *
+- * Return: 0 on success, negative errno on error.
+- */
+-int __weak
+-arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section,
+- const Elf_Shdr *relsec, const Elf_Shdr *symtab)
+-{
+- pr_err("REL relocation unsupported.\n");
+- return -ENOEXEC;
+-}
+-
+-/*
+ * Free up memory used by kernel, initrd, and command line. This is temporary
+ * memory allocation which is not needed any more after these buffers have
+ * been loaded into separate segments and have been copied elsewhere.
--- /dev/null
+From 376b9133826865568167b4091ef92a68c4622b87 Mon Sep 17 00:00:00 2001
+From: Hyunchul Lee <hyc.lee@gmail.com>
+Date: Fri, 20 May 2022 14:35:47 +0900
+Subject: ksmbd: fix outstanding credits related bugs
+
+From: Hyunchul Lee <hyc.lee@gmail.com>
+
+commit 376b9133826865568167b4091ef92a68c4622b87 upstream.
+
+outstanding credits must be initialized to 0,
+because it means the sum of credits consumed by
+in-flight requests.
+And outstanding credits must be compared with
+total credits in smb2_validate_credit_charge(),
+because total credits are the sum of credits
+granted by ksmbd.
+
+This patch fix the following error,
+while frametest with Windows clients:
+
+Limits exceeding the maximum allowable outstanding requests,
+given : 128, pending : 8065
+
+Fixes: b589f5db6d4a ("ksmbd: limits exceeding the maximum allowable outstanding requests")
+Cc: stable@vger.kernel.org
+Signed-off-by: Hyunchul Lee <hyc.lee@gmail.com>
+Reported-by: Yufan Chen <wiz.chen@gmail.com>
+Tested-by: Yufan Chen <wiz.chen@gmail.com>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ksmbd/connection.c | 2 +-
+ fs/ksmbd/smb2misc.c | 2 +-
+ fs/ksmbd/smb_common.c | 4 +++-
+ 3 files changed, 5 insertions(+), 3 deletions(-)
+
+--- a/fs/ksmbd/connection.c
++++ b/fs/ksmbd/connection.c
+@@ -62,7 +62,7 @@ struct ksmbd_conn *ksmbd_conn_alloc(void
+ atomic_set(&conn->req_running, 0);
+ atomic_set(&conn->r_count, 0);
+ conn->total_credits = 1;
+- conn->outstanding_credits = 1;
++ conn->outstanding_credits = 0;
+
+ init_waitqueue_head(&conn->req_running_q);
+ INIT_LIST_HEAD(&conn->conns_list);
+--- a/fs/ksmbd/smb2misc.c
++++ b/fs/ksmbd/smb2misc.c
+@@ -339,7 +339,7 @@ static int smb2_validate_credit_charge(s
+ ret = 1;
+ }
+
+- if ((u64)conn->outstanding_credits + credit_charge > conn->vals->max_credits) {
++ if ((u64)conn->outstanding_credits + credit_charge > conn->total_credits) {
+ ksmbd_debug(SMB, "Limits exceeding the maximum allowable outstanding requests, given : %u, pending : %u\n",
+ credit_charge, conn->outstanding_credits);
+ ret = 1;
+--- a/fs/ksmbd/smb_common.c
++++ b/fs/ksmbd/smb_common.c
+@@ -140,8 +140,10 @@ int ksmbd_verify_smb_message(struct ksmb
+
+ hdr = work->request_buf;
+ if (*(__le32 *)hdr->Protocol == SMB1_PROTO_NUMBER &&
+- hdr->Command == SMB_COM_NEGOTIATE)
++ hdr->Command == SMB_COM_NEGOTIATE) {
++ work->conn->outstanding_credits++;
+ return 0;
++ }
+
+ return -EINVAL;
+ }
--- /dev/null
+From b041b7b9de6e1d4362de855ab90f9d03ef323edd Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <nbd@nbd.name>
+Date: Wed, 20 Apr 2022 12:49:07 +0200
+Subject: mac80211: upgrade passive scan to active scan on DFS channels after beacon rx
+
+From: Felix Fietkau <nbd@nbd.name>
+
+commit b041b7b9de6e1d4362de855ab90f9d03ef323edd upstream.
+
+In client mode, we can't connect to hidden SSID APs or SSIDs not advertised
+in beacons on DFS channels, since we're forced to passive scan. Fix this by
+sending out a probe request immediately after the first beacon, if active
+scan was requested by the user.
+
+Cc: stable@vger.kernel.org
+Reported-by: Catrinel Catrinescu <cc@80211.de>
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Link: https://lore.kernel.org/r/20220420104907.36275-1-nbd@nbd.name
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mac80211/ieee80211_i.h | 5 +++++
+ net/mac80211/scan.c | 20 ++++++++++++++++++++
+ 2 files changed, 25 insertions(+)
+
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -1129,6 +1129,9 @@ struct tpt_led_trigger {
+ * a scan complete for an aborted scan.
+ * @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being
+ * cancelled.
++ * @SCAN_BEACON_WAIT: Set whenever we're passive scanning because of radar/no-IR
++ * and could send a probe request after receiving a beacon.
++ * @SCAN_BEACON_DONE: Beacon received, we can now send a probe request
+ */
+ enum {
+ SCAN_SW_SCANNING,
+@@ -1137,6 +1140,8 @@ enum {
+ SCAN_COMPLETED,
+ SCAN_ABORTED,
+ SCAN_HW_CANCELLED,
++ SCAN_BEACON_WAIT,
++ SCAN_BEACON_DONE,
+ };
+
+ /**
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -277,6 +277,16 @@ void ieee80211_scan_rx(struct ieee80211_
+ if (likely(!sdata1 && !sdata2))
+ return;
+
++ if (test_and_clear_bit(SCAN_BEACON_WAIT, &local->scanning)) {
++ /*
++ * we were passive scanning because of radar/no-IR, but
++ * the beacon/proberesp rx gives us an opportunity to upgrade
++ * to active scan
++ */
++ set_bit(SCAN_BEACON_DONE, &local->scanning);
++ ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
++ }
++
+ if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+ struct cfg80211_scan_request *scan_req;
+ struct cfg80211_sched_scan_request *sched_scan_req;
+@@ -783,6 +793,8 @@ static int __ieee80211_start_scan(struct
+ IEEE80211_CHAN_RADAR)) ||
+ !req->n_ssids) {
+ next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
++ if (req->n_ssids)
++ set_bit(SCAN_BEACON_WAIT, &local->scanning);
+ } else {
+ ieee80211_scan_state_send_probe(local, &next_delay);
+ next_delay = IEEE80211_CHANNEL_TIME;
+@@ -994,6 +1006,8 @@ set_channel:
+ !scan_req->n_ssids) {
+ *next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
+ local->next_scan_state = SCAN_DECISION;
++ if (scan_req->n_ssids)
++ set_bit(SCAN_BEACON_WAIT, &local->scanning);
+ return;
+ }
+
+@@ -1086,6 +1100,8 @@ void ieee80211_scan_work(struct work_str
+ goto out;
+ }
+
++ clear_bit(SCAN_BEACON_WAIT, &local->scanning);
++
+ /*
+ * as long as no delay is required advance immediately
+ * without scheduling a new work
+@@ -1096,6 +1112,10 @@ void ieee80211_scan_work(struct work_str
+ goto out_complete;
+ }
+
++ if (test_and_clear_bit(SCAN_BEACON_DONE, &local->scanning) &&
++ local->next_scan_state == SCAN_DECISION)
++ local->next_scan_state = SCAN_SEND_PROBE;
++
+ switch (local->next_scan_state) {
+ case SCAN_DECISION:
+ /* if no more bands/channels left, complete scan */
--- /dev/null
+From a04e1928e2ead144dc2f369768bc0a0f3110af89 Mon Sep 17 00:00:00 2001
+From: Miaohe Lin <linmiaohe@huawei.com>
+Date: Tue, 31 May 2022 20:26:43 +0800
+Subject: mm/memremap: fix missing call to untrack_pfn() in pagemap_range()
+
+From: Miaohe Lin <linmiaohe@huawei.com>
+
+commit a04e1928e2ead144dc2f369768bc0a0f3110af89 upstream.
+
+We forget to call untrack_pfn() to pair with track_pfn_remap() when range
+is not allowed to hotplug. Fix it by jump err_kasan.
+
+Link: https://lkml.kernel.org/r/20220531122643.25249-1-linmiaohe@huawei.com
+Fixes: bca3feaa0764 ("mm/memory_hotplug: prevalidate the address range being added with platform")
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Acked-by: Muchun Song <songmuchun@bytedance.com>
+Cc: Anshuman Khandual <anshuman.khandual@arm.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memremap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/memremap.c
++++ b/mm/memremap.c
+@@ -245,7 +245,7 @@ static int pagemap_range(struct dev_page
+
+ if (!mhp_range_allowed(range->start, range_len(range), !is_private)) {
+ error = -EINVAL;
+- goto err_pfn_remap;
++ goto err_kasan;
+ }
+
+ mem_hotplug_begin();
--- /dev/null
+From c572e4888ad1be123c1516ec577ad30a700bbec4 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@techsingularity.net>
+Date: Thu, 26 May 2022 10:12:10 +0100
+Subject: mm/page_alloc: always attempt to allocate at least one page during bulk allocation
+
+From: Mel Gorman <mgorman@techsingularity.net>
+
+commit c572e4888ad1be123c1516ec577ad30a700bbec4 upstream.
+
+Peter Pavlisko reported the following problem on kernel bugzilla 216007.
+
+ When I try to extract an uncompressed tar archive (2.6 milion
+ files, 760.3 GiB in size) on newly created (empty) XFS file system,
+ after first low tens of gigabytes extracted the process hangs in
+ iowait indefinitely. One CPU core is 100% occupied with iowait,
+ the other CPU core is idle (on 2-core Intel Celeron G1610T).
+
+It was bisected to c9fa563072e1 ("xfs: use alloc_pages_bulk_array() for
+buffers") but XFS is only the messenger. The problem is that nothing is
+waking kswapd to reclaim some pages at a time the PCP lists cannot be
+refilled until some reclaim happens. The bulk allocator checks that there
+are some pages in the array and the original intent was that a bulk
+allocator did not necessarily need all the requested pages and it was best
+to return as quickly as possible.
+
+This was fine for the first user of the API but both NFS and XFS require
+the requested number of pages be available before making progress. Both
+could be adjusted to call the page allocator directly if a bulk allocation
+fails but it puts a burden on users of the API. Adjust the semantics to
+attempt at least one allocation via __alloc_pages() before returning so
+kswapd is woken if necessary.
+
+It was reported via bugzilla that the patch addressed the problem and that
+the tar extraction completed successfully. This may also address bug
+215975 but has yet to be confirmed.
+
+BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=216007
+BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=215975
+Link: https://lkml.kernel.org/r/20220526091210.GC3441@techsingularity.net
+Fixes: 387ba26fb1cb ("mm/page_alloc: add a bulk page allocator")
+Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
+Cc: "Darrick J. Wong" <djwong@kernel.org>
+Cc: Dave Chinner <dchinner@redhat.com>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Jesper Dangaard Brouer <brouer@redhat.com>
+Cc: Chuck Lever <chuck.lever@oracle.com>
+Cc: <stable@vger.kernel.org> [5.13+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/page_alloc.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5299,8 +5299,8 @@ unsigned long __alloc_pages_bulk(gfp_t g
+ page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
+ pcp, pcp_list);
+ if (unlikely(!page)) {
+- /* Try and get at least one page */
+- if (!nr_populated)
++ /* Try and allocate at least one page */
++ if (!nr_account)
+ goto failed_irq;
+ break;
+ }
--- /dev/null
+From 23e09be254f95a5b75cd87f91a4014f3b46dda3f Mon Sep 17 00:00:00 2001
+From: Bean Huo <beanhuo@micron.com>
+Date: Sun, 24 Apr 2022 00:16:23 +0200
+Subject: mmc: core: Allows to override the timeout value for ioctl() path
+
+From: Bean Huo <beanhuo@micron.com>
+
+commit 23e09be254f95a5b75cd87f91a4014f3b46dda3f upstream.
+
+Occasionally, user-land applications initiate longer timeout values for certain commands
+through ioctl() system call. But so far we are still using a fixed timeout of 10 seconds
+in mmc_poll_for_busy() on the ioctl() path, even if a custom timeout is specified in the
+userspace application. This patch allows custom timeout values to override this default
+timeout values on the ioctl path.
+
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Bean Huo <beanhuo@micron.com>
+Acked-by: Avri Altman <avri.altman@wdc.com>
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+Link: https://lore.kernel.org/r/20220423221623.1074556-3-huobean@gmail.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/core/block.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -609,11 +609,11 @@ static int __mmc_blk_ioctl_cmd(struct mm
+
+ if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
+ /*
+- * Ensure RPMB/R1B command has completed by polling CMD13
+- * "Send Status".
++ * Ensure RPMB/R1B command has completed by polling CMD13 "Send Status". Here we
++ * allow to override the default timeout value if a custom timeout is specified.
+ */
+- err = mmc_poll_for_busy(card, MMC_BLK_TIMEOUT_MS, false,
+- MMC_BUSY_IO);
++ err = mmc_poll_for_busy(card, idata->ic.cmd_timeout_ms ? : MMC_BLK_TIMEOUT_MS,
++ false, MMC_BUSY_IO);
+ }
+
+ return err;
--- /dev/null
+From 37462a920392cb86541650a6f4121155f11f1199 Mon Sep 17 00:00:00 2001
+From: Christophe de Dinechin <dinechin@redhat.com>
+Date: Thu, 14 Apr 2022 17:08:54 +0200
+Subject: nodemask.h: fix compilation error with GCC12
+
+From: Christophe de Dinechin <dinechin@redhat.com>
+
+commit 37462a920392cb86541650a6f4121155f11f1199 upstream.
+
+With gcc version 12.0.1 20220401 (Red Hat 12.0.1-0), building with
+defconfig results in the following compilation error:
+
+| CC mm/swapfile.o
+| mm/swapfile.c: In function `setup_swap_info':
+| mm/swapfile.c:2291:47: error: array subscript -1 is below array bounds
+| of `struct plist_node[]' [-Werror=array-bounds]
+| 2291 | p->avail_lists[i].prio = 1;
+| | ~~~~~~~~~~~~~~^~~
+| In file included from mm/swapfile.c:16:
+| ./include/linux/swap.h:292:27: note: while referencing `avail_lists'
+| 292 | struct plist_node avail_lists[]; /*
+| | ^~~~~~~~~~~
+
+This is due to the compiler detecting that the mask in
+node_states[__state] could theoretically be zero, which would lead to
+first_node() returning -1 through find_first_bit.
+
+I believe that the warning/error is legitimate. I first tried adding a
+test to check that the node mask is not empty, since a similar test exists
+in the case where MAX_NUMNODES == 1.
+
+However, adding the if statement causes other warnings to appear in
+for_each_cpu_node_but, because it introduces a dangling else ambiguity.
+And unfortunately, GCC is not smart enough to detect that the added test
+makes the case where (node) == -1 impossible, so it still complains with
+the same message.
+
+This is why I settled on replacing that with a harmless, but relatively
+useless (node) >= 0 test. Based on the warning for the dangling else, I
+also decided to fix the case where MAX_NUMNODES == 1 by moving the
+condition inside the for loop. It will still only be tested once. This
+ensures that the meaning of an else following for_each_node_mask or
+derivatives would not silently have a different meaning depending on the
+configuration.
+
+Link: https://lkml.kernel.org/r/20220414150855.2407137-3-dinechin@redhat.com
+Signed-off-by: Christophe de Dinechin <christophe@dinechin.org>
+Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Ben Segall <bsegall@google.com>
+Cc: "Michael S. Tsirkin" <mst@redhat.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
+Cc: Vincent Guittot <vincent.guittot@linaro.org>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
+Cc: Jason Wang <jasowang@redhat.com>
+Cc: Zhen Lei <thunder.leizhen@huawei.com>
+Cc: Juri Lelli <juri.lelli@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/nodemask.h | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+--- a/include/linux/nodemask.h
++++ b/include/linux/nodemask.h
+@@ -375,14 +375,13 @@ static inline void __nodes_fold(nodemask
+ }
+
+ #if MAX_NUMNODES > 1
+-#define for_each_node_mask(node, mask) \
+- for ((node) = first_node(mask); \
+- (node) < MAX_NUMNODES; \
+- (node) = next_node((node), (mask)))
++#define for_each_node_mask(node, mask) \
++ for ((node) = first_node(mask); \
++ (node >= 0) && (node) < MAX_NUMNODES; \
++ (node) = next_node((node), (mask)))
+ #else /* MAX_NUMNODES == 1 */
+-#define for_each_node_mask(node, mask) \
+- if (!nodes_empty(mask)) \
+- for ((node) = 0; (node) < 1; (node)++)
++#define for_each_node_mask(node, mask) \
++ for ((node) = 0; (node) < 1 && !nodes_empty(mask); (node)++)
+ #endif /* MAX_NUMNODES */
+
+ /*
--- /dev/null
+From 4d2900f20edfe541f75756a00deeb2ffe7c66bc1 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan+linaro@kernel.org>
+Date: Wed, 27 Apr 2022 08:32:42 +0200
+Subject: phy: qcom-qmp: fix reset-controller leak on probe errors
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+commit 4d2900f20edfe541f75756a00deeb2ffe7c66bc1 upstream.
+
+Make sure to release the lane reset controller in case of a late probe
+error (e.g. probe deferral).
+
+Note that due to the reset controller being defined in devicetree in
+"lane" child nodes, devm_reset_control_get_exclusive() cannot be used
+directly.
+
+Fixes: e78f3d15e115 ("phy: qcom-qmp: new qmp phy driver for qcom-chipsets")
+Cc: stable@vger.kernel.org # 4.12
+Cc: Vivek Gautam <vivek.gautam@codeaurora.org>
+Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Reviewed-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Link: https://lore.kernel.org/r/20220427063243.32576-3-johan+linaro@kernel.org
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/phy/qualcomm/phy-qcom-qmp.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
++++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
+@@ -5382,6 +5382,11 @@ static const struct phy_ops qcom_qmp_pci
+ .owner = THIS_MODULE,
+ };
+
++static void qcom_qmp_reset_control_put(void *data)
++{
++ reset_control_put(data);
++}
++
+ static
+ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id,
+ void __iomem *serdes, const struct qmp_phy_cfg *cfg)
+@@ -5476,6 +5481,10 @@ int qcom_qmp_phy_create(struct device *d
+ dev_err(dev, "failed to get lane%d reset\n", id);
+ return PTR_ERR(qphy->lane_rst);
+ }
++ ret = devm_add_action_or_reset(dev, qcom_qmp_reset_control_put,
++ qphy->lane_rst);
++ if (ret)
++ return ret;
+ }
+
+ if (cfg->type == PHY_TYPE_UFS || cfg->type == PHY_TYPE_PCIE)
--- /dev/null
+From f0a4bc38a12f5a0cc5ad68670d9480e91e6a94df Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan+linaro@kernel.org>
+Date: Wed, 27 Apr 2022 08:32:41 +0200
+Subject: phy: qcom-qmp: fix struct clk leak on probe errors
+
+From: Johan Hovold <johan+linaro@kernel.org>
+
+commit f0a4bc38a12f5a0cc5ad68670d9480e91e6a94df upstream.
+
+Make sure to release the pipe clock reference in case of a late probe
+error (e.g. probe deferral).
+
+Fixes: e78f3d15e115 ("phy: qcom-qmp: new qmp phy driver for qcom-chipsets")
+Cc: stable@vger.kernel.org # 4.12
+Cc: Vivek Gautam <vivek.gautam@codeaurora.org>
+Reviewed-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
+Link: https://lore.kernel.org/r/20220427063243.32576-2-johan+linaro@kernel.org
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/phy/qualcomm/phy-qcom-qmp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
++++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
+@@ -5454,7 +5454,7 @@ int qcom_qmp_phy_create(struct device *d
+ * all phys that don't need this.
+ */
+ snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
+- qphy->pipe_clk = of_clk_get_by_name(np, prop_name);
++ qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name);
+ if (IS_ERR(qphy->pipe_clk)) {
+ if (cfg->type == PHY_TYPE_PCIE ||
+ cfg->type == PHY_TYPE_USB3) {
--- /dev/null
+From f93e91a0372c922c20d5bee260b0f43b4b8a1bee Mon Sep 17 00:00:00 2001
+From: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
+Date: Fri, 20 May 2022 14:37:12 -0400
+Subject: RDMA/hfi1: Fix potential integer multiplication overflow errors
+
+From: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
+
+commit f93e91a0372c922c20d5bee260b0f43b4b8a1bee upstream.
+
+When multiplying of different types, an overflow is possible even when
+storing the result in a larger type. This is because the conversion is
+done after the multiplication. So arithmetic overflow and thus an
+incorrect value is possible.
+
+Correct an instance of this in the inter packet delay calculation. Fix by
+ensuring one of the operands is u64 which will promote the other to u64 as
+well ensuring no overflow.
+
+Cc: stable@vger.kernel.org
+Fixes: 7724105686e7 ("IB/hfi1: add driver files")
+Link: https://lore.kernel.org/r/20220520183712.48973.29855.stgit@awfm-01.cornelisnetworks.com
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/hw/hfi1/init.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hfi1/init.c
++++ b/drivers/infiniband/hw/hfi1/init.c
+@@ -488,7 +488,7 @@ void set_link_ipg(struct hfi1_pportdata
+ u16 shift, mult;
+ u64 src;
+ u32 current_egress_rate; /* Mbits /sec */
+- u32 max_pkt_time;
++ u64 max_pkt_time;
+ /*
+ * max_pkt_time is the maximum packet egress time in units
+ * of the fabric clock period 1/(805 MHz).
--- /dev/null
+From 60a60e32cf91169840abcb4a80f0b0df31708ba7 Mon Sep 17 00:00:00 2001
+From: Dong Aisheng <aisheng.dong@nxp.com>
+Date: Fri, 13 May 2022 15:11:26 -0700
+Subject: Revert "mm/cma.c: remove redundant cma_mutex lock"
+
+From: Dong Aisheng <aisheng.dong@nxp.com>
+
+commit 60a60e32cf91169840abcb4a80f0b0df31708ba7 upstream.
+
+This reverts commit a4efc174b382fcdb which introduced a regression issue
+that when there're multiple processes allocating dma memory in parallel by
+calling dma_alloc_coherent(), it may fail sometimes as follows:
+
+Error log:
+cma: cma_alloc: linux,cma: alloc failed, req-size: 148 pages, ret: -16
+cma: number of available pages:
+3@125+20@172+12@236+4@380+32@736+17@2287+23@2473+20@36076+99@40477+108@40852+44@41108+20@41196+108@41364+108@41620+
+108@42900+108@43156+483@44061+1763@45341+1440@47712+20@49324+20@49388+5076@49452+2304@55040+35@58141+20@58220+20@58284+
+7188@58348+84@66220+7276@66452+227@74525+6371@75549=> 33161 free of 81920 total pages
+
+When issue happened, we saw there were still 33161 pages (129M) free CMA
+memory and a lot available free slots for 148 pages in CMA bitmap that we
+want to allocate.
+
+When dumping memory info, we found that there was also ~342M normal
+memory, but only 1352K CMA memory left in buddy system while a lot of
+pageblocks were isolated.
+
+Memory info log:
+Normal free:351096kB min:30000kB low:37500kB high:45000kB reserved_highatomic:0KB
+ active_anon:98060kB inactive_anon:98948kB active_file:60864kB inactive_file:31776kB
+ unevictable:0kB writepending:0kB present:1048576kB managed:1018328kB mlocked:0kB
+ bounce:0kB free_pcp:220kB local_pcp:192kB free_cma:1352kB lowmem_reserve[]: 0 0 0
+Normal: 78*4kB (UECI) 1772*8kB (UMECI) 1335*16kB (UMECI) 360*32kB (UMECI) 65*64kB (UMCI)
+ 36*128kB (UMECI) 16*256kB (UMCI) 6*512kB (EI) 8*1024kB (UEI) 4*2048kB (MI) 8*4096kB (EI)
+ 8*8192kB (UI) 3*16384kB (EI) 8*32768kB (M) = 489288kB
+
+The root cause of this issue is that since commit a4efc174b382 ("mm/cma.c:
+remove redundant cma_mutex lock"), CMA supports concurrent memory
+allocation. It's possible that the memory range process A trying to alloc
+has already been isolated by the allocation of process B during memory
+migration.
+
+The problem here is that the memory range isolated during one allocation
+by start_isolate_page_range() could be much bigger than the real size we
+want to alloc due to the range is aligned to MAX_ORDER_NR_PAGES.
+
+Taking an ARMv7 platform with 1G memory as an example, when
+MAX_ORDER_NR_PAGES is big (e.g. 32M with max_order 14) and CMA memory is
+relatively small (e.g. 128M), there're only 4 MAX_ORDER slot, then it's
+very easy that all CMA memory may have already been isolated by other
+processes when one trying to allocate memory using dma_alloc_coherent().
+Since current CMA code will only scan one time of whole available CMA
+memory, then dma_alloc_coherent() may easy fail due to contention with
+other processes.
+
+This patch simply falls back to the original method that using cma_mutex
+to make alloc_contig_range() run sequentially to avoid the issue.
+
+Link: https://lkml.kernel.org/r/20220509094551.3596244-1-aisheng.dong@nxp.com
+Link: https://lore.kernel.org/all/20220315144521.3810298-2-aisheng.dong@nxp.com/
+Fixes: a4efc174b382 ("mm/cma.c: remove redundant cma_mutex lock")
+Signed-off-by: Dong Aisheng <aisheng.dong@nxp.com>
+Acked-by: Minchan Kim <minchan@kernel.org>
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: Marek Szyprowski <m.szyprowski@samsung.com>
+Cc: Lecopzer Chen <lecopzer.chen@mediatek.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: <stable@vger.kernel.org> [5.11+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/cma.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -37,6 +37,7 @@
+
+ struct cma cma_areas[MAX_CMA_AREAS];
+ unsigned cma_area_count;
++static DEFINE_MUTEX(cma_mutex);
+
+ phys_addr_t cma_get_base(const struct cma *cma)
+ {
+@@ -471,9 +472,10 @@ struct page *cma_alloc(struct cma *cma,
+ spin_unlock_irq(&cma->lock);
+
+ pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
++ mutex_lock(&cma_mutex);
+ ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
+ GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
+-
++ mutex_unlock(&cma_mutex);
+ if (ret == 0) {
+ page = pfn_to_page(pfn);
+ break;
--- /dev/null
+From 746285cf81dc19502ab238249d75f5990bd2d231 Mon Sep 17 00:00:00 2001
+From: Alexander Wetzel <alexander@wetzel-home.de>
+Date: Fri, 22 Apr 2022 16:52:28 +0200
+Subject: rtl818x: Prevent using not initialized queues
+
+From: Alexander Wetzel <alexander@wetzel-home.de>
+
+commit 746285cf81dc19502ab238249d75f5990bd2d231 upstream.
+
+Using nonexistent queues can panic the kernel with rtl8180/rtl8185 cards.
+Ignore the skb priority for those cards, they only have one tx queue. Pierre
+Asselin (pa@panix.com) reported the kernel crash in the Gentoo forum:
+
+https://forums.gentoo.org/viewtopic-t-1147832-postdays-0-postorder-asc-start-25.html
+
+He also confirmed that this patch fixes the issue. In summary this happened:
+
+After updating wpa_supplicant from 2.9 to 2.10 the kernel crashed with a
+"divide error: 0000" when connecting to an AP. Control port tx now tries to
+use IEEE80211_AC_VO for the priority, which wpa_supplicants starts to use in
+2.10.
+
+Since only the rtl8187se part of the driver supports QoS, the priority
+of the skb is set to IEEE80211_AC_BE (2) by mac80211 for rtl8180/rtl8185
+cards.
+
+rtl8180 is then unconditionally reading out the priority and finally crashes on
+drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c line 544 without this
+patch:
+ idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries
+
+"ring->entries" is zero for rtl8180/rtl8185 cards, tx_ring[2] never got
+initialized.
+
+Cc: stable@vger.kernel.org
+Reported-by: pa@panix.com
+Tested-by: pa@panix.com
+Signed-off-by: Alexander Wetzel <alexander@wetzel-home.de>
+Signed-off-by: Kalle Valo <kvalo@kernel.org>
+Link: https://lore.kernel.org/r/20220422145228.7567-1-alexander@wetzel-home.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
++++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
+@@ -460,8 +460,10 @@ static void rtl8180_tx(struct ieee80211_
+ struct rtl8180_priv *priv = dev->priv;
+ struct rtl8180_tx_ring *ring;
+ struct rtl8180_tx_desc *entry;
++ unsigned int prio = 0;
+ unsigned long flags;
+- unsigned int idx, prio, hw_prio;
++ unsigned int idx, hw_prio;
++
+ dma_addr_t mapping;
+ u32 tx_flags;
+ u8 rc_flags;
+@@ -470,7 +472,9 @@ static void rtl8180_tx(struct ieee80211_
+ /* do arithmetic and then convert to le16 */
+ u16 frame_duration = 0;
+
+- prio = skb_get_queue_mapping(skb);
++ /* rtl8180/rtl8185 only has one useable tx queue */
++ if (dev->queues > IEEE80211_AC_BK)
++ prio = skb_get_queue_mapping(skb);
+ ring = &priv->tx_ring[prio];
+
+ mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
--- /dev/null
+From d9f3af4fbb1d955bbaf872d9e76502f6e3e803cb Mon Sep 17 00:00:00 2001
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Tue, 3 May 2022 10:08:03 +0200
+Subject: serial: pch: don't overwrite xmit->buf[0] by x_char
+
+From: Jiri Slaby <jslaby@suse.cz>
+
+commit d9f3af4fbb1d955bbaf872d9e76502f6e3e803cb upstream.
+
+When x_char is to be sent, the TX path overwrites whatever is in the
+circular buffer at offset 0 with x_char and sends it using
+pch_uart_hal_write(). I don't understand how this was supposed to work
+if xmit->buf[0] already contained some character. It must have been
+lost.
+
+Remove this whole pop_tx_x() concept and do the work directly in the
+callers. (Without printing anything using dev_dbg().)
+
+Cc: <stable@vger.kernel.org>
+Fixes: 3c6a483275f4 (Serial: EG20T: add PCH_UART driver)
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Link: https://lore.kernel.org/r/20220503080808.28332-1-jslaby@suse.cz
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/pch_uart.c | 27 +++++++--------------------
+ 1 file changed, 7 insertions(+), 20 deletions(-)
+
+--- a/drivers/tty/serial/pch_uart.c
++++ b/drivers/tty/serial/pch_uart.c
+@@ -624,22 +624,6 @@ static int push_rx(struct eg20t_port *pr
+ return 0;
+ }
+
+-static int pop_tx_x(struct eg20t_port *priv, unsigned char *buf)
+-{
+- int ret = 0;
+- struct uart_port *port = &priv->port;
+-
+- if (port->x_char) {
+- dev_dbg(priv->port.dev, "%s:X character send %02x (%lu)\n",
+- __func__, port->x_char, jiffies);
+- buf[0] = port->x_char;
+- port->x_char = 0;
+- ret = 1;
+- }
+-
+- return ret;
+-}
+-
+ static int dma_push_rx(struct eg20t_port *priv, int size)
+ {
+ int room;
+@@ -889,9 +873,10 @@ static unsigned int handle_tx(struct eg2
+
+ fifo_size = max(priv->fifo_size, 1);
+ tx_empty = 1;
+- if (pop_tx_x(priv, xmit->buf)) {
+- pch_uart_hal_write(priv, xmit->buf, 1);
++ if (port->x_char) {
++ pch_uart_hal_write(priv, &port->x_char, 1);
+ port->icount.tx++;
++ port->x_char = 0;
+ tx_empty = 0;
+ fifo_size--;
+ }
+@@ -946,9 +931,11 @@ static unsigned int dma_handle_tx(struct
+ }
+
+ fifo_size = max(priv->fifo_size, 1);
+- if (pop_tx_x(priv, xmit->buf)) {
+- pch_uart_hal_write(priv, xmit->buf, 1);
++
++ if (port->x_char) {
++ pch_uart_hal_write(priv, &port->x_char, 1);
+ port->icount.tx++;
++ port->x_char = 0;
+ fifo_size--;
+ }
+
mtd-cfi_cmdset_0002-use-chip_ready-for-write-on-s29gl064n.patch
media-coda-fix-reported-h264-profile.patch
media-coda-add-more-h264-levels-for-coda960.patch
+ima-remove-the-ima_template-kconfig-option.patch
+kconfig-add-option-for-asm-goto-w-tied-outputs-to-workaround-clang-13-bug.patch
+rdma-hfi1-fix-potential-integer-multiplication-overflow-errors.patch
+mmc-core-allows-to-override-the-timeout-value-for-ioctl-path.patch
+csky-patch_text-fixup-last-cpu-should-be-master.patch
+irqchip-armada-370-xp-do-not-touch-performance-counter-overflow-on-a375-a38x-a39x.patch
+irqchip-irq-xtensa-mx-fix-initial-irq-affinity.patch
+thermal-devfreq_cooling-use-local-ops-instead-of-global-ops.patch
+cfg80211-declare-module_firmware-for-regulatory.db.patch
+mac80211-upgrade-passive-scan-to-active-scan-on-dfs-channels-after-beacon-rx.patch
+um-use-asm-generic-dma-mapping.h.patch
+um-chan_user-fix-winch_tramp-return-value.patch
+um-fix-out-of-bounds-read-in-ldt-setup.patch
+kexec_file-drop-weak-attribute-from-arch_kexec_apply_relocations.patch
+ftrace-clean-up-hash-direct_functions-on-register-failures.patch
+ksmbd-fix-outstanding-credits-related-bugs.patch
+iommu-msm-fix-an-incorrect-null-check-on-list-iterator.patch
+iommu-dma-fix-iova-map-result-check-bug.patch
+revert-mm-cma.c-remove-redundant-cma_mutex-lock.patch
+mm-page_alloc-always-attempt-to-allocate-at-least-one-page-during-bulk-allocation.patch
+nodemask.h-fix-compilation-error-with-gcc12.patch
+hugetlb-fix-huge_pmd_unshare-address-update.patch
+mm-memremap-fix-missing-call-to-untrack_pfn-in-pagemap_range.patch
+xtensa-simdisk-fix-proc_read_simdisk.patch
+rtl818x-prevent-using-not-initialized-queues.patch
+asoc-rt5514-fix-event-generation-for-dsp-voice-wake-up-control.patch
+carl9170-tx-fix-an-incorrect-use-of-list-iterator.patch
+stm-ltdc-fix-two-incorrect-null-checks-on-list-iterator.patch
+bcache-improve-multithreaded-bch_btree_check.patch
+bcache-improve-multithreaded-bch_sectors_dirty_init.patch
+bcache-remove-incremental-dirty-sector-counting-for-bch_sectors_dirty_init.patch
+bcache-avoid-journal-no-space-deadlock-by-reserving-1-journal-bucket.patch
+serial-pch-don-t-overwrite-xmit-buf-by-x_char.patch
+tilcdc-tilcdc_external-fix-an-incorrect-null-check-on-list-iterator.patch
+gma500-fix-an-incorrect-null-check-on-list-iterator.patch
+arm64-dts-qcom-ipq8074-fix-the-sleep-clock-frequency.patch
+arm64-tegra-add-missing-dfll-reset-on-tegra210.patch
+clk-tegra-add-missing-reset-deassertion.patch
+phy-qcom-qmp-fix-struct-clk-leak-on-probe-errors.patch
+arm-dts-s5pv210-remove-spi-cs-high-on-panel-in-aries.patch
+arm-pxa-maybe-fix-gpio-lookup-tables.patch
+smb3-ebadf-eio-errors-in-rename-open-caused-by-race-condition-in-smb2_compound_op.patch
+docs-conf.py-cope-with-removal-of-language-none-in-sphinx-5.0.0.patch
+dt-bindings-gpio-altera-correct-interrupt-cells.patch
+vdpasim-allow-to-enable-a-vq-repeatedly.patch
+blk-iolatency-fix-inflight-count-imbalances-and-io-hangs-on-offline.patch
+coresight-core-fix-coresight-device-probe-failure-issue.patch
+phy-qcom-qmp-fix-reset-controller-leak-on-probe-errors.patch
--- /dev/null
+From 0a55cf74ffb5d004b93647e4389096880ce37d6b Mon Sep 17 00:00:00 2001
+From: Steve French <stfrench@microsoft.com>
+Date: Thu, 12 May 2022 10:18:00 -0500
+Subject: SMB3: EBADF/EIO errors in rename/open caused by race condition in smb2_compound_op
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Steve French <stfrench@microsoft.com>
+
+commit 0a55cf74ffb5d004b93647e4389096880ce37d6b upstream.
+
+There is a race condition in smb2_compound_op:
+
+after_close:
+ num_rqst++;
+
+ if (cfile) {
+ cifsFileInfo_put(cfile); // sends SMB2_CLOSE to the server
+ cfile = NULL;
+
+This is triggered by smb2_query_path_info operation that happens during
+revalidate_dentry. In smb2_query_path_info, get_readable_path is called to
+load the cfile, increasing the reference counter. If in the meantime, this
+reference becomes the very last, this call to cifsFileInfo_put(cfile) will
+trigger a SMB2_CLOSE request sent to the server just before sending this compound
+request – and so then the compound request fails either with EBADF/EIO depending
+on the timing at the server, because the handle is already closed.
+
+In the first scenario, the race seems to be happening between smb2_query_path_info
+triggered by the rename operation, and between “cleanup” of asynchronous writes – while
+fsync(fd) likely waits for the asynchronous writes to complete, releasing the writeback
+structures can happen after the close(fd) call. So the EBADF/EIO errors will pop up if
+the timing is such that:
+1) There are still outstanding references after close(fd) in the writeback structures
+2) smb2_query_path_info successfully fetches the cfile, increasing the refcounter by 1
+3) All writeback structures release the same cfile, reducing refcounter to 1
+4) smb2_compound_op is called with that cfile
+
+In the second scenario, the race seems to be similar – here open triggers the
+smb2_query_path_info operation, and if all other threads in the meantime decrease the
+refcounter to 1 similarly to the first scenario, again SMB2_CLOSE will be sent to the
+server just before issuing the compound request. This case is harder to reproduce.
+
+See https://bugzilla.samba.org/show_bug.cgi?id=15051
+
+Cc: stable@vger.kernel.org
+Fixes: 8de9e86c67ba ("cifs: create a helper to find a writeable handle by path name")
+Signed-off-by: Ondrej Hubsch <ohubsch@purestorage.com>
+Reviewed-by: Ronnie Sahlberg <lsahlber@redhat.com>
+Reviewed-by: Paulo Alcantara (SUSE) <pc@cjr.nz>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/cifs/smb2inode.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/fs/cifs/smb2inode.c
++++ b/fs/cifs/smb2inode.c
+@@ -358,8 +358,6 @@ smb2_compound_op(const unsigned int xid,
+ num_rqst++;
+
+ if (cfile) {
+- cifsFileInfo_put(cfile);
+- cfile = NULL;
+ rc = compound_send_recv(xid, ses, server,
+ flags, num_rqst - 2,
+ &rqst[1], &resp_buftype[1],
--- /dev/null
+From 2e6c86be0e57079d1fb6c7c7e5423db096d0548a Mon Sep 17 00:00:00 2001
+From: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+Date: Sun, 27 Mar 2022 13:53:55 +0800
+Subject: stm: ltdc: fix two incorrect NULL checks on list iterator
+
+From: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+
+commit 2e6c86be0e57079d1fb6c7c7e5423db096d0548a upstream.
+
+The two bugs are here:
+ if (encoder) {
+ if (bridge && bridge->timings)
+
+The list iterator value 'encoder/bridge' will *always* be set and
+non-NULL by drm_for_each_encoder()/list_for_each_entry(), so it is
+incorrect to assume that the iterator value will be NULL if the
+list is empty or no element is found.
+
+To fix the bug, use a new variable '*_iter' as the list iterator,
+while use the old variable 'encoder/bridge' as a dedicated pointer
+to point to the found element.
+
+Cc: stable@vger.kernel.org
+Fixes: 99e360442f223 ("drm/stm: Fix bus_flags handling")
+Signed-off-by: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+Acked-by: Raphael Gallais-Pou <raphael.gallais-pou@foss.st.com>
+Signed-off-by: Philippe Cornu <philippe.cornu@foss.st.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220327055355.3808-1-xiam0nd.tong@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/stm/ltdc.c | 16 ++++++++++------
+ 1 file changed, 10 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/stm/ltdc.c
++++ b/drivers/gpu/drm/stm/ltdc.c
+@@ -528,8 +528,8 @@ static void ltdc_crtc_mode_set_nofb(stru
+ struct drm_device *ddev = crtc->dev;
+ struct drm_connector_list_iter iter;
+ struct drm_connector *connector = NULL;
+- struct drm_encoder *encoder = NULL;
+- struct drm_bridge *bridge = NULL;
++ struct drm_encoder *encoder = NULL, *en_iter;
++ struct drm_bridge *bridge = NULL, *br_iter;
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ u32 hsync, vsync, accum_hbp, accum_vbp, accum_act_w, accum_act_h;
+ u32 total_width, total_height;
+@@ -538,15 +538,19 @@ static void ltdc_crtc_mode_set_nofb(stru
+ int ret;
+
+ /* get encoder from crtc */
+- drm_for_each_encoder(encoder, ddev)
+- if (encoder->crtc == crtc)
++ drm_for_each_encoder(en_iter, ddev)
++ if (en_iter->crtc == crtc) {
++ encoder = en_iter;
+ break;
++ }
+
+ if (encoder) {
+ /* get bridge from encoder */
+- list_for_each_entry(bridge, &encoder->bridge_chain, chain_node)
+- if (bridge->encoder == encoder)
++ list_for_each_entry(br_iter, &encoder->bridge_chain, chain_node)
++ if (br_iter->encoder == encoder) {
++ bridge = br_iter;
+ break;
++ }
+
+ /* Get the connector from encoder */
+ drm_connector_list_iter_begin(ddev, &iter);
--- /dev/null
+From b947769b8f778db130aad834257fcaca25df2edc Mon Sep 17 00:00:00 2001
+From: Kant Fan <kant@allwinnertech.com>
+Date: Fri, 25 Mar 2022 15:30:30 +0800
+Subject: thermal: devfreq_cooling: use local ops instead of global ops
+
+From: Kant Fan <kant@allwinnertech.com>
+
+commit b947769b8f778db130aad834257fcaca25df2edc upstream.
+
+Fix access illegal address problem in following condition:
+
+There are multiple devfreq cooling devices in system, some of them has
+EM model but others do not. Energy model ops such as state2power will
+append to global devfreq_cooling_ops when the cooling device with
+EM model is registered. It makes the cooling device without EM model
+also use devfreq_cooling_ops after appending when registered later by
+of_devfreq_cooling_register_power() or of_devfreq_cooling_register().
+
+The IPA governor regards the cooling devices without EM model as a power
+actor, because they also have energy model ops, and will access illegal
+address at dfc->em_pd when execute cdev->ops->get_requested_power,
+cdev->ops->state2power or cdev->ops->power2state.
+
+Fixes: 615510fe13bd2 ("thermal: devfreq_cooling: remove old power model and use EM")
+Cc: 5.13+ <stable@vger.kernel.org> # 5.13+
+Signed-off-by: Kant Fan <kant@allwinnertech.com>
+Reviewed-by: Lukasz Luba <lukasz.luba@arm.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thermal/devfreq_cooling.c | 25 ++++++++++++++++++-------
+ 1 file changed, 18 insertions(+), 7 deletions(-)
+
+--- a/drivers/thermal/devfreq_cooling.c
++++ b/drivers/thermal/devfreq_cooling.c
+@@ -358,21 +358,28 @@ of_devfreq_cooling_register_power(struct
+ struct thermal_cooling_device *cdev;
+ struct device *dev = df->dev.parent;
+ struct devfreq_cooling_device *dfc;
++ struct thermal_cooling_device_ops *ops;
+ char *name;
+ int err, num_opps;
+
+- dfc = kzalloc(sizeof(*dfc), GFP_KERNEL);
+- if (!dfc)
++ ops = kmemdup(&devfreq_cooling_ops, sizeof(*ops), GFP_KERNEL);
++ if (!ops)
+ return ERR_PTR(-ENOMEM);
+
++ dfc = kzalloc(sizeof(*dfc), GFP_KERNEL);
++ if (!dfc) {
++ err = -ENOMEM;
++ goto free_ops;
++ }
++
+ dfc->devfreq = df;
+
+ dfc->em_pd = em_pd_get(dev);
+ if (dfc->em_pd) {
+- devfreq_cooling_ops.get_requested_power =
++ ops->get_requested_power =
+ devfreq_cooling_get_requested_power;
+- devfreq_cooling_ops.state2power = devfreq_cooling_state2power;
+- devfreq_cooling_ops.power2state = devfreq_cooling_power2state;
++ ops->state2power = devfreq_cooling_state2power;
++ ops->power2state = devfreq_cooling_power2state;
+
+ dfc->power_ops = dfc_power;
+
+@@ -407,8 +414,7 @@ of_devfreq_cooling_register_power(struct
+ if (!name)
+ goto remove_qos_req;
+
+- cdev = thermal_of_cooling_device_register(np, name, dfc,
+- &devfreq_cooling_ops);
++ cdev = thermal_of_cooling_device_register(np, name, dfc, ops);
+ kfree(name);
+
+ if (IS_ERR(cdev)) {
+@@ -429,6 +435,8 @@ free_table:
+ kfree(dfc->freq_table);
+ free_dfc:
+ kfree(dfc);
++free_ops:
++ kfree(ops);
+
+ return ERR_PTR(err);
+ }
+@@ -510,11 +518,13 @@ EXPORT_SYMBOL_GPL(devfreq_cooling_em_reg
+ void devfreq_cooling_unregister(struct thermal_cooling_device *cdev)
+ {
+ struct devfreq_cooling_device *dfc;
++ const struct thermal_cooling_device_ops *ops;
+ struct device *dev;
+
+ if (IS_ERR_OR_NULL(cdev))
+ return;
+
++ ops = cdev->ops;
+ dfc = cdev->devdata;
+ dev = dfc->devfreq->dev.parent;
+
+@@ -525,5 +535,6 @@ void devfreq_cooling_unregister(struct t
+
+ kfree(dfc->freq_table);
+ kfree(dfc);
++ kfree(ops);
+ }
+ EXPORT_SYMBOL_GPL(devfreq_cooling_unregister);
--- /dev/null
+From 8b917cbe38e9b0d002492477a9fc2bfee2412ce4 Mon Sep 17 00:00:00 2001
+From: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+Date: Sun, 27 Mar 2022 14:15:16 +0800
+Subject: tilcdc: tilcdc_external: fix an incorrect NULL check on list iterator
+
+From: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+
+commit 8b917cbe38e9b0d002492477a9fc2bfee2412ce4 upstream.
+
+The bug is here:
+ if (!encoder) {
+
+The list iterator value 'encoder' will *always* be set and non-NULL
+by list_for_each_entry(), so it is incorrect to assume that the
+iterator value will be NULL if the list is empty or no element
+is found.
+
+To fix the bug, use a new variable 'iter' as the list iterator,
+while use the original variable 'encoder' as a dedicated pointer
+to point to the found element.
+
+Cc: stable@vger.kernel.org
+Fixes: ec9eab097a500 ("drm/tilcdc: Add drm bridge support for attaching drm bridge drivers")
+Signed-off-by: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+Reviewed-by: Jyri Sarha <jyri.sarha@iki.fi>
+Tested-by: Jyri Sarha <jyri.sarha@iki.fi>
+Signed-off-by: Jyri Sarha <jyri.sarha@iki.fi>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220327061516.5076-1-xiam0nd.tong@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/tilcdc/tilcdc_external.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
+@@ -60,11 +60,13 @@ struct drm_connector *tilcdc_encoder_fin
+ int tilcdc_add_component_encoder(struct drm_device *ddev)
+ {
+ struct tilcdc_drm_private *priv = ddev->dev_private;
+- struct drm_encoder *encoder;
++ struct drm_encoder *encoder = NULL, *iter;
+
+- list_for_each_entry(encoder, &ddev->mode_config.encoder_list, head)
+- if (encoder->possible_crtcs & (1 << priv->crtc->index))
++ list_for_each_entry(iter, &ddev->mode_config.encoder_list, head)
++ if (iter->possible_crtcs & (1 << priv->crtc->index)) {
++ encoder = iter;
+ break;
++ }
+
+ if (!encoder) {
+ dev_err(ddev->dev, "%s: No suitable encoder found\n", __func__);
--- /dev/null
+From 57ae0b67b747031bc41fb44643aa5344ab58607e Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Fri, 20 May 2022 19:45:36 +0200
+Subject: um: chan_user: Fix winch_tramp() return value
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+commit 57ae0b67b747031bc41fb44643aa5344ab58607e upstream.
+
+The previous fix here was only partially correct, it did
+result in returning a proper error value in case of error,
+but it also clobbered the pid that we need to return from
+this function (not just zero for success).
+
+As a result, it returned 0 here, but later this is treated
+as a pid and used to kill the process, but since it's now
+0 we kill(0, SIGKILL), which makes UML kill itself rather
+than just the helper thread.
+
+Fix that and make it more obvious by using a separate
+variable for the pid.
+
+Fixes: ccf1236ecac4 ("um: fix error return code in winch_tramp()")
+Reported-and-tested-by: Nathan Chancellor <nathan@kernel.org>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/um/drivers/chan_user.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/arch/um/drivers/chan_user.c
++++ b/arch/um/drivers/chan_user.c
+@@ -220,7 +220,7 @@ static int winch_tramp(int fd, struct tt
+ unsigned long *stack_out)
+ {
+ struct winch_data data;
+- int fds[2], n, err;
++ int fds[2], n, err, pid;
+ char c;
+
+ err = os_pipe(fds, 1, 1);
+@@ -238,8 +238,9 @@ static int winch_tramp(int fd, struct tt
+ * problem with /dev/net/tun, which if held open by this
+ * thread, prevents the TUN/TAP device from being reused.
+ */
+- err = run_helper_thread(winch_thread, &data, CLONE_FILES, stack_out);
+- if (err < 0) {
++ pid = run_helper_thread(winch_thread, &data, CLONE_FILES, stack_out);
++ if (pid < 0) {
++ err = pid;
+ printk(UM_KERN_ERR "fork of winch_thread failed - errno = %d\n",
+ -err);
+ goto out_close;
+@@ -263,7 +264,7 @@ static int winch_tramp(int fd, struct tt
+ goto out_close;
+ }
+
+- return err;
++ return pid;
+
+ out_close:
+ close(fds[1]);
--- /dev/null
+From 2a4a62a14be1947fa945c5c11ebf67326381a568 Mon Sep 17 00:00:00 2001
+From: Vincent Whitchurch <vincent.whitchurch@axis.com>
+Date: Mon, 23 May 2022 16:04:03 +0200
+Subject: um: Fix out-of-bounds read in LDT setup
+
+From: Vincent Whitchurch <vincent.whitchurch@axis.com>
+
+commit 2a4a62a14be1947fa945c5c11ebf67326381a568 upstream.
+
+syscall_stub_data() expects the data_count parameter to be the number of
+longs, not bytes.
+
+ ==================================================================
+ BUG: KASAN: stack-out-of-bounds in syscall_stub_data+0x70/0xe0
+ Read of size 128 at addr 000000006411f6f0 by task swapper/1
+
+ CPU: 0 PID: 1 Comm: swapper Not tainted 5.18.0+ #18
+ Call Trace:
+ show_stack.cold+0x166/0x2a7
+ __dump_stack+0x3a/0x43
+ dump_stack_lvl+0x1f/0x27
+ print_report.cold+0xdb/0xf81
+ kasan_report+0x119/0x1f0
+ kasan_check_range+0x3a3/0x440
+ memcpy+0x52/0x140
+ syscall_stub_data+0x70/0xe0
+ write_ldt_entry+0xac/0x190
+ init_new_ldt+0x515/0x960
+ init_new_context+0x2c4/0x4d0
+ mm_init.constprop.0+0x5ed/0x760
+ mm_alloc+0x118/0x170
+ 0x60033f48
+ do_one_initcall+0x1d7/0x860
+ 0x60003e7b
+ kernel_init+0x6e/0x3d4
+ new_thread_handler+0x1e7/0x2c0
+
+ The buggy address belongs to stack of task swapper/1
+ and is located at offset 64 in frame:
+ init_new_ldt+0x0/0x960
+
+ This frame has 2 objects:
+ [32, 40) 'addr'
+ [64, 80) 'desc'
+ ==================================================================
+
+Fixes: 858259cf7d1c443c83 ("uml: maintain own LDT entries")
+Signed-off-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/um/ldt.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/um/ldt.c
++++ b/arch/x86/um/ldt.c
+@@ -23,9 +23,11 @@ static long write_ldt_entry(struct mm_id
+ {
+ long res;
+ void *stub_addr;
++
++ BUILD_BUG_ON(sizeof(*desc) % sizeof(long));
++
+ res = syscall_stub_data(mm_idp, (unsigned long *)desc,
+- (sizeof(*desc) + sizeof(long) - 1) &
+- ~(sizeof(long) - 1),
++ sizeof(*desc) / sizeof(long),
+ addr, &stub_addr);
+ if (!res) {
+ unsigned long args[] = { func,
--- /dev/null
+From 365719035526e8eda214a1cedb2e1c96e969a0d7 Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Mon, 28 Mar 2022 09:46:25 +0200
+Subject: um: Use asm-generic/dma-mapping.h
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+commit 365719035526e8eda214a1cedb2e1c96e969a0d7 upstream.
+
+If DMA (PCI over virtio) is enabled, then some drivers may
+enable CONFIG_DMA_OPS as well, and then we pull in the x86
+definition of get_arch_dma_ops(), which uses the dma_ops
+symbol, which isn't defined.
+
+Since we don't have real DMA ops nor any kind of IOMMU fix
+this in the simplest possible way: pull in the asm-generic
+file instead of inheriting the x86 one. It's not clear why
+those drivers that do (e.g. VDPA) "select DMA_OPS", and if
+they'd even work with this, but chances are nobody will be
+wanting to do that anyway, so fixing the build failure is
+good enough.
+
+Reported-by: Randy Dunlap <rdunlap@infradead.org>
+Fixes: 68f5d3f3b654 ("um: add PCI over virtio emulation driver")
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Tested-by: Randy Dunlap <rdunlap@infradead.org>
+Acked-by: Randy Dunlap <rdunlap@infradead.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/um/include/asm/Kbuild | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
+index f1f3f52f1e9c..b2d834a29f3a 100644
+--- a/arch/um/include/asm/Kbuild
++++ b/arch/um/include/asm/Kbuild
+@@ -4,6 +4,7 @@ generic-y += bug.h
+ generic-y += compat.h
+ generic-y += current.h
+ generic-y += device.h
++generic-y += dma-mapping.h
+ generic-y += emergency-restart.h
+ generic-y += exec.h
+ generic-y += extable.h
+--
+2.36.1
+
--- /dev/null
+From 242436973831aa97e8ce19533c6c912ea8def31b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Eugenio=20P=C3=A9rez?= <eperezma@redhat.com>
+Date: Thu, 19 May 2022 16:59:19 +0200
+Subject: vdpasim: allow to enable a vq repeatedly
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Eugenio Pérez <eperezma@redhat.com>
+
+commit 242436973831aa97e8ce19533c6c912ea8def31b upstream.
+
+Code must be resilient to enable a queue many times.
+
+At the moment the queue is resetting so it's definitely not the expected
+behavior.
+
+v2: set vq->ready = 0 at disable.
+
+Fixes: 2c53d0f64c06 ("vdpasim: vDPA device simulator")
+Cc: stable@vger.kernel.org
+Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
+Message-Id: <20220519145919.772896-1-eperezma@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vdpa/vdpa_sim/vdpa_sim.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
++++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
+@@ -353,11 +353,14 @@ static void vdpasim_set_vq_ready(struct
+ {
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
++ bool old_ready;
+
+ spin_lock(&vdpasim->lock);
++ old_ready = vq->ready;
+ vq->ready = ready;
+- if (vq->ready)
++ if (vq->ready && !old_ready) {
+ vdpasim_queue_ready(vdpasim, idx);
++ }
+ spin_unlock(&vdpasim->lock);
+ }
+
--- /dev/null
+From b011946d039d66bbc7102137e98cc67e1356aa87 Mon Sep 17 00:00:00 2001
+From: Yi Yang <yiyang13@huawei.com>
+Date: Tue, 10 May 2022 16:05:33 +0800
+Subject: xtensa/simdisk: fix proc_read_simdisk()
+
+From: Yi Yang <yiyang13@huawei.com>
+
+commit b011946d039d66bbc7102137e98cc67e1356aa87 upstream.
+
+The commit a69755b18774 ("xtensa simdisk: switch to proc_create_data()")
+split read operation into two parts, first retrieving the path when it's
+non-null and second retrieving the trailing '\n'. However when the path
+is non-null the first simple_read_from_buffer updates ppos, and the
+second simple_read_from_buffer returns 0 if ppos is greater than 1 (i.e.
+almost always). As a result reading from that proc file is almost always
+empty.
+
+Fix it by making a temporary copy of the path with the trailing '\n' and
+using simple_read_from_buffer on that copy.
+
+Cc: stable@vger.kernel.org
+Fixes: a69755b18774 ("xtensa simdisk: switch to proc_create_data()")
+Signed-off-by: Yi Yang <yiyang13@huawei.com>
+Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/xtensa/platforms/iss/simdisk.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+--- a/arch/xtensa/platforms/iss/simdisk.c
++++ b/arch/xtensa/platforms/iss/simdisk.c
+@@ -212,12 +212,18 @@ static ssize_t proc_read_simdisk(struct
+ struct simdisk *dev = PDE_DATA(file_inode(file));
+ const char *s = dev->filename;
+ if (s) {
+- ssize_t n = simple_read_from_buffer(buf, size, ppos,
+- s, strlen(s));
+- if (n < 0)
+- return n;
+- buf += n;
+- size -= n;
++ ssize_t len = strlen(s);
++ char *temp = kmalloc(len + 2, GFP_KERNEL);
++
++ if (!temp)
++ return -ENOMEM;
++
++ len = scnprintf(temp, len + 2, "%s\n", s);
++ len = simple_read_from_buffer(buf, size, ppos,
++ temp, len);
++
++ kfree(temp);
++ return len;
+ }
+ return simple_read_from_buffer(buf, size, ppos, "\n", 1);
+ }