--- /dev/null
+From 3f5e3d3a8b895c8a11da8b0063ba2022dd9e2045 Mon Sep 17 00:00:00 2001
+From: Jonathan Bakker <xc-racer2@live.ca>
+Date: Sun, 27 Mar 2022 11:08:51 -0700
+Subject: ARM: dts: s5pv210: Correct interrupt name for bluetooth in Aries
+
+From: Jonathan Bakker <xc-racer2@live.ca>
+
+commit 3f5e3d3a8b895c8a11da8b0063ba2022dd9e2045 upstream.
+
+Correct the name of the bluetooth interrupt from host-wake to
+host-wakeup.
+
+Fixes: 1c65b6184441b ("ARM: dts: s5pv210: Correct BCM4329 bluetooth node")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Jonathan Bakker <xc-racer2@live.ca>
+Link: https://lore.kernel.org/r/CY4PR04MB0567495CFCBDC8D408D44199CB1C9@CY4PR04MB0567.namprd04.prod.outlook.com
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/boot/dts/s5pv210-aries.dtsi | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/boot/dts/s5pv210-aries.dtsi
++++ b/arch/arm/boot/dts/s5pv210-aries.dtsi
+@@ -896,7 +896,7 @@
+ device-wakeup-gpios = <&gpg3 4 GPIO_ACTIVE_HIGH>;
+ interrupt-parent = <&gph2>;
+ interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
+- interrupt-names = "host-wake";
++ interrupt-names = "host-wakeup";
+ };
+ };
+
--- /dev/null
+From 72ef98445aca568a81c2da050532500a8345ad3a Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Tue, 5 Apr 2022 10:02:00 -0400
+Subject: Bluetooth: hci_qca: Use del_timer_sync() before freeing
+
+From: Steven Rostedt <rostedt@goodmis.org>
+
+commit 72ef98445aca568a81c2da050532500a8345ad3a upstream.
+
+While looking at a crash report on a corrupted timer list, which usually
+happens when a timer is freed while still active, I noticed that this is
+commonly triggered by code calling del_timer() instead of
+del_timer_sync() just before freeing.
+
+One possible culprit is the hci_qca driver, which does exactly that.
+
+Eric mentioned that wake_retrans_timer could be rearmed via the work
+queue, so also move the destruction of the work queue before
+del_timer_sync().
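+
+A brief sketch of the race being closed (illustrative only, not a literal
+trace of the driver): del_timer() merely deactivates a pending timer and
+does not wait for a handler that is already running on another CPU, so
+freeing right afterwards can leave that handler touching freed memory:
+
+  del_timer(&qca->tx_idle_timer);   /* handler may still be running  */
+  kfree(qca);                       /* ...and now uses freed memory  */
+
+del_timer_sync() instead waits for any running handler to finish, and
+destroying the workqueue first guarantees that no queued work can
+re-arm the timers in between.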
+
+Cc: Eric Dumazet <eric.dumazet@gmail.com>
+Cc: stable@vger.kernel.org
+Fixes: 0ff252c1976da ("Bluetooth: hciuart: Add support QCA chipset for UART")
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/bluetooth/hci_qca.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -689,9 +689,9 @@ static int qca_close(struct hci_uart *hu
+ skb_queue_purge(&qca->tx_wait_q);
+ skb_queue_purge(&qca->txq);
+ skb_queue_purge(&qca->rx_memdump_q);
+- del_timer(&qca->tx_idle_timer);
+- del_timer(&qca->wake_retrans_timer);
+ destroy_workqueue(qca->workqueue);
++ del_timer_sync(&qca->tx_idle_timer);
++ del_timer_sync(&qca->wake_retrans_timer);
+ qca->hu = NULL;
+
+ kfree_skb(qca->rx_skb);
--- /dev/null
+From 4ee4cdad368a26de3967f2975806a9ee2fa245df Mon Sep 17 00:00:00 2001
+From: Fabio Estevam <festevam@denx.de>
+Date: Wed, 20 Apr 2022 09:06:01 -0300
+Subject: crypto: caam - fix i.MX6SX entropy delay value
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Fabio Estevam <festevam@denx.de>
+
+commit 4ee4cdad368a26de3967f2975806a9ee2fa245df upstream.
+
+Since commit 358ba762d9f1 ("crypto: caam - enable prediction resistance
+in HRWNG") the following CAAM errors can be seen on i.MX6SX:
+
+caam_jr 2101000.jr: 20003c5b: CCB: desc idx 60: RNG: Hardware error
+hwrng: no data available
+
+This error is due to an incorrect entropy delay for i.MX6SX.
+
+Fix it by increasing the minimum entropy delay for i.MX6SX
+as done in U-Boot:
+https://patchwork.ozlabs.org/project/uboot/patch/20220415111049.2565744-1-gaurav.jain@nxp.com/
+
+As explained in the U-Boot patch:
+
+"RNG self tests are run to determine the correct entropy delay.
+Such tests are executed with different voltages and temperatures to identify
+the worst case value for the entropy delay. For i.MX6SX, it was determined
+that after adding a margin value of 1000 the minimum entropy delay should be
+at least 12000."
+
+Cc: <stable@vger.kernel.org>
+Fixes: 358ba762d9f1 ("crypto: caam - enable prediction resistance in HRWNG")
+Signed-off-by: Fabio Estevam <festevam@denx.de>
+Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
+Reviewed-by: Vabhav Sharma <vabhav.sharma@nxp.com>
+Reviewed-by: Gaurav Jain <gaurav.jain@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/caam/ctrl.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+--- a/drivers/crypto/caam/ctrl.c
++++ b/drivers/crypto/caam/ctrl.c
+@@ -609,6 +609,13 @@ static bool check_version(struct fsl_mc_
+ }
+ #endif
+
++static bool needs_entropy_delay_adjustment(void)
++{
++ if (of_machine_is_compatible("fsl,imx6sx"))
++ return true;
++ return false;
++}
++
+ /* Probe routine for CAAM top (controller) level */
+ static int caam_probe(struct platform_device *pdev)
+ {
+@@ -855,6 +862,8 @@ static int caam_probe(struct platform_de
+ * Also, if a handle was instantiated, do not change
+ * the TRNG parameters.
+ */
++ if (needs_entropy_delay_adjustment())
++ ent_delay = 12000;
+ if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
+ dev_info(dev,
+ "Entropy delay = %u\n",
+@@ -871,6 +880,15 @@ static int caam_probe(struct platform_de
+ */
+ ret = instantiate_rng(dev, inst_handles,
+ gen_sk);
++ /*
++ * Entropy delay is determined via TRNG characterization.
++ * TRNG characterization is run across different voltages
++ * and temperatures.
++ * If worst case value for ent_dly is identified,
++ * the loop can be skipped for that platform.
++ */
++ if (needs_entropy_delay_adjustment())
++ break;
+ if (ret == -EAGAIN)
+ /*
+ * if here, the loop will rerun,
--- /dev/null
+From foo@baz Fri Jun 3 04:46:19 PM CEST 2022
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Thu, 2 Jun 2022 22:22:32 +0200
+Subject: crypto: drbg - make reseeding from get_random_bytes() synchronous
+To: stable@vger.kernel.org, gregkh@linuxfoundation.org
+Message-ID: <20220602202232.281326-6-Jason@zx2c4.com>
+
+From: Nicolai Stange <nstange@suse.de>
+
+commit 074bcd4000e0d812bc253f86fedc40f81ed59ccc upstream.
+
+get_random_bytes() usually does not have full entropy available by the time
+DRBG instances are first seeded from it during boot. Thus, the DRBG
+implementation registers random_ready_callbacks which would in turn
+schedule some work for reseeding the DRBGs once get_random_bytes() has
+sufficient entropy available.
+
+For reference, the relevant history around handling DRBG (re)seeding in
+the context of a not yet fully seeded get_random_bytes() is:
+
+ commit 16b369a91d0d ("random: Blocking API for accessing
+ nonblocking_pool")
+ commit 4c7879907edd ("crypto: drbg - add async seeding operation")
+
+ commit 205a525c3342 ("random: Add callback API for random pool
+ readiness")
+ commit 57225e679788 ("crypto: drbg - Use callback API for random
+ readiness")
+ commit c2719503f5e1 ("random: Remove kernel blocking API")
+
+However, some time later, the initialization state of get_random_bytes()
+was made queryable via rng_is_initialized(), introduced with commit
+9a47249d444d ("random: Make crng state queryable"). This primitive now
+allows for streamlining the DRBG reseeding from get_random_bytes() by
+replacing that aforementioned asynchronous work scheduling from
+random_ready_callbacks with some simpler, synchronous code in
+drbg_generate() next to the related logic already present therein. Apart
+from improving overall code readability, this change will also enable DRBG
+users to rely on wait_for_random_bytes() for ensuring that the initial
+seeding has completed, if desired.
+
+The previous patches already laid the groundwork by making drbg_seed()
+record at each DRBG instance whether it was being seeded at a time when
+rng_is_initialized() was still false, as indicated by
+->seeded == DRBG_SEED_STATE_PARTIAL.
+
+All that remains to be done now is to make drbg_generate() check for this
+condition, determine whether rng_is_initialized() has flipped to true in
+the meantime and invoke a reseed from get_random_bytes() if so.
+
+Make this move:
+- rename the former drbg_async_seed() work handler, i.e. the one in charge
+ of reseeding a DRBG instance from get_random_bytes(), to
+ "drbg_seed_from_random()",
+- change its signature as appropriate, i.e. make it take a struct
+ drbg_state rather than a work_struct and change its return type from
+ "void" to "int" in order to allow for passing error information from
+ e.g. its __drbg_seed() invocation onwards to callers,
+- make drbg_generate() invoke this drbg_seed_from_random() once it
+ encounters a DRBG instance with ->seeded == DRBG_SEED_STATE_PARTIAL by
+ the time rng_is_initialized() has flipped to true and
+- prune everything related to the former, random_ready_callback based
+ mechanism.
+
+As drbg_seed_from_random() is now getting invoked from drbg_generate() with
+the ->drbg_mutex being held, it must not attempt to recursively grab it
+once again. Remove the corresponding mutex operations from what is now
+drbg_seed_from_random(). Furthermore, as drbg_seed_from_random() can now
+report errors directly to its caller, there's no need for it to temporarily
+switch the DRBG's ->seeded state to DRBG_SEED_STATE_UNSEEDED so that a
+failure of the subsequently invoked __drbg_seed() will get signaled to
+drbg_generate(). Don't do it then.
+
+Signed-off-by: Nicolai Stange <nstange@suse.de>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+[Jason: for stable, undid the modifications for the backport of 5acd3548.]
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ crypto/drbg.c | 61 +++++++++-----------------------------------------
+ drivers/char/random.c | 2 -
+ include/crypto/drbg.h | 2 -
+ 3 files changed, 11 insertions(+), 54 deletions(-)
+
+--- a/crypto/drbg.c
++++ b/crypto/drbg.c
+@@ -1086,12 +1086,10 @@ static inline int drbg_get_random_bytes(
+ return 0;
+ }
+
+-static void drbg_async_seed(struct work_struct *work)
++static int drbg_seed_from_random(struct drbg_state *drbg)
+ {
+ struct drbg_string data;
+ LIST_HEAD(seedlist);
+- struct drbg_state *drbg = container_of(work, struct drbg_state,
+- seed_work);
+ unsigned int entropylen = drbg_sec_strength(drbg->core->flags);
+ unsigned char entropy[32];
+ int ret;
+@@ -1102,23 +1100,15 @@ static void drbg_async_seed(struct work_
+ drbg_string_fill(&data, entropy, entropylen);
+ list_add_tail(&data.list, &seedlist);
+
+- mutex_lock(&drbg->drbg_mutex);
+-
+ ret = drbg_get_random_bytes(drbg, entropy, entropylen);
+ if (ret)
+- goto unlock;
+-
+- /* Reset ->seeded so that if __drbg_seed fails the next
+- * generate call will trigger a reseed.
+- */
+- drbg->seeded = DRBG_SEED_STATE_UNSEEDED;
++ goto out;
+
+- __drbg_seed(drbg, &seedlist, true, DRBG_SEED_STATE_FULL);
+-
+-unlock:
+- mutex_unlock(&drbg->drbg_mutex);
++ ret = __drbg_seed(drbg, &seedlist, true, DRBG_SEED_STATE_FULL);
+
++out:
+ memzero_explicit(entropy, entropylen);
++ return ret;
+ }
+
+ /*
+@@ -1421,6 +1411,11 @@ static int drbg_generate(struct drbg_sta
+ goto err;
+ /* 9.3.1 step 7.4 */
+ addtl = NULL;
++ } else if (rng_is_initialized() &&
++ drbg->seeded == DRBG_SEED_STATE_PARTIAL) {
++ len = drbg_seed_from_random(drbg);
++ if (len)
++ goto err;
+ }
+
+ if (addtl && 0 < addtl->len)
+@@ -1513,44 +1508,15 @@ static int drbg_generate_long(struct drb
+ return 0;
+ }
+
+-static int drbg_schedule_async_seed(struct notifier_block *nb, unsigned long action, void *data)
+-{
+- struct drbg_state *drbg = container_of(nb, struct drbg_state,
+- random_ready);
+-
+- schedule_work(&drbg->seed_work);
+- return 0;
+-}
+-
+ static int drbg_prepare_hrng(struct drbg_state *drbg)
+ {
+- int err;
+-
+ /* We do not need an HRNG in test mode. */
+ if (list_empty(&drbg->test_data.list))
+ return 0;
+
+ drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0);
+
+- INIT_WORK(&drbg->seed_work, drbg_async_seed);
+-
+- drbg->random_ready.notifier_call = drbg_schedule_async_seed;
+- err = register_random_ready_notifier(&drbg->random_ready);
+-
+- switch (err) {
+- case 0:
+- break;
+-
+- case -EALREADY:
+- err = 0;
+- fallthrough;
+-
+- default:
+- drbg->random_ready.notifier_call = NULL;
+- return err;
+- }
+-
+- return err;
++ return 0;
+ }
+
+ /*
+@@ -1644,11 +1610,6 @@ free_everything:
+ */
+ static int drbg_uninstantiate(struct drbg_state *drbg)
+ {
+- if (drbg->random_ready.notifier_call) {
+- unregister_random_ready_notifier(&drbg->random_ready);
+- cancel_work_sync(&drbg->seed_work);
+- }
+-
+ if (!IS_ERR_OR_NULL(drbg->jent))
+ crypto_free_rng(drbg->jent);
+ drbg->jent = NULL;
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -163,7 +163,6 @@ int __cold register_random_ready_notifie
+ spin_unlock_irqrestore(&random_ready_chain_lock, flags);
+ return ret;
+ }
+-EXPORT_SYMBOL(register_random_ready_notifier);
+
+ /*
+ * Delete a previously registered readiness callback function.
+@@ -178,7 +177,6 @@ int __cold unregister_random_ready_notif
+ spin_unlock_irqrestore(&random_ready_chain_lock, flags);
+ return ret;
+ }
+-EXPORT_SYMBOL(unregister_random_ready_notifier);
+
+ static void __cold process_random_ready_list(void)
+ {
+--- a/include/crypto/drbg.h
++++ b/include/crypto/drbg.h
+@@ -137,12 +137,10 @@ struct drbg_state {
+ bool pr; /* Prediction resistance enabled? */
+ bool fips_primed; /* Continuous test primed? */
+ unsigned char *prev; /* FIPS 140-2 continuous test value */
+- struct work_struct seed_work; /* asynchronous seeding support */
+ struct crypto_rng *jent;
+ const struct drbg_state_ops *d_ops;
+ const struct drbg_core *core;
+ struct drbg_string test_data;
+- struct notifier_block random_ready;
+ };
+
+ static inline __u8 drbg_statelen(struct drbg_state *drbg)
--- /dev/null
+From foo@baz Fri Jun 3 04:46:19 PM CEST 2022
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Thu, 2 Jun 2022 22:22:31 +0200
+Subject: crypto: drbg - move dynamic ->reseed_threshold adjustments to __drbg_seed()
+To: stable@vger.kernel.org, gregkh@linuxfoundation.org
+Message-ID: <20220602202232.281326-5-Jason@zx2c4.com>
+
+From: Nicolai Stange <nstange@suse.de>
+
+commit 262d83a4290c331cd4f617a457408bdb82fbb738 upstream.
+
+Since commit 42ea507fae1a ("crypto: drbg - reseed often if seedsource is
+degraded"), the maximum seed lifetime represented by ->reseed_threshold
+gets temporarily lowered if the get_random_bytes() source cannot provide
+sufficient entropy yet, as is common during boot, and restored back to
+the original value again once that has changed.
+
+More specifically, if the add_random_ready_callback() invoked from
+drbg_prepare_hrng() in the course of DRBG instantiation does not return
+-EALREADY, that is, if get_random_bytes() has not been fully initialized
+at this point yet, drbg_prepare_hrng() will lower ->reseed_threshold
+to a value of 50. The drbg_async_seed() scheduled from said
+random_ready_callback will eventually restore the original value.
+
+A future patch will replace the random_ready_callback based notification
+mechanism and thus, there will be no add_random_ready_callback() return
+value anymore which could get compared to -EALREADY.
+
+However, there is __drbg_seed(), which gets invoked in the course of both
+the DRBG instantiation and the eventual reseeding from
+get_random_bytes() in the aforementioned drbg_async_seed(), if any. Moreover,
+it knows about the get_random_bytes() initialization state by the time the
+seed data has been obtained from it: the new_seed_state argument introduced
+with the previous patch would get set to DRBG_SEED_STATE_PARTIAL in case
+get_random_bytes() had not been fully initialized yet and to
+DRBG_SEED_STATE_FULL otherwise. Thus, __drbg_seed() provides a convenient
+alternative for managing that ->reseed_threshold lowering and restoring at
+a central place.
+
+Move all ->reseed_threshold adjustment code from drbg_prepare_hrng() and
+drbg_async_seed() respectively to __drbg_seed(). Make __drbg_seed()
+lower the ->reseed_threshold to 50 in case its new_seed_state argument
+equals DRBG_SEED_STATE_PARTIAL and let it restore the original value
+otherwise.
+
+There is no change in behaviour.
+
+Signed-off-by: Nicolai Stange <nstange@suse.de>
+Reviewed-by: Stephan Müller <smueller@chronox.de>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ crypto/drbg.c | 30 +++++++++++++++++++++---------
+ 1 file changed, 21 insertions(+), 9 deletions(-)
+
+--- a/crypto/drbg.c
++++ b/crypto/drbg.c
+@@ -1046,6 +1046,27 @@ static inline int __drbg_seed(struct drb
+ /* 10.1.1.2 / 10.1.1.3 step 5 */
+ drbg->reseed_ctr = 1;
+
++ switch (drbg->seeded) {
++ case DRBG_SEED_STATE_UNSEEDED:
++ /* Impossible, but handle it to silence compiler warnings. */
++ fallthrough;
++ case DRBG_SEED_STATE_PARTIAL:
++ /*
++ * Require frequent reseeds until the seed source is
++ * fully initialized.
++ */
++ drbg->reseed_threshold = 50;
++ break;
++
++ case DRBG_SEED_STATE_FULL:
++ /*
++ * Seed source has become fully initialized, frequent
++ * reseeds no longer required.
++ */
++ drbg->reseed_threshold = drbg_max_requests(drbg);
++ break;
++ }
++
+ return ret;
+ }
+
+@@ -1094,9 +1115,6 @@ static void drbg_async_seed(struct work_
+
+ __drbg_seed(drbg, &seedlist, true, DRBG_SEED_STATE_FULL);
+
+- if (drbg->seeded == DRBG_SEED_STATE_FULL)
+- drbg->reseed_threshold = drbg_max_requests(drbg);
+-
+ unlock:
+ mutex_unlock(&drbg->drbg_mutex);
+
+@@ -1532,12 +1550,6 @@ static int drbg_prepare_hrng(struct drbg
+ return err;
+ }
+
+- /*
+- * Require frequent reseeds until the seed source is fully
+- * initialized.
+- */
+- drbg->reseed_threshold = 50;
+-
+ return err;
+ }
+
--- /dev/null
+From foo@baz Fri Jun 3 04:46:19 PM CEST 2022
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Thu, 2 Jun 2022 22:22:29 +0200
+Subject: crypto: drbg - prepare for more fine-grained tracking of seeding state
+To: stable@vger.kernel.org, gregkh@linuxfoundation.org
+Message-ID: <20220602202232.281326-3-Jason@zx2c4.com>
+
+From: Nicolai Stange <nstange@suse.de>
+
+commit ce8ce31b2c5c8b18667784b8c515650c65d57b4e upstream.
+
+There are two different randomness sources the DRBGs are getting seeded
+from, namely the jitterentropy source (if enabled) and get_random_bytes().
+At initial DRBG seeding time during boot, the latter might not have
+collected sufficient entropy for seeding itself yet and thus, the DRBG
+implementation schedules a reseed work from a random_ready_callback once
+that has happened. This is particularly important for the !->pr DRBG
+instances, for which (almost) no further reseeds are getting triggered
+during their lifetime.
+
+Because collecting data from the jitterentropy source is a rather expensive
+operation, the aforementioned asynchronously scheduled reseed work
+restricts itself to get_random_bytes() only. That is, it in some sense
+amends the initial DRBG seed derived from jitterentropy output at full
+(estimated) entropy with fresh randomness obtained from get_random_bytes()
+once that has been seeded with sufficient entropy itself.
+
+With the advent of rng_is_initialized(), there is no real need for doing
+the reseed operation from an asynchronously scheduled work anymore and a
+subsequent patch will make it synchronous by moving it next to related
+logic already present in drbg_generate().
+
+However, for tracking whether a full reseed including the jitterentropy
+source is required or a "partial" reseed involving only get_random_bytes()
+would be sufficient already, the boolean struct drbg_state's ->seeded
+member must become a tristate value.
+
+Prepare for this by introducing the new enum drbg_seed_state and change
+struct drbg_state's ->seeded member's type from bool to that type.
+
+To facilitate review, enum drbg_seed_state is made to contain only
+two members corresponding to the former ->seeded values of false and true
+resp. at this point: DRBG_SEED_STATE_UNSEEDED and DRBG_SEED_STATE_FULL. A
+third one for tracking the intermediate state of "seeded from jitterentropy
+only" will be introduced with a subsequent patch.
+
+There is no change in behaviour at this point.
+
+Signed-off-by: Nicolai Stange <nstange@suse.de>
+Reviewed-by: Stephan Müller <smueller@chronox.de>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ crypto/drbg.c | 19 ++++++++++---------
+ include/crypto/drbg.h | 7 ++++++-
+ 2 files changed, 16 insertions(+), 10 deletions(-)
+
+--- a/crypto/drbg.c
++++ b/crypto/drbg.c
+@@ -1042,7 +1042,7 @@ static inline int __drbg_seed(struct drb
+ if (ret)
+ return ret;
+
+- drbg->seeded = true;
++ drbg->seeded = DRBG_SEED_STATE_FULL;
+ /* 10.1.1.2 / 10.1.1.3 step 5 */
+ drbg->reseed_ctr = 1;
+
+@@ -1087,14 +1087,14 @@ static void drbg_async_seed(struct work_
+ if (ret)
+ goto unlock;
+
+- /* Set seeded to false so that if __drbg_seed fails the
+- * next generate call will trigger a reseed.
++ /* Reset ->seeded so that if __drbg_seed fails the next
++ * generate call will trigger a reseed.
+ */
+- drbg->seeded = false;
++ drbg->seeded = DRBG_SEED_STATE_UNSEEDED;
+
+ __drbg_seed(drbg, &seedlist, true);
+
+- if (drbg->seeded)
++ if (drbg->seeded == DRBG_SEED_STATE_FULL)
+ drbg->reseed_threshold = drbg_max_requests(drbg);
+
+ unlock:
+@@ -1385,13 +1385,14 @@ static int drbg_generate(struct drbg_sta
+ * here. The spec is a bit convoluted here, we make it simpler.
+ */
+ if (drbg->reseed_threshold < drbg->reseed_ctr)
+- drbg->seeded = false;
++ drbg->seeded = DRBG_SEED_STATE_UNSEEDED;
+
+- if (drbg->pr || !drbg->seeded) {
++ if (drbg->pr || drbg->seeded == DRBG_SEED_STATE_UNSEEDED) {
+ pr_devel("DRBG: reseeding before generation (prediction "
+ "resistance: %s, state %s)\n",
+ drbg->pr ? "true" : "false",
+- drbg->seeded ? "seeded" : "unseeded");
++ (drbg->seeded == DRBG_SEED_STATE_FULL ?
++ "seeded" : "unseeded"));
+ /* 9.3.1 steps 7.1 through 7.3 */
+ len = drbg_seed(drbg, addtl, true);
+ if (len)
+@@ -1576,7 +1577,7 @@ static int drbg_instantiate(struct drbg_
+ if (!drbg->core) {
+ drbg->core = &drbg_cores[coreref];
+ drbg->pr = pr;
+- drbg->seeded = false;
++ drbg->seeded = DRBG_SEED_STATE_UNSEEDED;
+ drbg->reseed_threshold = drbg_max_requests(drbg);
+
+ ret = drbg_alloc_state(drbg);
+--- a/include/crypto/drbg.h
++++ b/include/crypto/drbg.h
+@@ -105,6 +105,11 @@ struct drbg_test_data {
+ struct drbg_string *testentropy; /* TEST PARAMETER: test entropy */
+ };
+
++enum drbg_seed_state {
++ DRBG_SEED_STATE_UNSEEDED,
++ DRBG_SEED_STATE_FULL,
++};
++
+ struct drbg_state {
+ struct mutex drbg_mutex; /* lock around DRBG */
+ unsigned char *V; /* internal state 10.1.1.1 1a) */
+@@ -127,7 +132,7 @@ struct drbg_state {
+ struct crypto_wait ctr_wait; /* CTR mode async wait obj */
+ struct scatterlist sg_in, sg_out; /* CTR mode SGLs */
+
+- bool seeded; /* DRBG fully seeded? */
++ enum drbg_seed_state seeded; /* DRBG fully seeded? */
+ bool pr; /* Prediction resistance enabled? */
+ bool fips_primed; /* Continuous test primed? */
+ unsigned char *prev; /* FIPS 140-2 continuous test value */
--- /dev/null
+From foo@baz Fri Jun 3 04:46:19 PM CEST 2022
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Thu, 2 Jun 2022 22:22:30 +0200
+Subject: crypto: drbg - track whether DRBG was seeded with !rng_is_initialized()
+To: stable@vger.kernel.org, gregkh@linuxfoundation.org
+Message-ID: <20220602202232.281326-4-Jason@zx2c4.com>
+
+From: Nicolai Stange <nstange@suse.de>
+
+commit 2bcd25443868aa8863779a6ebc6c9319633025d2 upstream.
+
+Currently, the DRBG implementation schedules asynchronous work from
+random_ready_callbacks for reseeding the DRBG instances with output from
+get_random_bytes() once the latter has sufficient entropy available.
+
+However, as the get_random_bytes() initialization state can get queried by
+means of rng_is_initialized() now, there is no real need for this
+asynchronous reseeding logic anymore and it's better to keep things simple
+by doing it synchronously when needed instead, i.e. from drbg_generate()
+once rng_is_initialized() has flipped to true.
+
+Of course, for this to work, drbg_generate() would need some means by which
+it can tell whether or not rng_is_initialized() has flipped to true since
+the last seeding from get_random_bytes(). Or equivalently, whether or not
+the last seed from get_random_bytes() has happened when
+rng_is_initialized() was still evaluating to false.
+
+As it currently stands, enum drbg_seed_state allows for the representation
+of two different DRBG seeding states: DRBG_SEED_STATE_UNSEEDED and
+DRBG_SEED_STATE_FULL. The former makes drbg_generate() invoke a full
+reseeding operation involving both the rather expensive jitterentropy and
+the get_random_bytes() randomness sources. The DRBG_SEED_STATE_FULL
+state on the other hand implies that no reseeding at all is required for a
+!->pr DRBG variant.
+
+Introduce the new DRBG_SEED_STATE_PARTIAL state to enum drbg_seed_state for
+representing the condition that a DRBG was being seeded when
+rng_is_initialized() had still been false. In particular, this new state
+implies that
+- the given DRBG instance has been fully seeded from the jitterentropy
+ source (if enabled)
+- and drbg_generate() is supposed to reseed from get_random_bytes()
+ *only* once rng_is_initialized() turns to true.
+
+Up to now, the __drbg_seed() helper used to set the given DRBG instance's
+->seeded state to constant DRBG_SEED_STATE_FULL. Introduce a new argument
+allowing callers to specify the ->seeded value to be written instead.
+Make the first of its two callers, drbg_seed(), determine the appropriate
+value based on rng_is_initialized(). The remaining caller,
+drbg_async_seed(), is known to get invoked only once rng_is_initialized()
+is true, hence let it pass constant DRBG_SEED_STATE_FULL for the new
+argument to __drbg_seed().
+
+There is no change in behaviour, except for that the pr_devel() in
+drbg_generate() would now report "unseeded" for ->pr DRBG instances which
+had last been seeded when rng_is_initialized() was still evaluating to
+false.
+
+Signed-off-by: Nicolai Stange <nstange@suse.de>
+Reviewed-by: Stephan Müller <smueller@chronox.de>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ crypto/drbg.c | 12 ++++++++----
+ include/crypto/drbg.h | 1 +
+ 2 files changed, 9 insertions(+), 4 deletions(-)
+
+--- a/crypto/drbg.c
++++ b/crypto/drbg.c
+@@ -1035,14 +1035,14 @@ static const struct drbg_state_ops drbg_
+ ******************************************************************/
+
+ static inline int __drbg_seed(struct drbg_state *drbg, struct list_head *seed,
+- int reseed)
++ int reseed, enum drbg_seed_state new_seed_state)
+ {
+ int ret = drbg->d_ops->update(drbg, seed, reseed);
+
+ if (ret)
+ return ret;
+
+- drbg->seeded = DRBG_SEED_STATE_FULL;
++ drbg->seeded = new_seed_state;
+ /* 10.1.1.2 / 10.1.1.3 step 5 */
+ drbg->reseed_ctr = 1;
+
+@@ -1092,7 +1092,7 @@ static void drbg_async_seed(struct work_
+ */
+ drbg->seeded = DRBG_SEED_STATE_UNSEEDED;
+
+- __drbg_seed(drbg, &seedlist, true);
++ __drbg_seed(drbg, &seedlist, true, DRBG_SEED_STATE_FULL);
+
+ if (drbg->seeded == DRBG_SEED_STATE_FULL)
+ drbg->reseed_threshold = drbg_max_requests(drbg);
+@@ -1122,6 +1122,7 @@ static int drbg_seed(struct drbg_state *
+ unsigned int entropylen = drbg_sec_strength(drbg->core->flags);
+ struct drbg_string data1;
+ LIST_HEAD(seedlist);
++ enum drbg_seed_state new_seed_state = DRBG_SEED_STATE_FULL;
+
+ /* 9.1 / 9.2 / 9.3.1 step 3 */
+ if (pers && pers->len > (drbg_max_addtl(drbg))) {
+@@ -1149,6 +1150,9 @@ static int drbg_seed(struct drbg_state *
+ BUG_ON((entropylen * 2) > sizeof(entropy));
+
+ /* Get seed from in-kernel /dev/urandom */
++ if (!rng_is_initialized())
++ new_seed_state = DRBG_SEED_STATE_PARTIAL;
++
+ ret = drbg_get_random_bytes(drbg, entropy, entropylen);
+ if (ret)
+ goto out;
+@@ -1205,7 +1209,7 @@ static int drbg_seed(struct drbg_state *
+ memset(drbg->C, 0, drbg_statelen(drbg));
+ }
+
+- ret = __drbg_seed(drbg, &seedlist, reseed);
++ ret = __drbg_seed(drbg, &seedlist, reseed, new_seed_state);
+
+ out:
+ memzero_explicit(entropy, entropylen * 2);
+--- a/include/crypto/drbg.h
++++ b/include/crypto/drbg.h
+@@ -107,6 +107,7 @@ struct drbg_test_data {
+
+ enum drbg_seed_state {
+ DRBG_SEED_STATE_UNSEEDED,
++ DRBG_SEED_STATE_PARTIAL, /* Seeded with !rng_is_initialized() */
+ DRBG_SEED_STATE_FULL,
+ };
+
--- /dev/null
+From 7cc7ab73f83ee6d50dc9536bc3355495d8600fad Mon Sep 17 00:00:00 2001
+From: Vitaly Chikunov <vt@altlinux.org>
+Date: Thu, 21 Apr 2022 20:25:10 +0300
+Subject: crypto: ecrdsa - Fix incorrect use of vli_cmp
+
+From: Vitaly Chikunov <vt@altlinux.org>
+
+commit 7cc7ab73f83ee6d50dc9536bc3355495d8600fad upstream.
+
+Correctly handle the case where a value equals the group order: vli_cmp()
+returns 1 only for strictly-greater, so the previous '== 1' tests treated
+values equal to ctx->curve->n as if they were in range (or already
+reduced). Compare with '>= 0' instead.
+
+Fixes: 0d7a78643f69 ("crypto: ecrdsa - add EC-RDSA (GOST 34.10) algorithm")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Vitaly Chikunov <vt@altlinux.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ crypto/ecrdsa.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/crypto/ecrdsa.c
++++ b/crypto/ecrdsa.c
+@@ -113,15 +113,15 @@ static int ecrdsa_verify(struct akcipher
+
+ /* Step 1: verify that 0 < r < q, 0 < s < q */
+ if (vli_is_zero(r, ndigits) ||
+- vli_cmp(r, ctx->curve->n, ndigits) == 1 ||
++ vli_cmp(r, ctx->curve->n, ndigits) >= 0 ||
+ vli_is_zero(s, ndigits) ||
+- vli_cmp(s, ctx->curve->n, ndigits) == 1)
++ vli_cmp(s, ctx->curve->n, ndigits) >= 0)
+ return -EKEYREJECTED;
+
+ /* Step 2: calculate hash (h) of the message (passed as input) */
+ /* Step 3: calculate e = h \mod q */
+ vli_from_le64(e, digest, ndigits);
+- if (vli_cmp(e, ctx->curve->n, ndigits) == 1)
++ if (vli_cmp(e, ctx->curve->n, ndigits) >= 0)
+ vli_sub(e, e, ctx->curve->n, ndigits);
+ if (vli_is_zero(e, ndigits))
+ e[0] = 1;
+@@ -137,7 +137,7 @@ static int ecrdsa_verify(struct akcipher
+ /* Step 6: calculate point C = z_1P + z_2Q, and R = x_c \mod q */
+ ecc_point_mult_shamir(&cc, z1, &ctx->curve->g, z2, &ctx->pub_key,
+ ctx->curve);
+- if (vli_cmp(cc.x, ctx->curve->n, ndigits) == 1)
++ if (vli_cmp(cc.x, ctx->curve->n, ndigits) >= 0)
+ vli_sub(cc.x, cc.x, ctx->curve->n, ndigits);
+
+ /* Step 7: if R == r signature is valid */
--- /dev/null
+From 567dd8f34560fa221a6343729474536aa7ede4fd Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Mon, 25 Apr 2022 08:53:29 -0400
+Subject: dm crypt: make printing of the key constant-time
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 567dd8f34560fa221a6343729474536aa7ede4fd upstream.
+
+The device mapper dm-crypt target is using scnprintf("%02x", cc->key[i]) to
+report the current key to userspace. However, this is not a constant-time
+operation and it may leak information about the key via timing, via cache
+access patterns or via the branch predictor.
+
+Change dm-crypt's key printing to use "%c" instead of "%02x". Also
+introduce hex2asc() that carefully avoids any branching or memory
+accesses when converting a number in the range 0 ... 15 to an ASCII
+character.
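+
+As a sanity check, a minimal user-space sketch of the mapping (only the
+helper itself is copied from the patch; everything else is illustrative):
+for c <= 9 the term (9 - c) stays non-negative, the shift yields 0 and the
+result is c + '0'; for c >= 10 the subtraction wraps to a large unsigned
+value, the shift-and-mask yields 0x27 (39), and c + '0' + 39 lands on
+'a' ... 'f'.
+
+  #include <assert.h>
+
+  static char hex2asc(unsigned char c)
+  {
+          return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
+  }
+
+  int main(void)
+  {
+          for (unsigned char c = 0; c < 16; c++)
+                  assert(hex2asc(c) == "0123456789abcdef"[c]);
+          return 0;
+  }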
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Tested-by: Milan Broz <gmazyland@gmail.com>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-crypt.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -3404,6 +3404,11 @@ static int crypt_map(struct dm_target *t
+ return DM_MAPIO_SUBMITTED;
+ }
+
++static char hex2asc(unsigned char c)
++{
++ return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
++}
++
+ static void crypt_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
+ {
+@@ -3422,9 +3427,12 @@ static void crypt_status(struct dm_targe
+ if (cc->key_size > 0) {
+ if (cc->key_string)
+ DMEMIT(":%u:%s", cc->key_size, cc->key_string);
+- else
+- for (i = 0; i < cc->key_size; i++)
+- DMEMIT("%02x", cc->key[i]);
++ else {
++ for (i = 0; i < cc->key_size; i++) {
++ DMEMIT("%c%c", hex2asc(cc->key[i] >> 4),
++ hex2asc(cc->key[i] & 0xf));
++ }
++ }
+ } else
+ DMEMIT("-");
+
--- /dev/null
+From d3f2a14b8906df913cb04a706367b012db94a6e8 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Mon, 25 Apr 2022 14:56:48 +0300
+Subject: dm integrity: fix error code in dm_integrity_ctr()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit d3f2a14b8906df913cb04a706367b012db94a6e8 upstream.
+
+The "r" variable shadows an earlier "r" that has function scope. It
+means that we accidentally return success instead of an error code.
+Smatch has a warning for this:
+
+ drivers/md/dm-integrity.c:4503 dm_integrity_ctr()
+ warn: missing error code 'r'
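+
+In outline (a condensed sketch with the logic reduced, not the driver code
+verbatim), the shadowed declaration made the error path return the
+untouched outer variable:
+
+  static int ctr_sketch(int should_write_sb, int journal_error)
+  {
+          int r = 0;                      /* function-scope r            */
+
+          if (should_write_sb) {
+                  int r;                  /* shadows the outer r         */
+
+                  r = journal_error;      /* stand-in for
+                                             dm_integrity_failed(ic)     */
+                  if (r)
+                          goto bad;       /* error held only in inner r  */
+          }
+          return 0;
+  bad:
+          return r;                       /* outer r is still 0: success */
+  }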
+
+Fixes: 7eada909bfd7 ("dm: add integrity target")
+Cc: stable@vger.kernel.org
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Reviewed-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-integrity.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -4327,8 +4327,6 @@ try_smaller_buffer:
+ }
+
+ if (should_write_sb) {
+- int r;
+-
+ init_journal(ic, 0, ic->journal_sections, 0);
+ r = dm_integrity_failed(ic);
+ if (unlikely(r)) {
--- /dev/null
+From bfe2b0146c4d0230b68f5c71a64380ff8d361f8b Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Sun, 24 Apr 2022 16:43:00 -0400
+Subject: dm stats: add cond_resched when looping over entries
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit bfe2b0146c4d0230b68f5c71a64380ff8d361f8b upstream.
+
+dm-stats can be used with a very large number of entries (it is only
+limited by 1/4 of total system memory), so add rescheduling points to
+the loops that iterate over the entries.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-stats.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/md/dm-stats.c
++++ b/drivers/md/dm-stats.c
+@@ -224,6 +224,7 @@ void dm_stats_cleanup(struct dm_stats *s
+ atomic_read(&shared->in_flight[READ]),
+ atomic_read(&shared->in_flight[WRITE]));
+ }
++ cond_resched();
+ }
+ dm_stat_free(&s->rcu_head);
+ }
+@@ -313,6 +314,7 @@ static int dm_stats_create(struct dm_sta
+ for (ni = 0; ni < n_entries; ni++) {
+ atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
+ atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
++ cond_resched();
+ }
+
+ if (s->n_histogram_entries) {
+@@ -325,6 +327,7 @@ static int dm_stats_create(struct dm_sta
+ for (ni = 0; ni < n_entries; ni++) {
+ s->stat_shared[ni].tmp.histogram = hi;
+ hi += s->n_histogram_entries + 1;
++ cond_resched();
+ }
+ }
+
+@@ -345,6 +348,7 @@ static int dm_stats_create(struct dm_sta
+ for (ni = 0; ni < n_entries; ni++) {
+ p[ni].histogram = hi;
+ hi += s->n_histogram_entries + 1;
++ cond_resched();
+ }
+ }
+ }
+@@ -474,6 +478,7 @@ static int dm_stats_list(struct dm_stats
+ }
+ DMEMIT("\n");
+ }
++ cond_resched();
+ }
+ mutex_unlock(&stats->mutex);
+
+@@ -750,6 +755,7 @@ static void __dm_stat_clear(struct dm_st
+ local_irq_enable();
+ }
+ }
++ cond_resched();
+ }
+ }
+
+@@ -865,6 +871,8 @@ static int dm_stats_print(struct dm_stat
+
+ if (unlikely(sz + 1 >= maxlen))
+ goto buffer_overflow;
++
++ cond_resched();
+ }
+
+ if (clear)
--- /dev/null
+From 4caae58406f8ceb741603eee460d79bacca9b1b5 Mon Sep 17 00:00:00 2001
+From: Sarthak Kukreti <sarthakkukreti@google.com>
+Date: Tue, 31 May 2022 15:56:40 -0400
+Subject: dm verity: set DM_TARGET_IMMUTABLE feature flag
+
+From: Sarthak Kukreti <sarthakkukreti@google.com>
+
+commit 4caae58406f8ceb741603eee460d79bacca9b1b5 upstream.
+
+The device-mapper framework provides a mechanism to mark targets as
+immutable (and hence fail table reloads that try to change the target
+type). Add the DM_TARGET_IMMUTABLE flag to the dm-verity target's
+feature flags to prevent switching the verity target with a different
+target type.
+
+Fixes: a4ffc152198e ("dm: add verity target")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sarthak Kukreti <sarthakkukreti@google.com>
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-verity-target.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -1242,6 +1242,7 @@ bad:
+
+ static struct target_type verity_target = {
+ .name = "verity",
++ .features = DM_TARGET_IMMUTABLE,
+ .version = {1, 7, 0},
+ .module = THIS_MODULE,
+ .ctr = verity_ctr,
--- /dev/null
+From 336feb502a715909a8136eb6a62a83d7268a353b Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <gustavoars@kernel.org>
+Date: Wed, 27 Apr 2022 17:47:14 -0500
+Subject: drm/i915: Fix -Wstringop-overflow warning in call to intel_read_wm_latency()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Gustavo A. R. Silva <gustavoars@kernel.org>
+
+commit 336feb502a715909a8136eb6a62a83d7268a353b upstream.
+
+Fix the following -Wstringop-overflow warnings when building with GCC-11:
+
+drivers/gpu/drm/i915/intel_pm.c:3106:9: warning: ‘intel_read_wm_latency’ accessing 16 bytes in a region of size 10 [-Wstringop-overflow=]
+ 3106 | intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+drivers/gpu/drm/i915/intel_pm.c:3106:9: note: referencing argument 2 of type ‘u16 *’ {aka ‘short unsigned int *’}
+drivers/gpu/drm/i915/intel_pm.c:2861:13: note: in a call to function ‘intel_read_wm_latency’
+ 2861 | static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
+ | ^~~~~~~~~~~~~~~~~~~~~
+
+by removing the over-specified array size from the argument declarations.
+
+It seems that this code is actually safe because the size of the
+array depends on the hardware generation, and the function checks
+for that.
+
+Notice that wm can be an array of 5 elements:
+drivers/gpu/drm/i915/intel_pm.c:3109: intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
+
+or an array of 8 elements:
+drivers/gpu/drm/i915/intel_pm.c:3131: intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
+
+and the compiler legitimately complains about that.
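+
+A minimal sketch of the warning class (illustrative, not i915 code): an
+array bound in a parameter declaration does not change the parameter's
+type (it still decays to a pointer), but GCC 11 checks callers against
+the declared bound:
+
+  /* gcc-11 -Wall -c sketch.c */
+  void read_latency(unsigned short wm[8]);   /* over-specified bound */
+
+  unsigned short pri_latency[5];              /* only 10 bytes        */
+
+  void caller(void)
+  {
+          read_latency(pri_latency);          /* warning: accessing 16
+                                                 bytes in a region of
+                                                 size 10              */
+  }
+
+Dropping the bound (u16 wm[]) keeps the parameter type identical while
+silencing the warning for callers that pass smaller arrays.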
+
+This helps with the ongoing efforts to globally enable
+-Wstringop-overflow.
+
+Link: https://github.com/KSPP/linux/issues/181
+Signed-off-by: Gustavo A. R. Silva <gustavoars@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/intel_pm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -2846,7 +2846,7 @@ static void ilk_compute_wm_level(const s
+ }
+
+ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
+- u16 wm[8])
++ u16 wm[])
+ {
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
--- /dev/null
+From 64ba4b15e5c045f8b746c6da5fc9be9a6b00b61d Mon Sep 17 00:00:00 2001
+From: Tadeusz Struk <tadeusz.struk@linaro.org>
+Date: Tue, 17 May 2022 08:13:08 +0900
+Subject: exfat: check if cluster num is valid
+
+From: Tadeusz Struk <tadeusz.struk@linaro.org>
+
+commit 64ba4b15e5c045f8b746c6da5fc9be9a6b00b61d upstream.
+
+Syzbot reported slab-out-of-bounds read in exfat_clear_bitmap.
+This was triggered by a reproducer calling truncate with size 0,
+which causes the following trace:
+
+BUG: KASAN: slab-out-of-bounds in exfat_clear_bitmap+0x147/0x490 fs/exfat/balloc.c:174
+Read of size 8 at addr ffff888115aa9508 by task syz-executor251/365
+
+Call Trace:
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack_lvl+0x1e2/0x24b lib/dump_stack.c:118
+ print_address_description+0x81/0x3c0 mm/kasan/report.c:233
+ __kasan_report mm/kasan/report.c:419 [inline]
+ kasan_report+0x1a4/0x1f0 mm/kasan/report.c:436
+ __asan_report_load8_noabort+0x14/0x20 mm/kasan/report_generic.c:309
+ exfat_clear_bitmap+0x147/0x490 fs/exfat/balloc.c:174
+ exfat_free_cluster+0x25a/0x4a0 fs/exfat/fatent.c:181
+ __exfat_truncate+0x99e/0xe00 fs/exfat/file.c:217
+ exfat_truncate+0x11b/0x4f0 fs/exfat/file.c:243
+ exfat_setattr+0xa03/0xd40 fs/exfat/file.c:339
+ notify_change+0xb76/0xe10 fs/attr.c:336
+ do_truncate+0x1ea/0x2d0 fs/open.c:65
+
+Move the is_valid_cluster() helper from fatent.c to a common
+header to make it reusable in other *.c files, and use is_valid_cluster()
+to validate that the cluster number is within the valid range in
+exfat_clear_bitmap() and exfat_set_bitmap().
+
+Link: https://syzkaller.appspot.com/bug?id=50381fc73821ecae743b8cf24b4c9a04776f767c
+Reported-by: syzbot+a4087e40b9c13aad7892@syzkaller.appspotmail.com
+Fixes: 1e49a94cf707 ("exfat: add bitmap operations")
+Cc: stable@vger.kernel.org # v5.7+
+Signed-off-by: Tadeusz Struk <tadeusz.struk@linaro.org>
+Reviewed-by: Sungjong Seo <sj1557.seo@samsung.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/exfat/balloc.c | 8 ++++++--
+ fs/exfat/exfat_fs.h | 8 ++++++++
+ fs/exfat/fatent.c | 8 --------
+ 3 files changed, 14 insertions(+), 10 deletions(-)
+
+--- a/fs/exfat/balloc.c
++++ b/fs/exfat/balloc.c
+@@ -148,7 +148,9 @@ int exfat_set_bitmap(struct inode *inode
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+- WARN_ON(clu < EXFAT_FIRST_CLUSTER);
++ if (!is_valid_cluster(sbi, clu))
++ return -EINVAL;
++
+ ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
+ i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
+ b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
+@@ -166,7 +168,9 @@ void exfat_clear_bitmap(struct inode *in
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_mount_options *opts = &sbi->options;
+
+- WARN_ON(clu < EXFAT_FIRST_CLUSTER);
++ if (!is_valid_cluster(sbi, clu))
++ return;
++
+ ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
+ i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
+ b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
+--- a/fs/exfat/exfat_fs.h
++++ b/fs/exfat/exfat_fs.h
+@@ -380,6 +380,14 @@ static inline int exfat_sector_to_cluste
+ EXFAT_RESERVED_CLUSTERS;
+ }
+
++static inline bool is_valid_cluster(struct exfat_sb_info *sbi,
++ unsigned int clus)
++{
++ if (clus < EXFAT_FIRST_CLUSTER || sbi->num_clusters <= clus)
++ return false;
++ return true;
++}
++
+ /* super.c */
+ int exfat_set_volume_dirty(struct super_block *sb);
+ int exfat_clear_volume_dirty(struct super_block *sb);
+--- a/fs/exfat/fatent.c
++++ b/fs/exfat/fatent.c
+@@ -81,14 +81,6 @@ int exfat_ent_set(struct super_block *sb
+ return 0;
+ }
+
+-static inline bool is_valid_cluster(struct exfat_sb_info *sbi,
+- unsigned int clus)
+-{
+- if (clus < EXFAT_FIRST_CLUSTER || sbi->num_clusters <= clus)
+- return false;
+- return true;
+-}
+-
+ int exfat_ent_get(struct super_block *sb, unsigned int loc,
+ unsigned int *content)
+ {
--- /dev/null
+From d8dad2588addd1d861ce19e7df3b702330f0c7e3 Mon Sep 17 00:00:00 2001
+From: Yuezhang Mo <Yuezhang.Mo@sony.com>
+Date: Mon, 4 Apr 2022 11:58:06 +0900
+Subject: exfat: fix referencing wrong parent directory information after renaming
+
+From: Yuezhang Mo <Yuezhang.Mo@sony.com>
+
+commit d8dad2588addd1d861ce19e7df3b702330f0c7e3 upstream.
+
+During renaming, the parent directory information may be
+updated, but the file/directory still references the
+old parent directory information.
+
+This bug will cause 2 problems.
+
+(1) The renamed file can not be written.
+
+ [10768.175172] exFAT-fs (sda1): error, failed to bmap (inode : 7afd50e4 iblock : 0, err : -5)
+ [10768.184285] exFAT-fs (sda1): Filesystem has been set read-only
+ ash: write error: Input/output error
+
+(2) Some dentries of the renamed file/directory are not set
+ to deleted after removing the file/directory.
+
+exfat_update_parent_info() is a workaround for the wrong parent
+directory information being used after renaming. Now that bug is
+fixed, this is no longer needed, so remove it.
+
+Fixes: 5f2aa075070c ("exfat: add inode operations")
+Cc: stable@vger.kernel.org # v5.7+
+Signed-off-by: Yuezhang Mo <Yuezhang.Mo@sony.com>
+Reviewed-by: Andy Wu <Andy.Wu@sony.com>
+Reviewed-by: Aoyama Wataru <wataru.aoyama@sony.com>
+Reviewed-by: Daniel Palmer <daniel.palmer@sony.com>
+Reviewed-by: Sungjong Seo <sj1557.seo@samsung.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/exfat/namei.c | 27 +--------------------------
+ 1 file changed, 1 insertion(+), 26 deletions(-)
+
+--- a/fs/exfat/namei.c
++++ b/fs/exfat/namei.c
+@@ -1061,6 +1061,7 @@ static int exfat_rename_file(struct inod
+
+ exfat_remove_entries(inode, p_dir, oldentry, 0,
+ num_old_entries);
++ ei->dir = *p_dir;
+ ei->entry = newentry;
+ } else {
+ if (exfat_get_entry_type(epold) == TYPE_FILE) {
+@@ -1151,28 +1152,6 @@ static int exfat_move_file(struct inode
+ return 0;
+ }
+
+-static void exfat_update_parent_info(struct exfat_inode_info *ei,
+- struct inode *parent_inode)
+-{
+- struct exfat_sb_info *sbi = EXFAT_SB(parent_inode->i_sb);
+- struct exfat_inode_info *parent_ei = EXFAT_I(parent_inode);
+- loff_t parent_isize = i_size_read(parent_inode);
+-
+- /*
+- * the problem that struct exfat_inode_info caches wrong parent info.
+- *
+- * because of flag-mismatch of ei->dir,
+- * there is abnormal traversing cluster chain.
+- */
+- if (unlikely(parent_ei->flags != ei->dir.flags ||
+- parent_isize != EXFAT_CLU_TO_B(ei->dir.size, sbi) ||
+- parent_ei->start_clu != ei->dir.dir)) {
+- exfat_chain_set(&ei->dir, parent_ei->start_clu,
+- EXFAT_B_TO_CLU_ROUND_UP(parent_isize, sbi),
+- parent_ei->flags);
+- }
+-}
+-
+ /* rename or move a old file into a new file */
+ static int __exfat_rename(struct inode *old_parent_inode,
+ struct exfat_inode_info *ei, struct inode *new_parent_inode,
+@@ -1203,8 +1182,6 @@ static int __exfat_rename(struct inode *
+ return -ENOENT;
+ }
+
+- exfat_update_parent_info(ei, old_parent_inode);
+-
+ exfat_chain_dup(&olddir, &ei->dir);
+ dentry = ei->entry;
+
+@@ -1225,8 +1202,6 @@ static int __exfat_rename(struct inode *
+ goto out;
+ }
+
+- exfat_update_parent_info(new_ei, new_parent_inode);
+-
+ p_dir = &(new_ei->dir);
+ new_entry = new_ei->entry;
+ ep = exfat_get_dentry(sb, p_dir, new_entry, &new_bh, NULL);
--- /dev/null
+From 95cd2cdc88c755dcd0a58b951faeb77742c733a4 Mon Sep 17 00:00:00 2001
+From: Tao Jin <tao-j@outlook.com>
+Date: Sun, 3 Apr 2022 12:57:44 -0400
+Subject: HID: multitouch: add quirks to enable Lenovo X12 trackpoint
+
+From: Tao Jin <tao-j@outlook.com>
+
+commit 95cd2cdc88c755dcd0a58b951faeb77742c733a4 upstream.
+
+This applies the quirks used by previous-generation devices such as the
+X1 tablet to the X12 tablet, so that the trackpoint and buttons can
+work.
+
+This patch was applied and tested as working on 5.17.1.
+
+Cc: stable@vger.kernel.org # 5.8+ given that it relies on 40d5bb87377a
+Signed-off-by: Tao Jin <tao-j@outlook.com>
+Signed-off-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+Link: https://lore.kernel.org/r/CO6PR03MB6241CB276FCDC7F4CEDC34F6E1E29@CO6PR03MB6241.namprd03.prod.outlook.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hid/hid-ids.h | 1 +
+ drivers/hid/hid-multitouch.c | 6 ++++++
+ 2 files changed, 7 insertions(+)
+
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -743,6 +743,7 @@
+ #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
+ #define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
+ #define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5
++#define USB_DEVICE_ID_LENOVO_X12_TAB 0x60fe
+ #define USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E 0x600e
+ #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d
+ #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -1990,6 +1990,12 @@ static const struct hid_device_id mt_dev
+ USB_VENDOR_ID_LENOVO,
+ USB_DEVICE_ID_LENOVO_X1_TAB3) },
+
++ /* Lenovo X12 TAB Gen 1 */
++ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
++ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
++ USB_VENDOR_ID_LENOVO,
++ USB_DEVICE_ID_LENOVO_X12_TAB) },
++
+ /* MosArt panels */
+ { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
+ MT_USB_DEVICE(USB_VENDOR_ID_ASUS,
--- /dev/null
+From 1d07cef7fd7599450b3d03e1915efc2a96e1f03f Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Marek=20Ma=C5=9Blanka?= <mm@semihalf.com>
+Date: Tue, 5 Apr 2022 17:04:07 +0200
+Subject: HID: multitouch: Add support for Google Whiskers Touchpad
+
+From: Marek Maślanka <mm@semihalf.com>
+
+commit 1d07cef7fd7599450b3d03e1915efc2a96e1f03f upstream.
+
+The Google Whiskers touchpad does not work properly with the default
+multitouch configuration. Instead, use the same configuration as Google
+Rose.
+
+Signed-off-by: Marek Maslanka <mm@semihalf.com>
+Acked-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hid/hid-multitouch.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -2129,6 +2129,9 @@ static const struct hid_device_id mt_dev
+ { .driver_data = MT_CLS_GOOGLE,
+ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_GOOGLE,
+ USB_DEVICE_ID_GOOGLE_TOUCH_ROSE) },
++ { .driver_data = MT_CLS_GOOGLE,
++ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_GOOGLE,
++ USB_DEVICE_ID_GOOGLE_WHISKERS) },
+
+ /* Generic MT device */
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },
--- /dev/null
+From 300981abddcb13f8f06ad58f52358b53a8096775 Mon Sep 17 00:00:00 2001
+From: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+Date: Thu, 14 Apr 2022 14:21:03 +0800
+Subject: KVM: PPC: Book3S HV: fix incorrect NULL check on list iterator
+
+From: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+
+commit 300981abddcb13f8f06ad58f52358b53a8096775 upstream.
+
+The bug is here:
+ if (!p)
+ return ret;
+
+The list iterator value 'p' will *always* be set and non-NULL by
+list_for_each_entry(), so it is incorrect to assume that the iterator
+value will be NULL if the list is empty or no element is found.
+
+To fix the bug, use a new variable 'iter' as the list iterator, and use
+the old variable 'p' as a dedicated pointer to the found element.
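+
+For reference, a short sketch of the resulting pattern (kernel list.h
+idiom; in_range() stands in for the real gfn-range test): after a
+completed list_for_each_entry() walk, the iterator points at a
+container_of() of the list head itself, a bogus but non-NULL pointer, so
+only a separate "found" pointer can signal the not-found case:
+
+  struct kvmppc_uvmem_slot *p = NULL, *iter;
+
+  list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list) {
+          if (in_range(iter, *gfn)) {
+                  p = iter;
+                  break;
+          }
+  }
+  if (!p)
+          return ret;     /* genuine "not found" */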
+
+Fixes: dfaa973ae960 ("KVM: PPC: Book3S HV: In H_SVM_INIT_DONE, migrate remaining normal-GFNs to secure-GFNs")
+Cc: stable@vger.kernel.org # v5.9+
+Signed-off-by: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20220414062103.8153-1-xiam0nd.tong@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kvm/book3s_hv_uvmem.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
++++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
+@@ -359,13 +359,15 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsi
+ static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot,
+ struct kvm *kvm, unsigned long *gfn)
+ {
+- struct kvmppc_uvmem_slot *p;
++ struct kvmppc_uvmem_slot *p = NULL, *iter;
+ bool ret = false;
+ unsigned long i;
+
+- list_for_each_entry(p, &kvm->arch.uvmem_pfns, list)
+- if (*gfn >= p->base_pfn && *gfn < p->base_pfn + p->nr_pfns)
++ list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
++ if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) {
++ p = iter;
+ break;
++ }
+ if (!p)
+ return ret;
+ /*
--- /dev/null
+From fee060cd52d69c114b62d1a2948ea9648b5131f9 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Fri, 11 Mar 2022 03:27:41 +0000
+Subject: KVM: x86: avoid calling x86 emulator without a decoded instruction
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit fee060cd52d69c114b62d1a2948ea9648b5131f9 upstream.
+
+Whenever x86_decode_emulated_instruction() detects a breakpoint, it
+returns the value that kvm_vcpu_check_breakpoint() writes into its
+pass-by-reference second argument. Unfortunately this is completely
+bogus because the expected outcome of x86_decode_emulated_instruction
+is an EMULATION_* value.
+
+Then, if kvm_vcpu_check_breakpoint() does "*r = 0" (corresponding to
+a KVM_EXIT_DEBUG userspace exit), it is misunderstood as EMULATION_OK
+and x86_emulate_instruction() is called without having decoded the
+instruction. This causes various havoc from running with a stale
+emulation context.
+
+The fix is to move the call to kvm_vcpu_check_breakpoint() where it was
+before commit 4aa2691dcbd3 ("KVM: x86: Factor out x86 instruction
+emulation with decoding") introduced x86_decode_emulated_instruction().
+The other caller of the function does not need breakpoint checks,
+because it is invoked as part of a vmexit and the processor has already
+checked those before executing the instruction that #GP'd.
+
+This fixes CVE-2022-1852.
+
+Reported-by: Qiuhao Li <qiuhao@sysec.org>
+Reported-by: Gaoning Pan <pgn@zju.edu.cn>
+Reported-by: Yongkang Jia <kangel@zju.edu.cn>
+Fixes: 4aa2691dcbd3 ("KVM: x86: Factor out x86 instruction emulation with decoding")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20220311032801.3467418-2-seanjc@google.com>
+[Rewrote commit message according to Qiuhao's report, since a patch
+ already existed to fix the bug. - Paolo]
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/x86.c | 31 +++++++++++++++++++------------
+ 1 file changed, 19 insertions(+), 12 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -7295,7 +7295,7 @@ int kvm_skip_emulated_instruction(struct
+ }
+ EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
+
+-static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
++static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu, int *r)
+ {
+ if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
+ (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
+@@ -7364,25 +7364,23 @@ static bool is_vmware_backdoor_opcode(st
+ }
+
+ /*
+- * Decode to be emulated instruction. Return EMULATION_OK if success.
++ * Decode an instruction for emulation. The caller is responsible for handling
++ * code breakpoints. Note, manually detecting code breakpoints is unnecessary
++ * (and wrong) when emulating on an intercepted fault-like exception[*], as
++ * code breakpoints have higher priority and thus have already been done by
++ * hardware.
++ *
++ * [*] Except #MC, which is higher priority, but KVM should never emulate in
++ * response to a machine check.
+ */
+ int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
+ void *insn, int insn_len)
+ {
+- int r = EMULATION_OK;
+ struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
++ int r;
+
+ init_emulate_ctxt(vcpu);
+
+- /*
+- * We will reenter on the same instruction since we do not set
+- * complete_userspace_io. This does not handle watchpoints yet,
+- * those would be handled in the emulate_ops.
+- */
+- if (!(emulation_type & EMULTYPE_SKIP) &&
+- kvm_vcpu_check_breakpoint(vcpu, &r))
+- return r;
+-
+ ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
+
+ r = x86_decode_insn(ctxt, insn, insn_len);
+@@ -7417,6 +7415,15 @@ int x86_emulate_instruction(struct kvm_v
+ if (!(emulation_type & EMULTYPE_NO_DECODE)) {
+ kvm_clear_exception_queue(vcpu);
+
++ /*
++ * Return immediately if RIP hits a code breakpoint, such #DBs
++ * are fault-like and are higher priority than any faults on
++ * the code fetch itself.
++ */
++ if (!(emulation_type & EMULTYPE_SKIP) &&
++ kvm_vcpu_check_code_breakpoint(vcpu, &r))
++ return r;
++
+ r = x86_decode_emulated_instruction(vcpu, emulation_type,
+ insn, insn_len);
+ if (r != EMULATION_OK) {
--- /dev/null
+From foo@baz Fri Jun 3 04:46:19 PM CEST 2022
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Thu, 2 Jun 2022 22:22:28 +0200
+Subject: lib/crypto: add prompts back to crypto libraries
+To: stable@vger.kernel.org, gregkh@linuxfoundation.org
+Message-ID: <20220602202232.281326-2-Jason@zx2c4.com>
+
+From: "Justin M. Forbes" <jforbes@fedoraproject.org>
+
+commit e56e18985596617ae426ed5997fb2e737cffb58b upstream.
+
+Commit 6048fdcc5f269 ("lib/crypto: blake2s: include as built-in") took
+away a number of prompt texts from other crypto libraries. This makes
+values flip from built-in to module when oldconfig runs, and causes
+problems when these crypto libs need to be built in for things like
+BIG_KEYS.
+
+Fixes: 6048fdcc5f269 ("lib/crypto: blake2s: include as built-in")
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: linux-crypto@vger.kernel.org
+Signed-off-by: Justin M. Forbes <jforbes@fedoraproject.org>
+[Jason: - moved menu into submenu of lib/ instead of root menu
+ - fixed chacha sub-dependencies for CONFIG_CRYPTO]
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ crypto/Kconfig | 2 --
+ lib/Kconfig | 2 ++
+ lib/crypto/Kconfig | 17 ++++++++++++-----
+ 3 files changed, 14 insertions(+), 7 deletions(-)
+
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -1941,5 +1941,3 @@ source "crypto/asymmetric_keys/Kconfig"
+ source "certs/Kconfig"
+
+ endif # if CRYPTO
+-
+-source "lib/crypto/Kconfig"
+--- a/lib/Kconfig
++++ b/lib/Kconfig
+@@ -101,6 +101,8 @@ config INDIRECT_PIO
+
+ When in doubt, say N.
+
++source "lib/crypto/Kconfig"
++
+ config CRC_CCITT
+ tristate "CRC-CCITT functions"
+ help
+--- a/lib/crypto/Kconfig
++++ b/lib/crypto/Kconfig
+@@ -1,5 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+
++menu "Crypto library routines"
++
+ config CRYPTO_LIB_AES
+ tristate
+
+@@ -31,7 +33,7 @@ config CRYPTO_ARCH_HAVE_LIB_CHACHA
+
+ config CRYPTO_LIB_CHACHA_GENERIC
+ tristate
+- select CRYPTO_ALGAPI
++ select XOR_BLOCKS
+ help
+ This symbol can be depended upon by arch implementations of the
+ ChaCha library interface that require the generic code as a
+@@ -40,7 +42,8 @@ config CRYPTO_LIB_CHACHA_GENERIC
+ of CRYPTO_LIB_CHACHA.
+
+ config CRYPTO_LIB_CHACHA
+- tristate
++ tristate "ChaCha library interface"
++ depends on CRYPTO
+ depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
+ select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n
+ help
+@@ -65,7 +68,7 @@ config CRYPTO_LIB_CURVE25519_GENERIC
+ of CRYPTO_LIB_CURVE25519.
+
+ config CRYPTO_LIB_CURVE25519
+- tristate
++ tristate "Curve25519 scalar multiplication library"
+ depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519
+ select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n
+ help
+@@ -100,7 +103,7 @@ config CRYPTO_LIB_POLY1305_GENERIC
+ of CRYPTO_LIB_POLY1305.
+
+ config CRYPTO_LIB_POLY1305
+- tristate
++ tristate "Poly1305 library interface"
+ depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
+ select CRYPTO_LIB_POLY1305_GENERIC if CRYPTO_ARCH_HAVE_LIB_POLY1305=n
+ help
+@@ -109,11 +112,15 @@ config CRYPTO_LIB_POLY1305
+ is available and enabled.
+
+ config CRYPTO_LIB_CHACHA20POLY1305
+- tristate
++ tristate "ChaCha20-Poly1305 AEAD support (8-byte nonce library version)"
+ depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
+ depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
++ depends on CRYPTO
+ select CRYPTO_LIB_CHACHA
+ select CRYPTO_LIB_POLY1305
++ select CRYPTO_ALGAPI
+
+ config CRYPTO_LIB_SHA256
+ tristate
++
++endmenu
--- /dev/null
+From c5794097b269f15961ed78f7f27b50e51766dec9 Mon Sep 17 00:00:00 2001
+From: Alex Elder <elder@linaro.org>
+Date: Thu, 21 Apr 2022 13:53:33 -0500
+Subject: net: ipa: compute proper aggregation limit
+
+From: Alex Elder <elder@linaro.org>
+
+commit c5794097b269f15961ed78f7f27b50e51766dec9 upstream.
+
+The aggregation byte limit for an endpoint is currently computed
+based on the endpoint's receive buffer size.
+
+However, some bytes at the front of each receive buffer are reserved
+on the assumption that--as with SKBs--it might be useful to insert
+data (such as headers) before what lands in the buffer.
+
+The aggregation byte limit currently doesn't take into account that
+reserved space, and as a result, aggregation could require space
+past that which is available in the buffer.
+
+Fix this by reducing the size used to compute the aggregation byte
+limit by the NET_SKB_PAD offset reserved for each receive buffer.
+
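+For intuition only, here is a minimal sketch of the intended computation
+(the numbers are illustrative; the actual IPA_RX_BUFFER_SIZE and
+NET_SKB_PAD values depend on the configuration and architecture):
+
+	/* e.g. an 8192-byte RX buffer with 64 bytes of reserved headroom */
+	u32 buffer_size = IPA_RX_BUFFER_SIZE - NET_SKB_PAD;  /* 8192 - 64 = 8128 */
+	u32 limit = ipa_aggr_size_kb(buffer_size);  /* limit now fits the usable space */
+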
+Signed-off-by: Alex Elder <elder@linaro.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ipa/ipa_endpoint.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ipa/ipa_endpoint.c
++++ b/drivers/net/ipa/ipa_endpoint.c
+@@ -610,12 +610,14 @@ static void ipa_endpoint_init_aggr(struc
+
+ if (endpoint->data->aggregation) {
+ if (!endpoint->toward_ipa) {
++ u32 buffer_size;
+ u32 limit;
+
+ val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
+ val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
+
+- limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
++ buffer_size = IPA_RX_BUFFER_SIZE - NET_SKB_PAD;
++ limit = ipa_aggr_size_kb(buffer_size);
+ val |= u32_encode_bits(limit, AGGR_BYTE_LIMIT_FMASK);
+
+ limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
--- /dev/null
+From 56b14ecec97f39118bf85c9ac2438c5a949509ed Mon Sep 17 00:00:00 2001
+From: Florian Westphal <fw@strlen.de>
+Date: Fri, 20 May 2022 00:02:04 +0200
+Subject: netfilter: conntrack: re-fetch conntrack after insertion
+
+From: Florian Westphal <fw@strlen.de>
+
+commit 56b14ecec97f39118bf85c9ac2438c5a949509ed upstream.
+
+In case the conntrack is clashing, insertion can free skb->_nfct and
+set skb->_nfct to the already-confirmed entry.
+
+This wasn't found before because the conntrack entry and the extension
+space used to be freed after an rcu grace period, plus the race needs
+events enabled to trigger.
+
+Reported-by: <syzbot+793a590957d9c1b96620@syzkaller.appspotmail.com>
+Fixes: 71d8c47fc653 ("netfilter: conntrack: introduce clash resolution on insertion race")
+Fixes: 2ad9d7747c10 ("netfilter: conntrack: free extension area immediately")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/netfilter/nf_conntrack_core.h | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/include/net/netfilter/nf_conntrack_core.h
++++ b/include/net/netfilter/nf_conntrack_core.h
+@@ -59,8 +59,13 @@ static inline int nf_conntrack_confirm(s
+ int ret = NF_ACCEPT;
+
+ if (ct) {
+- if (!nf_ct_is_confirmed(ct))
++ if (!nf_ct_is_confirmed(ct)) {
+ ret = __nf_conntrack_confirm(skb);
++
++ if (ret == NF_ACCEPT)
++ ct = (struct nf_conn *)skb_nfct(skb);
++ }
++
+ if (likely(ret == NF_ACCEPT))
+ nf_ct_deliver_cached_events(ct);
+ }
--- /dev/null
+From fecf31ee395b0295f2d7260aa29946b7605f7c85 Mon Sep 17 00:00:00 2001
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Fri, 27 May 2022 09:56:18 +0200
+Subject: netfilter: nf_tables: sanitize nft_set_desc_concat_parse()
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+commit fecf31ee395b0295f2d7260aa29946b7605f7c85 upstream.
+
+Add several sanity checks for nft_set_desc_concat_parse():
+
+- validate that desc->field_count is not larger than the desc->field_len array.
+- a field length cannot be larger than what a desc->field_len element holds (i.e. U8_MAX).
+- the total length of the concatenation cannot be larger than the register array.
+
+Joint work with Florian Westphal.
+
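+For reference, a worked example of the register accounting behind the
+last check (the field sizes below are made up; NFT_REG32_COUNT bounds the
+register array):
+
+	/* Three concatenated fields of 16, 4 and 2 bytes: */
+	num_regs  = DIV_ROUND_UP(16, sizeof(u32));	/* 4 registers */
+	num_regs += DIV_ROUND_UP(4, sizeof(u32));	/* 1 register  */
+	num_regs += DIV_ROUND_UP(2, sizeof(u32));	/* 1 register  */
+	/* 6 registers in total, which must not exceed NFT_REG32_COUNT. */
+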
+Fixes: f3a2181e16f1 ("netfilter: nf_tables: Support for sets with multiple ranged fields")
+Reported-by: <zhangziming.zzm@antgroup.com>
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netfilter/nf_tables_api.c | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -4051,6 +4051,9 @@ static int nft_set_desc_concat_parse(con
+ u32 len;
+ int err;
+
++ if (desc->field_count >= ARRAY_SIZE(desc->field_len))
++ return -E2BIG;
++
+ err = nla_parse_nested_deprecated(tb, NFTA_SET_FIELD_MAX, attr,
+ nft_concat_policy, NULL);
+ if (err < 0)
+@@ -4060,9 +4063,8 @@ static int nft_set_desc_concat_parse(con
+ return -EINVAL;
+
+ len = ntohl(nla_get_be32(tb[NFTA_SET_FIELD_LEN]));
+-
+- if (len * BITS_PER_BYTE / 32 > NFT_REG32_COUNT)
+- return -E2BIG;
++ if (!len || len > U8_MAX)
++ return -EINVAL;
+
+ desc->field_len[desc->field_count++] = len;
+
+@@ -4073,7 +4075,8 @@ static int nft_set_desc_concat(struct nf
+ const struct nlattr *nla)
+ {
+ struct nlattr *attr;
+- int rem, err;
++ u32 num_regs = 0;
++ int rem, err, i;
+
+ nla_for_each_nested(attr, nla, rem) {
+ if (nla_type(attr) != NFTA_LIST_ELEM)
+@@ -4084,6 +4087,12 @@ static int nft_set_desc_concat(struct nf
+ return err;
+ }
+
++ for (i = 0; i < desc->field_count; i++)
++ num_regs += DIV_ROUND_UP(desc->field_len[i], sizeof(u32));
++
++ if (num_regs > NFT_REG32_COUNT)
++ return -E2BIG;
++
+ return 0;
+ }
+
--- /dev/null
+From 57668f0a4cc4083a120cc8c517ca0055c4543b59 Mon Sep 17 00:00:00 2001
+From: Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
+Date: Tue, 22 Mar 2022 16:23:39 +0100
+Subject: raid5: introduce MD_BROKEN
+
+From: Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
+
+commit 57668f0a4cc4083a120cc8c517ca0055c4543b59 upstream.
+
+The raid456 module used to allow the array to reach a failed state. That
+was changed by fb73b357fb9 ("raid5: block failing device if raid will be
+failed"). The change introduced a bug: if raid5 now fails during IO, it
+may end up with a hung task that never completes. The Faulty flag on the
+device is necessary to process all requests and is checked many times,
+mainly in analyze_stripe().
+Allow the drive to be marked Faulty again and set MD_BROKEN if the raid
+is failed.
+
+As a result, this level is allowed to reach the failed state again, but
+communication with userspace (via the -EBUSY status) is preserved.
+
+This restores the ability to fail the array via the mdadm --set-faulty
+command; additional verification will be added on the mdadm side.
+
+Reproduction steps:
+ mdadm -CR imsm -e imsm -n 3 /dev/nvme[0-2]n1
+ mdadm -CR r5 -e imsm -l5 -n3 /dev/nvme[0-2]n1 --assume-clean
+ mkfs.xfs /dev/md126 -f
+ mount /dev/md126 /mnt/root/
+
+ fio --filename=/mnt/root/file --size=5GB --direct=1 --rw=randrw
+--bs=64k --ioengine=libaio --iodepth=64 --runtime=240 --numjobs=4
+--time_based --group_reporting --name=throughput-test-job
+--eta-newline=1 &
+
+ echo 1 > /sys/block/nvme2n1/device/device/remove
+ echo 1 > /sys/block/nvme1n1/device/device/remove
+
+ [ 1475.787779] Call Trace:
+ [ 1475.793111] __schedule+0x2a6/0x700
+ [ 1475.799460] schedule+0x38/0xa0
+ [ 1475.805454] raid5_get_active_stripe+0x469/0x5f0 [raid456]
+ [ 1475.813856] ? finish_wait+0x80/0x80
+ [ 1475.820332] raid5_make_request+0x180/0xb40 [raid456]
+ [ 1475.828281] ? finish_wait+0x80/0x80
+ [ 1475.834727] ? finish_wait+0x80/0x80
+ [ 1475.841127] ? finish_wait+0x80/0x80
+ [ 1475.847480] md_handle_request+0x119/0x190
+ [ 1475.854390] md_make_request+0x8a/0x190
+ [ 1475.861041] generic_make_request+0xcf/0x310
+ [ 1475.868145] submit_bio+0x3c/0x160
+ [ 1475.874355] iomap_dio_submit_bio.isra.20+0x51/0x60
+ [ 1475.882070] iomap_dio_bio_actor+0x175/0x390
+ [ 1475.889149] iomap_apply+0xff/0x310
+ [ 1475.895447] ? iomap_dio_bio_actor+0x390/0x390
+ [ 1475.902736] ? iomap_dio_bio_actor+0x390/0x390
+ [ 1475.909974] iomap_dio_rw+0x2f2/0x490
+ [ 1475.916415] ? iomap_dio_bio_actor+0x390/0x390
+ [ 1475.923680] ? atime_needs_update+0x77/0xe0
+ [ 1475.930674] ? xfs_file_dio_aio_read+0x6b/0xe0 [xfs]
+ [ 1475.938455] xfs_file_dio_aio_read+0x6b/0xe0 [xfs]
+ [ 1475.946084] xfs_file_read_iter+0xba/0xd0 [xfs]
+ [ 1475.953403] aio_read+0xd5/0x180
+ [ 1475.959395] ? _cond_resched+0x15/0x30
+ [ 1475.965907] io_submit_one+0x20b/0x3c0
+ [ 1475.972398] __x64_sys_io_submit+0xa2/0x180
+ [ 1475.979335] ? do_io_getevents+0x7c/0xc0
+ [ 1475.986009] do_syscall_64+0x5b/0x1a0
+ [ 1475.992419] entry_SYSCALL_64_after_hwframe+0x65/0xca
+ [ 1476.000255] RIP: 0033:0x7f11fc27978d
+ [ 1476.006631] Code: Bad RIP value.
+ [ 1476.073251] INFO: task fio:3877 blocked for more than 120 seconds.
+
+Cc: stable@vger.kernel.org
+Fixes: fb73b357fb9 ("raid5: block failing device if raid will be failed")
+Reviewed-by: Xiao Ni <xni@redhat.com>
+Signed-off-by: Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
+Signed-off-by: Song Liu <song@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/raid5.c | 47 ++++++++++++++++++++++-------------------------
+ 1 file changed, 22 insertions(+), 25 deletions(-)
+
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -686,17 +686,17 @@ int raid5_calc_degraded(struct r5conf *c
+ return degraded;
+ }
+
+-static int has_failed(struct r5conf *conf)
++static bool has_failed(struct r5conf *conf)
+ {
+- int degraded;
++ int degraded = conf->mddev->degraded;
+
+- if (conf->mddev->reshape_position == MaxSector)
+- return conf->mddev->degraded > conf->max_degraded;
++ if (test_bit(MD_BROKEN, &conf->mddev->flags))
++ return true;
+
+- degraded = raid5_calc_degraded(conf);
+- if (degraded > conf->max_degraded)
+- return 1;
+- return 0;
++ if (conf->mddev->reshape_position != MaxSector)
++ degraded = raid5_calc_degraded(conf);
++
++ return degraded > conf->max_degraded;
+ }
+
+ struct stripe_head *
+@@ -2877,34 +2877,31 @@ static void raid5_error(struct mddev *md
+ unsigned long flags;
+ pr_debug("raid456: error called\n");
+
++ pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n",
++ mdname(mddev), bdevname(rdev->bdev, b));
++
+ spin_lock_irqsave(&conf->device_lock, flags);
++ set_bit(Faulty, &rdev->flags);
++ clear_bit(In_sync, &rdev->flags);
++ mddev->degraded = raid5_calc_degraded(conf);
+
+- if (test_bit(In_sync, &rdev->flags) &&
+- mddev->degraded == conf->max_degraded) {
+- /*
+- * Don't allow to achieve failed state
+- * Don't try to recover this device
+- */
++ if (has_failed(conf)) {
++ set_bit(MD_BROKEN, &conf->mddev->flags);
+ conf->recovery_disabled = mddev->recovery_disabled;
+- spin_unlock_irqrestore(&conf->device_lock, flags);
+- return;
++
++ pr_crit("md/raid:%s: Cannot continue operation (%d/%d failed).\n",
++ mdname(mddev), mddev->degraded, conf->raid_disks);
++ } else {
++ pr_crit("md/raid:%s: Operation continuing on %d devices.\n",
++ mdname(mddev), conf->raid_disks - mddev->degraded);
+ }
+
+- set_bit(Faulty, &rdev->flags);
+- clear_bit(In_sync, &rdev->flags);
+- mddev->degraded = raid5_calc_degraded(conf);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+
+ set_bit(Blocked, &rdev->flags);
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
+- pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n"
+- "md/raid:%s: Operation continuing on %d devices.\n",
+- mdname(mddev),
+- bdevname(rdev->bdev, b),
+- mdname(mddev),
+- conf->raid_disks - mddev->degraded);
+ r5c_update_on_rdev_error(mddev, rdev);
+ }
+
assoc_array-fix-bug_on-during-garbage-collect.patch
io_uring-don-t-re-import-iovecs-from-callbacks.patch
io_uring-fix-using-under-expanded-iters.patch
+net-ipa-compute-proper-aggregation-limit.patch
+xfs-detect-overflows-in-bmbt-records.patch
+xfs-show-the-proper-user-quota-options.patch
+xfs-fix-the-forward-progress-assertion-in-xfs_iwalk_run_callbacks.patch
+xfs-fix-an-abba-deadlock-in-xfs_rename.patch
+xfs-fix-cil-throttle-hang-when-cil-space-used-going-backwards.patch
+drm-i915-fix-wstringop-overflow-warning-in-call-to-intel_read_wm_latency.patch
+exfat-check-if-cluster-num-is-valid.patch
+exfat-fix-referencing-wrong-parent-directory-information-after-renaming.patch
+lib-crypto-add-prompts-back-to-crypto-libraries.patch
+crypto-drbg-prepare-for-more-fine-grained-tracking-of-seeding-state.patch
+crypto-drbg-track-whether-drbg-was-seeded-with-rng_is_initialized.patch
+crypto-drbg-move-dynamic-reseed_threshold-adjustments-to-__drbg_seed.patch
+crypto-drbg-make-reseeding-from-get_random_bytes-synchronous.patch
+netfilter-nf_tables-sanitize-nft_set_desc_concat_parse.patch
+netfilter-conntrack-re-fetch-conntrack-after-insertion.patch
+kvm-ppc-book3s-hv-fix-incorrect-null-check-on-list-iterator.patch
+x86-kvm-alloc-dummy-async-pf-token-outside-of-raw-spinlock.patch
+x86-kvm-use-correct-gfp-flags-for-preemption-disabled.patch
+kvm-x86-avoid-calling-x86-emulator-without-a-decoded-instruction.patch
+crypto-caam-fix-i.mx6sx-entropy-delay-value.patch
+crypto-ecrdsa-fix-incorrect-use-of-vli_cmp.patch
+zsmalloc-fix-races-between-asynchronous-zspage-free-and-page-migration.patch
+bluetooth-hci_qca-use-del_timer_sync-before-freeing.patch
+arm-dts-s5pv210-correct-interrupt-name-for-bluetooth-in-aries.patch
+dm-integrity-fix-error-code-in-dm_integrity_ctr.patch
+dm-crypt-make-printing-of-the-key-constant-time.patch
+dm-stats-add-cond_resched-when-looping-over-entries.patch
+dm-verity-set-dm_target_immutable-feature-flag.patch
+raid5-introduce-md_broken.patch
+hid-multitouch-add-support-for-google-whiskers-touchpad.patch
+hid-multitouch-add-quirks-to-enable-lenovo-x12-trackpoint.patch
--- /dev/null
+From 0547758a6de3cc71a0cfdd031a3621a30db6a68b Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Thu, 19 May 2022 07:57:11 -0700
+Subject: x86/kvm: Alloc dummy async #PF token outside of raw spinlock
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 0547758a6de3cc71a0cfdd031a3621a30db6a68b upstream.
+
+Drop the raw spinlock in kvm_async_pf_task_wake() before allocating the
+dummy async #PF token; the allocator is preemptible on PREEMPT_RT
+kernels and must not be called from truly atomic contexts.
+
+Opportunistically document why it's ok to loop on allocation failure,
+i.e. why the function won't get stuck in an infinite loop.
+
+Reported-by: Yajun Deng <yajun.deng@linux.dev>
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/kvm.c | 41 +++++++++++++++++++++++++++--------------
+ 1 file changed, 27 insertions(+), 14 deletions(-)
+
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -188,7 +188,7 @@ void kvm_async_pf_task_wake(u32 token)
+ {
+ u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
+ struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
+- struct kvm_task_sleep_node *n;
++ struct kvm_task_sleep_node *n, *dummy = NULL;
+
+ if (token == ~0) {
+ apf_task_wake_all();
+@@ -200,28 +200,41 @@ again:
+ n = _find_apf_task(b, token);
+ if (!n) {
+ /*
+- * async PF was not yet handled.
+- * Add dummy entry for the token.
++ * Async #PF not yet handled, add a dummy entry for the token.
++ * Allocating the token must be down outside of the raw lock
++ * as the allocator is preemptible on PREEMPT_RT kernels.
+ */
+- n = kzalloc(sizeof(*n), GFP_ATOMIC);
+- if (!n) {
++ if (!dummy) {
++ raw_spin_unlock(&b->lock);
++ dummy = kzalloc(sizeof(*dummy), GFP_KERNEL);
++
+ /*
+- * Allocation failed! Busy wait while other cpu
+- * handles async PF.
++ * Continue looping on allocation failure, eventually
++ * the async #PF will be handled and allocating a new
++ * node will be unnecessary.
++ */
++ if (!dummy)
++ cpu_relax();
++
++ /*
++ * Recheck for async #PF completion before enqueueing
++ * the dummy token to avoid duplicate list entries.
+ */
+- raw_spin_unlock(&b->lock);
+- cpu_relax();
+ goto again;
+ }
+- n->token = token;
+- n->cpu = smp_processor_id();
+- init_swait_queue_head(&n->wq);
+- hlist_add_head(&n->link, &b->list);
++ dummy->token = token;
++ dummy->cpu = smp_processor_id();
++ init_swait_queue_head(&dummy->wq);
++ hlist_add_head(&dummy->link, &b->list);
++ dummy = NULL;
+ } else {
+ apf_task_wake_one(n);
+ }
+ raw_spin_unlock(&b->lock);
+- return;
++
++ /* A dummy token might be allocated and ultimately not used. */
++ if (dummy)
++ kfree(dummy);
+ }
+ EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
+
--- /dev/null
+From baec4f5a018fe2d708fc1022330dba04b38b5fe3 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Tue, 24 May 2022 09:43:31 -0400
+Subject: x86, kvm: use correct GFP flags for preemption disabled
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit baec4f5a018fe2d708fc1022330dba04b38b5fe3 upstream.
+
+Commit ddd7ed842627 ("x86/kvm: Alloc dummy async #PF token outside of
+raw spinlock") leads to the following Smatch static checker warning:
+
+ arch/x86/kernel/kvm.c:212 kvm_async_pf_task_wake()
+ warn: sleeping in atomic context
+
+arch/x86/kernel/kvm.c
+ 202 raw_spin_lock(&b->lock);
+ 203 n = _find_apf_task(b, token);
+ 204 if (!n) {
+ 205 /*
+ 206 * Async #PF not yet handled, add a dummy entry for the token.
+ 207 * Allocating the token must be down outside of the raw lock
+ 208 * as the allocator is preemptible on PREEMPT_RT kernels.
+ 209 */
+ 210 if (!dummy) {
+ 211 raw_spin_unlock(&b->lock);
+--> 212 dummy = kzalloc(sizeof(*dummy), GFP_KERNEL);
+ ^^^^^^^^^^
+Smatch thinks the caller has preempt disabled. The `smdb.py preempt
+kvm_async_pf_task_wake` output call tree is:
+
+sysvec_kvm_asyncpf_interrupt() <- disables preempt
+-> __sysvec_kvm_asyncpf_interrupt()
+ -> kvm_async_pf_task_wake()
+
+The caller is this:
+
+arch/x86/kernel/kvm.c
+ 290 DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
+ 291 {
+ 292 struct pt_regs *old_regs = set_irq_regs(regs);
+ 293 u32 token;
+ 294
+ 295 ack_APIC_irq();
+ 296
+ 297 inc_irq_stat(irq_hv_callback_count);
+ 298
+ 299 if (__this_cpu_read(apf_reason.enabled)) {
+ 300 token = __this_cpu_read(apf_reason.token);
+ 301 kvm_async_pf_task_wake(token);
+ 302 __this_cpu_write(apf_reason.token, 0);
+ 303 wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
+ 304 }
+ 305
+ 306 set_irq_regs(old_regs);
+ 307 }
+
+DEFINE_IDTENTRY_SYSVEC() is a wrapper that calls this function from
+call_on_irqstack_cond(). It is inside call_on_irqstack_cond() that
+preemption is disabled (unless it is already disabled). The
+irq_enter/exit_rcu() functions disable/enable preempt.
+
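+Reduced to its essence, the constraint looks like this (a sketch, not the
+upstream code; the helper name is invented):
+
+	static void wake_path_sketch(struct kvm_task_sleep_node **dummy)
+	{
+		preempt_disable();
+		/*
+		 * Preemption is off here, so a sleeping GFP_KERNEL allocation
+		 * is not allowed; GFP_ATOMIC never sleeps.
+		 */
+		*dummy = kzalloc(sizeof(**dummy), GFP_ATOMIC);
+		preempt_enable();
+	}
+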
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/kvm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -206,7 +206,7 @@ again:
+ */
+ if (!dummy) {
+ raw_spin_unlock(&b->lock);
+- dummy = kzalloc(sizeof(*dummy), GFP_KERNEL);
++ dummy = kzalloc(sizeof(*dummy), GFP_ATOMIC);
+
+ /*
+ * Continue looping on allocation failure, eventually
--- /dev/null
+From foo@baz Fri Jun 3 04:32:03 PM CEST 2022
+From: Amir Goldstein <amir73il@gmail.com>
+Date: Fri, 27 May 2022 16:02:15 +0300
+Subject: xfs: detect overflows in bmbt records
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Sasha Levin <sashal@kernel.org>, Dave Chinner <david@fromorbit.com>, "Darrick J . Wong" <darrick.wong@oracle.com>, Christoph Hellwig <hch@lst.de>, Luis Chamberlain <mcgrof@kernel.org>, Theodore Ts'o <tytso@mit.edu>, Leah Rumancik <leah.rumancik@gmail.com>, Chandan Babu R <chandan.babu@oracle.com>, Adam Manzanares <a.manzanares@samsung.com>, Tyler Hicks <code@tyhicks.com>, Jan Kara <jack@suse.cz>, linux-xfs@vger.kernel.org, stable@vger.kernel.org
+Message-ID: <20220527130219.3110260-2-amir73il@gmail.com>
+
+From: "Darrick J. Wong" <darrick.wong@oracle.com>
+
+commit acf104c2331c1ba2a667e65dd36139d1555b1432 upstream.
+
+Detect file block mappings with a blockcount that is either so large that
+integer overflows occur or is zero, because neither is valid in the
+filesystem. Worse yet, attempting directory modifications causes the
+iext code to trip over the bmbt key handling and takes the filesystem
+down. We can fix most of this by preventing the bad metadata from
+entering the incore structures in the first place.
+
+Found by setting blockcount=0 in a directory data fork mapping and
+watching the fireworks.
+
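+As an aside, a single unsigned comparison covers both bad cases; the
+helper below is only an illustration and not part of the change:
+
+	/*
+	 * For unsigned values, start + count wraps back past start on
+	 * overflow and equals start when count == 0, so one check rejects
+	 * both.
+	 */
+	static inline bool bmbt_extent_is_garbage(u64 start, u64 count)
+	{
+		return start + count <= start;
+	}
+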
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Amir Goldstein <amir73il@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/libxfs/xfs_bmap.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/fs/xfs/libxfs/xfs_bmap.c
++++ b/fs/xfs/libxfs/xfs_bmap.c
+@@ -6229,6 +6229,11 @@ xfs_bmap_validate_extent(
+ xfs_fsblock_t endfsb;
+ bool isrt;
+
++ if (irec->br_startblock + irec->br_blockcount <= irec->br_startblock)
++ return __this_address;
++ if (irec->br_startoff + irec->br_blockcount <= irec->br_startoff)
++ return __this_address;
++
+ isrt = XFS_IS_REALTIME_INODE(ip);
+ endfsb = irec->br_startblock + irec->br_blockcount - 1;
+ if (isrt && whichfork == XFS_DATA_FORK) {
--- /dev/null
+From foo@baz Fri Jun 3 04:32:03 PM CEST 2022
+From: Amir Goldstein <amir73il@gmail.com>
+Date: Fri, 27 May 2022 16:02:18 +0300
+Subject: xfs: fix an ABBA deadlock in xfs_rename
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Sasha Levin <sashal@kernel.org>, Dave Chinner <david@fromorbit.com>, "Darrick J . Wong" <darrick.wong@oracle.com>, Christoph Hellwig <hch@lst.de>, Luis Chamberlain <mcgrof@kernel.org>, Theodore Ts'o <tytso@mit.edu>, Leah Rumancik <leah.rumancik@gmail.com>, Chandan Babu R <chandan.babu@oracle.com>, Adam Manzanares <a.manzanares@samsung.com>, Tyler Hicks <code@tyhicks.com>, Jan Kara <jack@suse.cz>, linux-xfs@vger.kernel.org, stable@vger.kernel.org, wenli xie <wlxie7296@gmail.com>, Brian Foster <bfoster@redhat.com>
+Message-ID: <20220527130219.3110260-5-amir73il@gmail.com>
+
+From: "Darrick J. Wong" <darrick.wong@oracle.com>
+
+commit 6da1b4b1ab36d80a3994fd4811c8381de10af604 upstream.
+
+When overlayfs is running on top of xfs and the user unlinks a file in
+the overlay, overlayfs will create a whiteout inode and ask xfs to
+"rename" the whiteout file atop the one being unlinked. If the file
+being unlinked loses its one nlink, we then have to put the inode on the
+unlinked list.
+
+This requires us to grab the AGI buffer of the whiteout inode to take it
+off the unlinked list (which is where whiteouts are created) and to grab
+the AGI buffer of the file being deleted. If the whiteout was created
+in a higher numbered AG than the file being deleted, we'll lock the AGIs
+in the wrong order and deadlock.
+
+Therefore, grab all the AGI locks we think we'll need ahead of time, and
+in order of increasing AG number per the locking rules.
+
+Reported-by: wenli xie <wlxie7296@gmail.com>
+Fixes: 93597ae8dac0 ("xfs: Fix deadlock between AGI and AGF when target_ip exists in xfs_rename()")
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Brian Foster <bfoster@redhat.com>
+Signed-off-by: Amir Goldstein <amir73il@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/libxfs/xfs_dir2.h | 2 --
+ fs/xfs/libxfs/xfs_dir2_sf.c | 2 +-
+ fs/xfs/xfs_inode.c | 42 +++++++++++++++++++++++++-----------------
+ 3 files changed, 26 insertions(+), 20 deletions(-)
+
+--- a/fs/xfs/libxfs/xfs_dir2.h
++++ b/fs/xfs/libxfs/xfs_dir2.h
+@@ -47,8 +47,6 @@ extern int xfs_dir_lookup(struct xfs_tra
+ extern int xfs_dir_removename(struct xfs_trans *tp, struct xfs_inode *dp,
+ struct xfs_name *name, xfs_ino_t ino,
+ xfs_extlen_t tot);
+-extern bool xfs_dir2_sf_replace_needblock(struct xfs_inode *dp,
+- xfs_ino_t inum);
+ extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp,
+ struct xfs_name *name, xfs_ino_t inum,
+ xfs_extlen_t tot);
+--- a/fs/xfs/libxfs/xfs_dir2_sf.c
++++ b/fs/xfs/libxfs/xfs_dir2_sf.c
+@@ -1018,7 +1018,7 @@ xfs_dir2_sf_removename(
+ /*
+ * Check whether the sf dir replace operation need more blocks.
+ */
+-bool
++static bool
+ xfs_dir2_sf_replace_needblock(
+ struct xfs_inode *dp,
+ xfs_ino_t inum)
+--- a/fs/xfs/xfs_inode.c
++++ b/fs/xfs/xfs_inode.c
+@@ -3152,7 +3152,7 @@ xfs_rename(
+ struct xfs_trans *tp;
+ struct xfs_inode *wip = NULL; /* whiteout inode */
+ struct xfs_inode *inodes[__XFS_SORT_INODES];
+- struct xfs_buf *agibp;
++ int i;
+ int num_inodes = __XFS_SORT_INODES;
+ bool new_parent = (src_dp != target_dp);
+ bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
+@@ -3266,6 +3266,30 @@ xfs_rename(
+ }
+
+ /*
++ * Lock the AGI buffers we need to handle bumping the nlink of the
++ * whiteout inode off the unlinked list and to handle dropping the
++ * nlink of the target inode. Per locking order rules, do this in
++ * increasing AG order and before directory block allocation tries to
++ * grab AGFs because we grab AGIs before AGFs.
++ *
++ * The (vfs) caller must ensure that if src is a directory then
++ * target_ip is either null or an empty directory.
++ */
++ for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
++ if (inodes[i] == wip ||
++ (inodes[i] == target_ip &&
++ (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
++ struct xfs_buf *bp;
++ xfs_agnumber_t agno;
++
++ agno = XFS_INO_TO_AGNO(mp, inodes[i]->i_ino);
++ error = xfs_read_agi(mp, tp, agno, &bp);
++ if (error)
++ goto out_trans_cancel;
++ }
++ }
++
++ /*
+ * Directory entry creation below may acquire the AGF. Remove
+ * the whiteout from the unlinked list first to preserve correct
+ * AGI/AGF locking order. This dirties the transaction so failures
+@@ -3317,22 +3341,6 @@ xfs_rename(
+ * In case there is already an entry with the same
+ * name at the destination directory, remove it first.
+ */
+-
+- /*
+- * Check whether the replace operation will need to allocate
+- * blocks. This happens when the shortform directory lacks
+- * space and we have to convert it to a block format directory.
+- * When more blocks are necessary, we must lock the AGI first
+- * to preserve locking order (AGI -> AGF).
+- */
+- if (xfs_dir2_sf_replace_needblock(target_dp, src_ip->i_ino)) {
+- error = xfs_read_agi(mp, tp,
+- XFS_INO_TO_AGNO(mp, target_ip->i_ino),
+- &agibp);
+- if (error)
+- goto out_trans_cancel;
+- }
+-
+ error = xfs_dir_replace(tp, target_dp, target_name,
+ src_ip->i_ino, spaceres);
+ if (error)
--- /dev/null
+From foo@baz Fri Jun 3 04:32:03 PM CEST 2022
+From: Amir Goldstein <amir73il@gmail.com>
+Date: Fri, 27 May 2022 16:02:19 +0300
+Subject: xfs: Fix CIL throttle hang when CIL space used going backwards
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Sasha Levin <sashal@kernel.org>, Dave Chinner <david@fromorbit.com>, "Darrick J . Wong" <darrick.wong@oracle.com>, Christoph Hellwig <hch@lst.de>, Luis Chamberlain <mcgrof@kernel.org>, Theodore Ts'o <tytso@mit.edu>, Leah Rumancik <leah.rumancik@gmail.com>, Chandan Babu R <chandan.babu@oracle.com>, Adam Manzanares <a.manzanares@samsung.com>, Tyler Hicks <code@tyhicks.com>, Jan Kara <jack@suse.cz>, linux-xfs@vger.kernel.org, stable@vger.kernel.org, Dave Chinner <dchinner@redhat.com>, Donald Buczek <buczek@molgen.mpg.de>, Brian Foster <bfoster@redhat.com>, Chandan Babu R <chandanrlinux@gmail.com>, "Darrick J . Wong" <djwong@kernel.org>, Allison Henderson <allison.henderson@oracle.com>
+Message-ID: <20220527130219.3110260-6-amir73il@gmail.com>
+
+From: Dave Chinner <dchinner@redhat.com>
+
+commit 19f4e7cc819771812a7f527d7897c2deffbf7a00 upstream.
+
+A hang with tasks stuck on the CIL hard throttle was reported and
+largely diagnosed by Donald Buczek, who discovered that it was a
+result of the CIL context space usage decrementing in committed
+transactions once the hard throttle limit had been hit and processes
+were already blocked. This resulted in the CIL push not waking up
+those waiters because the CIL context was no longer over the hard
+throttle limit.
+
+The surprising aspect of this was the CIL space usage going
+backwards regularly enough to trigger this situation. Assumptions
+had been made in design that the relogging process would only
+increase the size of the objects in the CIL, and so that space would
+only increase.
+
+This change and commit message fix the issue and document the
+result of an audit of the triggers that can cause the CIL space to
+go backwards, how large the backwards steps tend to be, the
+frequency in which they occur, and what the impact on the CIL
+accounting code is.
+
+Even though the CIL ctx->space_used can go backwards, it will only
+do so if the log item is already logged to the CIL and contains a
+space reservation for its entire logged state. This is tracked by
+the shadow buffer state on the log item. If the item is not
+previously logged in the CIL it has no shadow buffer nor log vector,
+and hence the entire size of the logged item copied to the log
+vector is accounted to the CIL space usage. i.e. it will always go
+up in this case.
+
+If the item has a log vector (i.e. already in the CIL) and the size
+decreases, then the existing log vector will be overwritten and the
+space usage will go down. This is the only condition where the space
+usage reduces, and it can only occur when an item is already tracked
+in the CIL. Hence we are safe from CIL space usage underruns as a
+result of log items decreasing in size when they are relogged.
+
+Typically this reduction in CIL usage occurs from metadata blocks
+being freed, such as when a btree block merge occurs or a directory
+entry/xattr entry is removed and the da-tree is reduced in size.
+This generally results in a reduction in size of around a single
+block in the CIL, but also tends to increase the number of log
+vectors because the parent and sibling nodes in the tree need to be
+updated when a btree block is removed. If a multi-level merge
+occurs, then we see reduction in size of 2+ blocks, but again the
+log vector count goes up.
+
+The other vector is inode fork size changes, which only log the
+current size of the fork and ignore the previously logged size when
+the fork is relogged. Hence if we are removing items from the inode
+fork (dir/xattr removal in shortform, extent record removal in
+extent form, etc) the relogged size of the inode fork can decrease.
+
+No other log items can decrease in size either because they are a
+fixed size (e.g. dquots) or they cannot be relogged (e.g. relogging
+an intent actually creates a new intent log item and doesn't relog
+the old item at all.) Hence the only two vectors for CIL context
+size reduction are relogging inode forks and marking buffers active
+in the CIL as stale.
+
+Long story short: the majority of the code does the right thing and
+handles the reduction in log item size correctly, and only the CIL
+hard throttle implementation is problematic and needs fixing. This
+patch makes that fix, as well as adds comments in the log item code
+that result in items shrinking in size when they are relogged as a
+clear reminder that this can and does happen frequently.
+
+The throttle fix is based upon the change Donald proposed, though it
+goes further to ensure that once the throttle is activated, it
+captures all tasks until the CIL push issues a wakeup, regardless of
+whether the CIL space used has gone back under the throttle
+threshold.
+
+This ensures that we prevent tasks reducing the CIL slightly under
+the throttle threshold and then making more changes that push it
+well over the throttle limit. This is achieved by checking if the
+throttle wait queue is already active as a condition of throttling.
+Hence once we start throttling, we continue to apply the throttle
+until the CIL context push wakes everything on the wait queue.
+
+We can use waitqueue_active() for the waitqueue manipulations and
+checks as they are all done under the ctx->xc_push_lock. Hence the
+waitqueue has external serialisation and we can safely peek inside
+the wait queue without holding the internal waitqueue locks.
+
+Many thanks to Donald for his diagnostic and analysis work to
+isolate the cause of this hang.
+
+Reported-and-tested-by: Donald Buczek <buczek@molgen.mpg.de>
+Signed-off-by: Dave Chinner <dchinner@redhat.com>
+Reviewed-by: Brian Foster <bfoster@redhat.com>
+Reviewed-by: Chandan Babu R <chandanrlinux@gmail.com>
+Reviewed-by: Darrick J. Wong <djwong@kernel.org>
+Reviewed-by: Allison Henderson <allison.henderson@oracle.com>
+Signed-off-by: Darrick J. Wong <djwong@kernel.org>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_buf_item.c | 37 ++++++++++++++++++-------------------
+ fs/xfs/xfs_inode_item.c | 14 ++++++++++++++
+ fs/xfs/xfs_log_cil.c | 22 +++++++++++++++++-----
+ 3 files changed, 49 insertions(+), 24 deletions(-)
+
+--- a/fs/xfs/xfs_buf_item.c
++++ b/fs/xfs/xfs_buf_item.c
+@@ -56,14 +56,12 @@ xfs_buf_log_format_size(
+ }
+
+ /*
+- * This returns the number of log iovecs needed to log the
+- * given buf log item.
++ * Return the number of log iovecs and space needed to log the given buf log
++ * item segment.
+ *
+- * It calculates this as 1 iovec for the buf log format structure
+- * and 1 for each stretch of non-contiguous chunks to be logged.
+- * Contiguous chunks are logged in a single iovec.
+- *
+- * If the XFS_BLI_STALE flag has been set, then log nothing.
++ * It calculates this as 1 iovec for the buf log format structure and 1 for each
++ * stretch of non-contiguous chunks to be logged. Contiguous chunks are logged
++ * in a single iovec.
+ */
+ STATIC void
+ xfs_buf_item_size_segment(
+@@ -119,11 +117,8 @@ xfs_buf_item_size_segment(
+ }
+
+ /*
+- * This returns the number of log iovecs needed to log the given buf log item.
+- *
+- * It calculates this as 1 iovec for the buf log format structure and 1 for each
+- * stretch of non-contiguous chunks to be logged. Contiguous chunks are logged
+- * in a single iovec.
++ * Return the number of log iovecs and space needed to log the given buf log
++ * item.
+ *
+ * Discontiguous buffers need a format structure per region that is being
+ * logged. This makes the changes in the buffer appear to log recovery as though
+@@ -133,7 +128,11 @@ xfs_buf_item_size_segment(
+ * what ends up on disk.
+ *
+ * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
+- * format structures.
++ * format structures. If the item has previously been logged and has dirty
++ * regions, we do not relog them in stale buffers. This has the effect of
++ * reducing the size of the relogged item by the amount of dirty data tracked
++ * by the log item. This can result in the committing transaction reducing the
++ * amount of space being consumed by the CIL.
+ */
+ STATIC void
+ xfs_buf_item_size(
+@@ -147,9 +146,9 @@ xfs_buf_item_size(
+ ASSERT(atomic_read(&bip->bli_refcount) > 0);
+ if (bip->bli_flags & XFS_BLI_STALE) {
+ /*
+- * The buffer is stale, so all we need to log
+- * is the buf log format structure with the
+- * cancel flag in it.
++ * The buffer is stale, so all we need to log is the buf log
++ * format structure with the cancel flag in it as we are never
++ * going to replay the changes tracked in the log item.
+ */
+ trace_xfs_buf_item_size_stale(bip);
+ ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
+@@ -164,9 +163,9 @@ xfs_buf_item_size(
+
+ if (bip->bli_flags & XFS_BLI_ORDERED) {
+ /*
+- * The buffer has been logged just to order it.
+- * It is not being included in the transaction
+- * commit, so no vectors are used at all.
++ * The buffer has been logged just to order it. It is not being
++ * included in the transaction commit, so no vectors are used at
++ * all.
+ */
+ trace_xfs_buf_item_size_ordered(bip);
+ *nvecs = XFS_LOG_VEC_ORDERED;
+--- a/fs/xfs/xfs_inode_item.c
++++ b/fs/xfs/xfs_inode_item.c
+@@ -28,6 +28,20 @@ static inline struct xfs_inode_log_item
+ return container_of(lip, struct xfs_inode_log_item, ili_item);
+ }
+
++/*
++ * The logged size of an inode fork is always the current size of the inode
++ * fork. This means that when an inode fork is relogged, the size of the logged
++ * region is determined by the current state, not the combination of the
++ * previously logged state + the current state. This is different relogging
++ * behaviour to most other log items which will retain the size of the
++ * previously logged changes when smaller regions are relogged.
++ *
++ * Hence operations that remove data from the inode fork (e.g. shortform
++ * dir/attr remove, extent form extent removal, etc), the size of the relogged
++ * inode gets -smaller- rather than stays the same size as the previously logged
++ * size and this can result in the committing transaction reducing the amount of
++ * space being consumed by the CIL.
++ */
+ STATIC void
+ xfs_inode_item_data_fork_size(
+ struct xfs_inode_log_item *iip,
+--- a/fs/xfs/xfs_log_cil.c
++++ b/fs/xfs/xfs_log_cil.c
+@@ -668,9 +668,14 @@ xlog_cil_push_work(
+ ASSERT(push_seq <= ctx->sequence);
+
+ /*
+- * Wake up any background push waiters now this context is being pushed.
++ * As we are about to switch to a new, empty CIL context, we no longer
++ * need to throttle tasks on CIL space overruns. Wake any waiters that
++ * the hard push throttle may have caught so they can start committing
++ * to the new context. The ctx->xc_push_lock provides the serialisation
++ * necessary for safely using the lockless waitqueue_active() check in
++ * this context.
+ */
+- if (ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
++ if (waitqueue_active(&cil->xc_push_wait))
+ wake_up_all(&cil->xc_push_wait);
+
+ /*
+@@ -907,7 +912,7 @@ xlog_cil_push_background(
+ ASSERT(!list_empty(&cil->xc_cil));
+
+ /*
+- * don't do a background push if we haven't used up all the
++ * Don't do a background push if we haven't used up all the
+ * space available yet.
+ */
+ if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log)) {
+@@ -931,9 +936,16 @@ xlog_cil_push_background(
+
+ /*
+ * If we are well over the space limit, throttle the work that is being
+- * done until the push work on this context has begun.
++ * done until the push work on this context has begun. Enforce the hard
++ * throttle on all transaction commits once it has been activated, even
++ * if the committing transactions have resulted in the space usage
++ * dipping back down under the hard limit.
++ *
++ * The ctx->xc_push_lock provides the serialisation necessary for safely
++ * using the lockless waitqueue_active() check in this context.
+ */
+- if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log)) {
++ if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log) ||
++ waitqueue_active(&cil->xc_push_wait)) {
+ trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
+ ASSERT(cil->xc_ctx->space_used < log->l_logsize);
+ xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
--- /dev/null
+From foo@baz Fri Jun 3 04:32:03 PM CEST 2022
+From: Amir Goldstein <amir73il@gmail.com>
+Date: Fri, 27 May 2022 16:02:17 +0300
+Subject: xfs: fix the forward progress assertion in xfs_iwalk_run_callbacks
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Sasha Levin <sashal@kernel.org>, Dave Chinner <david@fromorbit.com>, "Darrick J . Wong" <darrick.wong@oracle.com>, Christoph Hellwig <hch@lst.de>, Luis Chamberlain <mcgrof@kernel.org>, Theodore Ts'o <tytso@mit.edu>, Leah Rumancik <leah.rumancik@gmail.com>, Chandan Babu R <chandan.babu@oracle.com>, Adam Manzanares <a.manzanares@samsung.com>, Tyler Hicks <code@tyhicks.com>, Jan Kara <jack@suse.cz>, linux-xfs@vger.kernel.org, stable@vger.kernel.org, zlang@redhat.com, Dave Chinner <dchinner@redhat.com>
+Message-ID: <20220527130219.3110260-4-amir73il@gmail.com>
+
+From: "Darrick J. Wong" <darrick.wong@oracle.com>
+
+commit a5336d6bb2d02d0e9d4d3c8be04b80b8b68d56c8 upstream.
+
+In commit 27c14b5daa82 we started tracking the last inode seen during an
+inode walk to avoid infinite loops if a corrupt inobt record happens to
+have a lower ir_startino than the record preceding it. Unfortunately,
+the assertion trips over the case where there are completely empty inobt
+records (which can happen quite easily on 64k page filesystems) because
+we advance the tracking cursor without actually putting the empty record
+into the processing buffer. Fix the assert to allow for this case.
+
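+As a worked example (assuming XFS_INODES_PER_CHUNK is 64): if the last
+cached record starts at ir_startino 128, the usual invariant is
+next_agino == 128 + 64 == 192.  When a completely empty record at 192 is
+then skipped, next_agino advances to 256 while the last cached record is
+still the one at 128, so only ">=" holds.
+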
+Reported-by: zlang@redhat.com
+Fixes: 27c14b5daa82 ("xfs: ensure inobt record walks always make forward progress")
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Zorro Lang <zlang@redhat.com>
+Reviewed-by: Dave Chinner <dchinner@redhat.com>
+Signed-off-by: Amir Goldstein <amir73il@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_iwalk.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/xfs/xfs_iwalk.c
++++ b/fs/xfs/xfs_iwalk.c
+@@ -363,7 +363,7 @@ xfs_iwalk_run_callbacks(
+ /* Delete cursor but remember the last record we cached... */
+ xfs_iwalk_del_inobt(tp, curpp, agi_bpp, 0);
+ irec = &iwag->recs[iwag->nr_recs - 1];
+- ASSERT(next_agino == irec->ir_startino + XFS_INODES_PER_CHUNK);
++ ASSERT(next_agino >= irec->ir_startino + XFS_INODES_PER_CHUNK);
+
+ error = xfs_iwalk_ag_recs(iwag);
+ if (error)
--- /dev/null
+From foo@baz Fri Jun 3 04:32:03 PM CEST 2022
+From: Amir Goldstein <amir73il@gmail.com>
+Date: Fri, 27 May 2022 16:02:16 +0300
+Subject: xfs: show the proper user quota options
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Sasha Levin <sashal@kernel.org>, Dave Chinner <david@fromorbit.com>, "Darrick J . Wong" <darrick.wong@oracle.com>, Christoph Hellwig <hch@lst.de>, Luis Chamberlain <mcgrof@kernel.org>, Theodore Ts'o <tytso@mit.edu>, Leah Rumancik <leah.rumancik@gmail.com>, Chandan Babu R <chandan.babu@oracle.com>, Adam Manzanares <a.manzanares@samsung.com>, Tyler Hicks <code@tyhicks.com>, Jan Kara <jack@suse.cz>, linux-xfs@vger.kernel.org, stable@vger.kernel.org, Kaixu Xia <kaixuxia@tencent.com>
+Message-ID: <20220527130219.3110260-3-amir73il@gmail.com>
+
+From: Kaixu Xia <kaixuxia@tencent.com>
+
+commit 237d7887ae723af7d978e8b9a385fdff416f357b upstream.
+
+The quota option 'usrquota' should be shown if both the XFS_UQUOTA_ACCT
+and XFS_UQUOTA_ENFD flags are set. The option 'uqnoenforce' should be
+shown when only the XFS_UQUOTA_ACCT flag is set. The current code logic
+seems wrong; fix it and show the proper options.
+
+Signed-off-by: Kaixu Xia <kaixuxia@tencent.com>
+Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Amir Goldstein <amir73il@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/xfs/xfs_super.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/fs/xfs/xfs_super.c
++++ b/fs/xfs/xfs_super.c
+@@ -199,10 +199,12 @@ xfs_fs_show_options(
+ seq_printf(m, ",swidth=%d",
+ (int)XFS_FSB_TO_BB(mp, mp->m_swidth));
+
+- if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
+- seq_puts(m, ",usrquota");
+- else if (mp->m_qflags & XFS_UQUOTA_ACCT)
+- seq_puts(m, ",uqnoenforce");
++ if (mp->m_qflags & XFS_UQUOTA_ACCT) {
++ if (mp->m_qflags & XFS_UQUOTA_ENFD)
++ seq_puts(m, ",usrquota");
++ else
++ seq_puts(m, ",uqnoenforce");
++ }
+
+ if (mp->m_qflags & XFS_PQUOTA_ACCT) {
+ if (mp->m_qflags & XFS_PQUOTA_ENFD)
--- /dev/null
+From 2505a981114dcb715f8977b8433f7540854851d8 Mon Sep 17 00:00:00 2001
+From: Sultan Alsawaf <sultan@kerneltoast.com>
+Date: Fri, 13 May 2022 15:11:26 -0700
+Subject: zsmalloc: fix races between asynchronous zspage free and page migration
+
+From: Sultan Alsawaf <sultan@kerneltoast.com>
+
+commit 2505a981114dcb715f8977b8433f7540854851d8 upstream.
+
+The asynchronous zspage free worker tries to lock a zspage's entire page
+list without defending against page migration. Since pages which haven't
+yet been locked can concurrently migrate off the zspage page list while
+lock_zspage() churns away, lock_zspage() can suffer from a few different
+lethal races.
+
+It can lock a page which no longer belongs to the zspage and unsafely
+dereference page_private(), it can unsafely dereference a torn pointer to
+the next page (since there's a data race), and it can observe a spurious
+NULL pointer to the next page and thus not lock all of the zspage's pages
+(since a single page migration will reconstruct the entire page list, and
+create_page_chain() unconditionally zeroes out each list pointer in the
+process).
+
+Fix the races by using migrate_read_lock() in lock_zspage() to synchronize
+with page migration.
+
+Link: https://lkml.kernel.org/r/20220509024703.243847-1-sultan@kerneltoast.com
+Fixes: 77ff465799c602 ("zsmalloc: zs_page_migrate: skip unnecessary loops but not return -EBUSY if zspage is not inuse")
+Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com>
+Acked-by: Minchan Kim <minchan@kernel.org>
+Cc: Nitin Gupta <ngupta@vflare.org>
+Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/zsmalloc.c | 37 +++++++++++++++++++++++++++++++++----
+ 1 file changed, 33 insertions(+), 4 deletions(-)
+
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -1748,11 +1748,40 @@ static enum fullness_group putback_zspag
+ */
+ static void lock_zspage(struct zspage *zspage)
+ {
+- struct page *page = get_first_page(zspage);
++ struct page *curr_page, *page;
+
+- do {
+- lock_page(page);
+- } while ((page = get_next_page(page)) != NULL);
++ /*
++ * Pages we haven't locked yet can be migrated off the list while we're
++ * trying to lock them, so we need to be careful and only attempt to
++ * lock each page under migrate_read_lock(). Otherwise, the page we lock
++ * may no longer belong to the zspage. This means that we may wait for
++ * the wrong page to unlock, so we must take a reference to the page
++ * prior to waiting for it to unlock outside migrate_read_lock().
++ */
++ while (1) {
++ migrate_read_lock(zspage);
++ page = get_first_page(zspage);
++ if (trylock_page(page))
++ break;
++ get_page(page);
++ migrate_read_unlock(zspage);
++ wait_on_page_locked(page);
++ put_page(page);
++ }
++
++ curr_page = page;
++ while ((page = get_next_page(curr_page))) {
++ if (trylock_page(page)) {
++ curr_page = page;
++ } else {
++ get_page(page);
++ migrate_read_unlock(zspage);
++ wait_on_page_locked(page);
++ put_page(page);
++ migrate_read_lock(zspage);
++ }
++ }
++ migrate_read_unlock(zspage);
+ }
+
+ static int zs_init_fs_context(struct fs_context *fc)