From: Greg Kroah-Hartman Date: Tue, 12 May 2026 17:33:41 +0000 (+0200) Subject: 6.12-stable patches X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=0fae8610faa3354fd845486a12bdfa64088beb2a;p=thirdparty%2Fkernel%2Fstable-queue.git 6.12-stable patches added patches: alsa-aloop-fix-peer-runtime-uaf-during-format-change-stop.patch crypto-caam-guard-hmac-key-hex-dumps-in-hash_digest_key.patch crypto-nx-fix-bounce-buffer-leaks-in-nx842_crypto_-alloc-free-_ctx.patch crypto-nx-migrate-to-scomp-api.patch dma-mapping-add-__dma_from_device_group_begin-end.patch dma-mapping-drop-unneeded-includes-from-dma-mapping.h.patch erofs-fix-unsigned-underflow-in-z_erofs_lz4_handle_overlap.patch erofs-move-in-out-pages-into-struct-z_erofs_decompress_req.patch erofs-tidy-up-z_erofs_lz4_handle_overlap.patch fbdev-defio-disconnect-deferred-i-o-from-the-lifetime-of-struct-fb_info.patch fs-prepare-for-adding-lsm-blob-to-backing_file.patch gtp-disable-bh-before-calling-udp_tunnel_xmit_skb.patch hfsplus-fix-held-lock-freed-on-hfsplus_fill_super.patch hfsplus-fix-uninit-value-by-validating-catalog-record-size.patch hwmon-powerz-avoid-cacheline-sharing-for-dma-buffer.patch kvm-arm64-wake-up-from-wfi-when-iqrchip-is-in-userspace.patch mmc-core-optimize-time-for-secure-erase-trim-for-some-kingston-emmcs.patch mtd-spinand-winbond-declare-the-qe-bit-on-w25nxxjw.patch net-stmmac-avoid-shadowing-global-buf_sz.patch net-stmmac-prevent-null-deref-when-rx-memory-exhausted.patch net-stmmac-rename-stmmac_get_entry-stmmac_next_entry.patch octeon_ep_vf-add-null-check-for-napi_build_skb.patch printk-add-print_hex_dump_devel.patch rust-pin-init-fix-incorrect-accessor-reference-lifetime.patch tracepoint-balance-regfunc-on-func_add-failure-in-tracepoint_add_func.patch udf-fix-partition-descriptor-append-bookkeeping.patch wifi-mt76-mt7925-fix-incorrect-tlv-length-in-clc-command.patch --- diff --git a/queue-6.12/alsa-aloop-fix-peer-runtime-uaf-during-format-change-stop.patch b/queue-6.12/alsa-aloop-fix-peer-runtime-uaf-during-format-change-stop.patch new file mode 100644 index 0000000000..7214151022 --- /dev/null +++ b/queue-6.12/alsa-aloop-fix-peer-runtime-uaf-during-format-change-stop.patch @@ -0,0 +1,131 @@ +From stable+bounces-244994-greg=kroah.com@vger.kernel.org Sun May 10 04:35:21 2026 +From: Sasha Levin +Date: Sat, 9 May 2026 22:35:15 -0400 +Subject: ALSA: aloop: Fix peer runtime UAF during format-change stop +To: stable@vger.kernel.org +Cc: "Cássio Gabriel" , syzbot+8fa95c41eafbc9d2ff6f@syzkaller.appspotmail.com, "Takashi Iwai" , "Takashi Iwai" , "Sasha Levin" +Message-ID: <20260510023515.3941521-1-sashal@kernel.org> + +From: Cássio Gabriel + +[ Upstream commit e5c33cdc6f402eab8abd36ecf436b22c9d3a8aff ] + +loopback_check_format() may stop the capture side when playback starts +with parameters that no longer match a running capture stream. Commit +826af7fa62e3 ("ALSA: aloop: Fix racy access at PCM trigger") moved +the peer lookup under cable->lock, but the actual snd_pcm_stop() still +runs after dropping that lock. + +A concurrent close can clear the capture entry from cable->streams[] and +detach or free its runtime while the playback trigger path still holds a +stale peer substream pointer. + +Keep a per-cable count of in-flight peer stops before dropping +cable->lock, and make free_cable() wait for those stops before +detaching the runtime. This preserves the existing behavior while +making the peer runtime lifetime explicit. 
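+
+A minimal sketch of the resulting pattern (simplified names; peer_running
+and peer stand in for the checks and substream pointer used in
+loopback_check_format(), this is not the literal driver code):
+
+    /* trigger path: pin the peer before dropping the cable lock */
+    spin_lock_irq(&cable->lock);
+    if (peer_running)
+        atomic_inc(&cable->stop_count);
+    spin_unlock_irq(&cable->lock);
+
+    if (peer_running) {
+        snd_pcm_stop(peer, SNDRV_PCM_STATE_DRAINING);
+        if (atomic_dec_and_test(&cable->stop_count))
+            wake_up(&cable->stop_wait);
+    }
+
+    /* close path: the peer runtime must stay alive until this returns */
+    wait_event(cable->stop_wait, !atomic_read(&cable->stop_count));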
+ +Reported-by: syzbot+8fa95c41eafbc9d2ff6f@syzkaller.appspotmail.com +Closes: https://syzkaller.appspot.com/bug?extid=8fa95c41eafbc9d2ff6f +Fixes: 597603d615d2 ("ALSA: introduce the snd-aloop module for the PCM loopback") +Cc: stable@vger.kernel.org +Suggested-by: Takashi Iwai +Signed-off-by: Cássio Gabriel +Link: https://patch.msgid.link/20260424-alsa-aloop-peer-stop-uaf-v2-1-94e68101db8a@gmail.com +Signed-off-by: Takashi Iwai +[ used scoped_guard(spinlock_irq) instead of guard(spinlock_irq) ] +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + sound/drivers/aloop.c | 44 ++++++++++++++++++++++++++++++-------------- + 1 file changed, 30 insertions(+), 14 deletions(-) + +--- a/sound/drivers/aloop.c ++++ b/sound/drivers/aloop.c +@@ -98,6 +98,9 @@ struct loopback_ops { + struct loopback_cable { + spinlock_t lock; + struct loopback_pcm *streams[2]; ++ /* in-flight peer stops running outside cable->lock */ ++ atomic_t stop_count; ++ wait_queue_head_t stop_wait; + struct snd_pcm_hardware hw; + /* flags */ + unsigned int valid; +@@ -365,8 +368,11 @@ static int loopback_check_format(struct + return 0; + if (stream == SNDRV_PCM_STREAM_CAPTURE) + return -EIO; +- else if (cruntime->state == SNDRV_PCM_STATE_RUNNING) ++ else if (cruntime->state == SNDRV_PCM_STATE_RUNNING) { ++ /* close must not free the peer runtime below */ ++ atomic_inc(&cable->stop_count); + stop_capture = true; ++ } + } + + setup = get_setup(dpcm_play); +@@ -395,8 +401,11 @@ static int loopback_check_format(struct + } + } + +- if (stop_capture) ++ if (stop_capture) { + snd_pcm_stop(dpcm_capt->substream, SNDRV_PCM_STATE_DRAINING); ++ if (atomic_dec_and_test(&cable->stop_count)) ++ wake_up(&cable->stop_wait); ++ } + + return 0; + } +@@ -1050,24 +1059,29 @@ static void free_cable(struct snd_pcm_su + struct loopback *loopback = substream->private_data; + int dev = get_cable_index(substream); + struct loopback_cable *cable; ++ struct loopback_pcm *dpcm; ++ bool other_alive; + + cable = loopback->cables[substream->number][dev]; + if (!cable) + return; +- if (cable->streams[!substream->stream]) { +- /* other stream is still alive */ +- spin_lock_irq(&cable->lock); +- cable->streams[substream->stream] = NULL; +- spin_unlock_irq(&cable->lock); +- } else { +- struct loopback_pcm *dpcm = substream->runtime->private_data; + +- if (cable->ops && cable->ops->close_cable && dpcm) +- cable->ops->close_cable(dpcm); +- /* free the cable */ +- loopback->cables[substream->number][dev] = NULL; +- kfree(cable); ++ scoped_guard(spinlock_irq, &cable->lock) { ++ cable->streams[substream->stream] = NULL; ++ other_alive = cable->streams[!substream->stream]; + } ++ ++ /* Pair with the stop_count increment in loopback_check_format(). 
*/ ++ wait_event(cable->stop_wait, !atomic_read(&cable->stop_count)); ++ if (other_alive) ++ return; ++ ++ dpcm = substream->runtime->private_data; ++ if (cable->ops && cable->ops->close_cable && dpcm) ++ cable->ops->close_cable(dpcm); ++ /* free the cable */ ++ loopback->cables[substream->number][dev] = NULL; ++ kfree(cable); + } + + static int loopback_jiffies_timer_open(struct loopback_pcm *dpcm) +@@ -1264,6 +1278,8 @@ static int loopback_open(struct snd_pcm_ + goto unlock; + } + spin_lock_init(&cable->lock); ++ atomic_set(&cable->stop_count, 0); ++ init_waitqueue_head(&cable->stop_wait); + cable->hw = loopback_pcm_hardware; + if (loopback->timer_source) + cable->ops = &loopback_snd_timer_ops; diff --git a/queue-6.12/crypto-caam-guard-hmac-key-hex-dumps-in-hash_digest_key.patch b/queue-6.12/crypto-caam-guard-hmac-key-hex-dumps-in-hash_digest_key.patch new file mode 100644 index 0000000000..17c95f424d --- /dev/null +++ b/queue-6.12/crypto-caam-guard-hmac-key-hex-dumps-in-hash_digest_key.patch @@ -0,0 +1,68 @@ +From stable+bounces-244993-greg=kroah.com@vger.kernel.org Sun May 10 04:34:52 2026 +From: Sasha Levin +Date: Sat, 9 May 2026 22:34:42 -0400 +Subject: crypto: caam - guard HMAC key hex dumps in hash_digest_key +To: stable@vger.kernel.org +Cc: Thorsten Blum , Herbert Xu , Sasha Levin +Message-ID: <20260510023442.3940261-2-sashal@kernel.org> + +From: Thorsten Blum + +[ Upstream commit 177730a273b18e195263ed953853273e901b5064 ] + +Use print_hex_dump_devel() for dumping sensitive HMAC key bytes in +hash_digest_key() to avoid leaking secrets at runtime when +CONFIG_DYNAMIC_DEBUG is enabled. + +Fixes: 045e36780f11 ("crypto: caam - ahash hmac support") +Fixes: 3f16f6c9d632 ("crypto: caam/qi2 - add support for ahash algorithms") +Cc: stable@vger.kernel.org +Signed-off-by: Thorsten Blum +Signed-off-by: Herbert Xu +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + drivers/crypto/caam/caamalg_qi2.c | 4 ++-- + drivers/crypto/caam/caamhash.c | 4 ++-- + 2 files changed, 4 insertions(+), 4 deletions(-) + +--- a/drivers/crypto/caam/caamalg_qi2.c ++++ b/drivers/crypto/caam/caamalg_qi2.c +@@ -3268,7 +3268,7 @@ static int hash_digest_key(struct caam_h + dpaa2_fl_set_addr(out_fle, key_dma); + dpaa2_fl_set_len(out_fle, digestsize); + +- print_hex_dump_debug("key_in@" __stringify(__LINE__)": ", ++ print_hex_dump_devel("key_in@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1); + print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), +@@ -3288,7 +3288,7 @@ static int hash_digest_key(struct caam_h + /* in progress */ + wait_for_completion(&result.completion); + ret = result.err; +- print_hex_dump_debug("digested key@" __stringify(__LINE__)": ", ++ print_hex_dump_devel("digested key@" __stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, + digestsize, 1); + } +--- a/drivers/crypto/caam/caamhash.c ++++ b/drivers/crypto/caam/caamhash.c +@@ -393,7 +393,7 @@ static int hash_digest_key(struct caam_h + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | + LDST_SRCDST_BYTE_CONTEXT); + +- print_hex_dump_debug("key_in@"__stringify(__LINE__)": ", ++ print_hex_dump_devel("key_in@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1); + print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), +@@ -408,7 +408,7 @@ static int hash_digest_key(struct caam_h + wait_for_completion(&result.completion); + ret = result.err; + +- 
print_hex_dump_debug("digested key@"__stringify(__LINE__)": ", ++ print_hex_dump_devel("digested key@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, + digestsize, 1); + } diff --git a/queue-6.12/crypto-nx-fix-bounce-buffer-leaks-in-nx842_crypto_-alloc-free-_ctx.patch b/queue-6.12/crypto-nx-fix-bounce-buffer-leaks-in-nx842_crypto_-alloc-free-_ctx.patch new file mode 100644 index 0000000000..4ef96117b5 --- /dev/null +++ b/queue-6.12/crypto-nx-fix-bounce-buffer-leaks-in-nx842_crypto_-alloc-free-_ctx.patch @@ -0,0 +1,51 @@ +From stable+bounces-244894-greg=kroah.com@vger.kernel.org Sat May 9 05:16:48 2026 +From: Sasha Levin +Date: Fri, 8 May 2026 23:16:39 -0400 +Subject: crypto: nx - fix bounce buffer leaks in nx842_crypto_{alloc,free}_ctx +To: stable@vger.kernel.org +Cc: Thorsten Blum , Herbert Xu , Sasha Levin +Message-ID: <20260509031639.3054679-2-sashal@kernel.org> + +From: Thorsten Blum + +[ Upstream commit adb3faf2db1a66d0f015b44ac909a32dfc7f2f9c ] + +The bounce buffers are allocated with __get_free_pages() using +BOUNCE_BUFFER_ORDER (order 2 = 4 pages), but both the allocation error +path and nx842_crypto_free_ctx() release the buffers with free_page(). +Use free_pages() with the matching order instead. + +Fixes: ed70b479c2c0 ("crypto: nx - add hardware 842 crypto comp alg") +Cc: stable@vger.kernel.org +Signed-off-by: Thorsten Blum +Signed-off-by: Herbert Xu +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + drivers/crypto/nx/nx-842.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +--- a/drivers/crypto/nx/nx-842.c ++++ b/drivers/crypto/nx/nx-842.c +@@ -116,8 +116,8 @@ void *nx842_crypto_alloc_ctx(struct nx84 + ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER); + if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) { + kfree(ctx->wmem); +- free_page((unsigned long)ctx->sbounce); +- free_page((unsigned long)ctx->dbounce); ++ free_pages((unsigned long)ctx->sbounce, BOUNCE_BUFFER_ORDER); ++ free_pages((unsigned long)ctx->dbounce, BOUNCE_BUFFER_ORDER); + kfree(ctx); + return ERR_PTR(-ENOMEM); + } +@@ -131,8 +131,8 @@ void nx842_crypto_free_ctx(void *p) + struct nx842_crypto_ctx *ctx = p; + + kfree(ctx->wmem); +- free_page((unsigned long)ctx->sbounce); +- free_page((unsigned long)ctx->dbounce); ++ free_pages((unsigned long)ctx->sbounce, BOUNCE_BUFFER_ORDER); ++ free_pages((unsigned long)ctx->dbounce, BOUNCE_BUFFER_ORDER); + } + EXPORT_SYMBOL_GPL(nx842_crypto_free_ctx); + diff --git a/queue-6.12/crypto-nx-migrate-to-scomp-api.patch b/queue-6.12/crypto-nx-migrate-to-scomp-api.patch new file mode 100644 index 0000000000..5e3ce53b53 --- /dev/null +++ b/queue-6.12/crypto-nx-migrate-to-scomp-api.patch @@ -0,0 +1,287 @@ +From stable+bounces-244893-greg=kroah.com@vger.kernel.org Sat May 9 05:16:46 2026 +From: Sasha Levin +Date: Fri, 8 May 2026 23:16:38 -0400 +Subject: crypto: nx - Migrate to scomp API +To: stable@vger.kernel.org +Cc: Ard Biesheuvel , Herbert Xu , Sasha Levin +Message-ID: <20260509031639.3054679-1-sashal@kernel.org> + +From: Ard Biesheuvel + +[ Upstream commit 980b5705f4e73f567e405cd18337cc32fd51cf79 ] + +The only remaining user of 842 compression has been migrated to the +acomp compression API, and so the NX hardware driver has to follow suit, +given that no users of the obsolete 'comp' API remain, and it is going +to be removed. + +So migrate the NX driver code to scomp. These will be wrapped and +exposed as acomp implementation via the crypto subsystem's +acomp-to-scomp adaptation layer. 
+ +Signed-off-by: Ard Biesheuvel +Signed-off-by: Herbert Xu +Stable-dep-of: adb3faf2db1a ("crypto: nx - fix bounce buffer leaks in nx842_crypto_{alloc,free}_ctx") +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + drivers/crypto/nx/nx-842.c | 33 +++++++++++++++++++-------------- + drivers/crypto/nx/nx-842.h | 15 ++++++++------- + drivers/crypto/nx/nx-common-powernv.c | 31 +++++++++++++++---------------- + drivers/crypto/nx/nx-common-pseries.c | 33 ++++++++++++++++----------------- + 4 files changed, 58 insertions(+), 54 deletions(-) + +--- a/drivers/crypto/nx/nx-842.c ++++ b/drivers/crypto/nx/nx-842.c +@@ -101,9 +101,13 @@ static int update_param(struct nx842_cry + return 0; + } + +-int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver) ++void *nx842_crypto_alloc_ctx(struct nx842_driver *driver) + { +- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); ++ struct nx842_crypto_ctx *ctx; ++ ++ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); ++ if (!ctx) ++ return ERR_PTR(-ENOMEM); + + spin_lock_init(&ctx->lock); + ctx->driver = driver; +@@ -114,22 +118,23 @@ int nx842_crypto_init(struct crypto_tfm + kfree(ctx->wmem); + free_page((unsigned long)ctx->sbounce); + free_page((unsigned long)ctx->dbounce); +- return -ENOMEM; ++ kfree(ctx); ++ return ERR_PTR(-ENOMEM); + } + +- return 0; ++ return ctx; + } +-EXPORT_SYMBOL_GPL(nx842_crypto_init); ++EXPORT_SYMBOL_GPL(nx842_crypto_alloc_ctx); + +-void nx842_crypto_exit(struct crypto_tfm *tfm) ++void nx842_crypto_free_ctx(void *p) + { +- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); ++ struct nx842_crypto_ctx *ctx = p; + + kfree(ctx->wmem); + free_page((unsigned long)ctx->sbounce); + free_page((unsigned long)ctx->dbounce); + } +-EXPORT_SYMBOL_GPL(nx842_crypto_exit); ++EXPORT_SYMBOL_GPL(nx842_crypto_free_ctx); + + static void check_constraints(struct nx842_constraints *c) + { +@@ -246,11 +251,11 @@ nospc: + return update_param(p, slen, dskip + dlen); + } + +-int nx842_crypto_compress(struct crypto_tfm *tfm, ++int nx842_crypto_compress(struct crypto_scomp *tfm, + const u8 *src, unsigned int slen, +- u8 *dst, unsigned int *dlen) ++ u8 *dst, unsigned int *dlen, void *pctx) + { +- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); ++ struct nx842_crypto_ctx *ctx = pctx; + struct nx842_crypto_header *hdr = + container_of(&ctx->header, + struct nx842_crypto_header, hdr); +@@ -431,11 +436,11 @@ usesw: + return update_param(p, slen + padding, dlen); + } + +-int nx842_crypto_decompress(struct crypto_tfm *tfm, ++int nx842_crypto_decompress(struct crypto_scomp *tfm, + const u8 *src, unsigned int slen, +- u8 *dst, unsigned int *dlen) ++ u8 *dst, unsigned int *dlen, void *pctx) + { +- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); ++ struct nx842_crypto_ctx *ctx = pctx; + struct nx842_crypto_header *hdr; + struct nx842_crypto_param p; + struct nx842_constraints c = *ctx->driver->constraints; +--- a/drivers/crypto/nx/nx-842.h ++++ b/drivers/crypto/nx/nx-842.h +@@ -3,7 +3,6 @@ + #ifndef __NX_842_H__ + #define __NX_842_H__ + +-#include + #include + #include + #include +@@ -101,6 +100,8 @@ + #define LEN_ON_SIZE(pa, size) ((size) - ((pa) & ((size) - 1))) + #define LEN_ON_PAGE(pa) LEN_ON_SIZE(pa, PAGE_SIZE) + ++struct crypto_scomp; ++ + static inline unsigned long nx842_get_pa(void *addr) + { + if (!is_vmalloc_addr(addr)) +@@ -182,13 +183,13 @@ struct nx842_crypto_ctx { + struct nx842_driver *driver; + }; + +-int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver); +-void nx842_crypto_exit(struct crypto_tfm 
*tfm); +-int nx842_crypto_compress(struct crypto_tfm *tfm, ++void *nx842_crypto_alloc_ctx(struct nx842_driver *driver); ++void nx842_crypto_free_ctx(void *ctx); ++int nx842_crypto_compress(struct crypto_scomp *tfm, + const u8 *src, unsigned int slen, +- u8 *dst, unsigned int *dlen); +-int nx842_crypto_decompress(struct crypto_tfm *tfm, ++ u8 *dst, unsigned int *dlen, void *ctx); ++int nx842_crypto_decompress(struct crypto_scomp *tfm, + const u8 *src, unsigned int slen, +- u8 *dst, unsigned int *dlen); ++ u8 *dst, unsigned int *dlen, void *ctx); + + #endif /* __NX_842_H__ */ +--- a/drivers/crypto/nx/nx-common-powernv.c ++++ b/drivers/crypto/nx/nx-common-powernv.c +@@ -9,6 +9,7 @@ + + #include "nx-842.h" + ++#include + #include + + #include +@@ -1031,23 +1032,21 @@ static struct nx842_driver nx842_powernv + .decompress = nx842_powernv_decompress, + }; + +-static int nx842_powernv_crypto_init(struct crypto_tfm *tfm) ++static void *nx842_powernv_crypto_alloc_ctx(void) + { +- return nx842_crypto_init(tfm, &nx842_powernv_driver); ++ return nx842_crypto_alloc_ctx(&nx842_powernv_driver); + } + +-static struct crypto_alg nx842_powernv_alg = { +- .cra_name = "842", +- .cra_driver_name = "842-nx", +- .cra_priority = 300, +- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, +- .cra_ctxsize = sizeof(struct nx842_crypto_ctx), +- .cra_module = THIS_MODULE, +- .cra_init = nx842_powernv_crypto_init, +- .cra_exit = nx842_crypto_exit, +- .cra_u = { .compress = { +- .coa_compress = nx842_crypto_compress, +- .coa_decompress = nx842_crypto_decompress } } ++static struct scomp_alg nx842_powernv_alg = { ++ .base.cra_name = "842", ++ .base.cra_driver_name = "842-nx", ++ .base.cra_priority = 300, ++ .base.cra_module = THIS_MODULE, ++ ++ .alloc_ctx = nx842_powernv_crypto_alloc_ctx, ++ .free_ctx = nx842_crypto_free_ctx, ++ .compress = nx842_crypto_compress, ++ .decompress = nx842_crypto_decompress, + }; + + static __init int nx_compress_powernv_init(void) +@@ -1107,7 +1106,7 @@ static __init int nx_compress_powernv_in + nx842_powernv_exec = nx842_exec_vas; + } + +- ret = crypto_register_alg(&nx842_powernv_alg); ++ ret = crypto_register_scomp(&nx842_powernv_alg); + if (ret) { + nx_delete_coprocs(); + return ret; +@@ -1128,7 +1127,7 @@ static void __exit nx_compress_powernv_e + if (!nx842_ct) + vas_unregister_api_powernv(); + +- crypto_unregister_alg(&nx842_powernv_alg); ++ crypto_unregister_scomp(&nx842_powernv_alg); + + nx_delete_coprocs(); + } +--- a/drivers/crypto/nx/nx-common-pseries.c ++++ b/drivers/crypto/nx/nx-common-pseries.c +@@ -11,6 +11,7 @@ + #include + #include + #include ++#include + + #include "nx-842.h" + #include "nx_csbcpb.h" /* struct nx_csbcpb */ +@@ -1008,23 +1009,21 @@ static struct nx842_driver nx842_pseries + .decompress = nx842_pseries_decompress, + }; + +-static int nx842_pseries_crypto_init(struct crypto_tfm *tfm) ++static void *nx842_pseries_crypto_alloc_ctx(void) + { +- return nx842_crypto_init(tfm, &nx842_pseries_driver); ++ return nx842_crypto_alloc_ctx(&nx842_pseries_driver); + } + +-static struct crypto_alg nx842_pseries_alg = { +- .cra_name = "842", +- .cra_driver_name = "842-nx", +- .cra_priority = 300, +- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, +- .cra_ctxsize = sizeof(struct nx842_crypto_ctx), +- .cra_module = THIS_MODULE, +- .cra_init = nx842_pseries_crypto_init, +- .cra_exit = nx842_crypto_exit, +- .cra_u = { .compress = { +- .coa_compress = nx842_crypto_compress, +- .coa_decompress = nx842_crypto_decompress } } ++static struct scomp_alg nx842_pseries_alg = { ++ .base.cra_name = "842", ++ 
.base.cra_driver_name = "842-nx", ++ .base.cra_priority = 300, ++ .base.cra_module = THIS_MODULE, ++ ++ .alloc_ctx = nx842_pseries_crypto_alloc_ctx, ++ .free_ctx = nx842_crypto_free_ctx, ++ .compress = nx842_crypto_compress, ++ .decompress = nx842_crypto_decompress, + }; + + static int nx842_probe(struct vio_dev *viodev, +@@ -1072,7 +1071,7 @@ static int nx842_probe(struct vio_dev *v + if (ret) + goto error; + +- ret = crypto_register_alg(&nx842_pseries_alg); ++ ret = crypto_register_scomp(&nx842_pseries_alg); + if (ret) { + dev_err(&viodev->dev, "could not register comp alg: %d\n", ret); + goto error; +@@ -1120,7 +1119,7 @@ static void nx842_remove(struct vio_dev + if (caps_feat) + sysfs_remove_group(&viodev->dev.kobj, &nxcop_caps_attr_group); + +- crypto_unregister_alg(&nx842_pseries_alg); ++ crypto_unregister_scomp(&nx842_pseries_alg); + + spin_lock_irqsave(&devdata_mutex, flags); + old_devdata = rcu_dereference_check(devdata, +@@ -1252,7 +1251,7 @@ static void __exit nx842_pseries_exit(vo + + vas_unregister_api_pseries(); + +- crypto_unregister_alg(&nx842_pseries_alg); ++ crypto_unregister_scomp(&nx842_pseries_alg); + + spin_lock_irqsave(&devdata_mutex, flags); + old_devdata = rcu_dereference_check(devdata, diff --git a/queue-6.12/dma-mapping-add-__dma_from_device_group_begin-end.patch b/queue-6.12/dma-mapping-add-__dma_from_device_group_begin-end.patch new file mode 100644 index 0000000000..8a64292e92 --- /dev/null +++ b/queue-6.12/dma-mapping-add-__dma_from_device_group_begin-end.patch @@ -0,0 +1,69 @@ +From stable+bounces-243985-greg=kroah.com@vger.kernel.org Tue May 5 08:01:48 2026 +From: Sasha Levin +Date: Tue, 5 May 2026 01:59:20 -0400 +Subject: dma-mapping: add __dma_from_device_group_begin()/end() +To: stable@vger.kernel.org +Cc: "Michael S. Tsirkin" , Marek Szyprowski , Petr Tesarik , Sasha Levin +Message-ID: <20260505055921.224904-2-sashal@kernel.org> + +From: "Michael S. Tsirkin" + +[ Upstream commit ca085faabb42c31ee204235facc5a430cb9e78a9 ] + +When a structure contains a buffer that DMA writes to alongside fields +that the CPU writes to, cache line sharing between the DMA buffer and +CPU-written fields can cause data corruption on non-cache-coherent +platforms. + +Add __dma_from_device_group_begin()/end() annotations to ensure proper +alignment to prevent this: + +struct my_device { + spinlock_t lock1; + __dma_from_device_group_begin(); + char dma_buffer1[16]; + char dma_buffer2[16]; + __dma_from_device_group_end(); + spinlock_t lock2; +}; + +Message-ID: <19163086d5e4704c316f18f6da06bc1c72968904.1767601130.git.mst@redhat.com> +Acked-by: Marek Szyprowski +Reviewed-by: Petr Tesarik +Signed-off-by: Michael S. Tsirkin +Stable-dep-of: 3023c050af36 ("hwmon: (powerz) Avoid cacheline sharing for DMA buffer") +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + include/linux/dma-mapping.h | 13 +++++++++++++ + 1 file changed, 13 insertions(+) + +--- a/include/linux/dma-mapping.h ++++ b/include/linux/dma-mapping.h +@@ -7,6 +7,7 @@ + #include + #include + #include ++#include + + /** + * List of possible attributes associated with a DMA mapping. 
The semantics +@@ -585,6 +586,18 @@ static inline int dma_get_cache_alignmen + } + #endif + ++#ifdef ARCH_HAS_DMA_MINALIGN ++#define ____dma_from_device_aligned __aligned(ARCH_DMA_MINALIGN) ++#else ++#define ____dma_from_device_aligned ++#endif ++/* Mark start of DMA buffer */ ++#define __dma_from_device_group_begin(GROUP) \ ++ __cacheline_group_begin(GROUP) ____dma_from_device_aligned ++/* Mark end of DMA buffer */ ++#define __dma_from_device_group_end(GROUP) \ ++ __cacheline_group_end(GROUP) ____dma_from_device_aligned ++ + static inline void *dmam_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp) + { diff --git a/queue-6.12/dma-mapping-drop-unneeded-includes-from-dma-mapping.h.patch b/queue-6.12/dma-mapping-drop-unneeded-includes-from-dma-mapping.h.patch new file mode 100644 index 0000000000..4449a0e451 --- /dev/null +++ b/queue-6.12/dma-mapping-drop-unneeded-includes-from-dma-mapping.h.patch @@ -0,0 +1,53 @@ +From stable+bounces-243984-greg=kroah.com@vger.kernel.org Tue May 5 08:01:39 2026 +From: Sasha Levin +Date: Tue, 5 May 2026 01:59:19 -0400 +Subject: dma-mapping: drop unneeded includes from dma-mapping.h +To: stable@vger.kernel.org +Cc: Christoph Hellwig , Sasha Levin +Message-ID: <20260505055921.224904-1-sashal@kernel.org> + +From: Christoph Hellwig + +[ Upstream commit be164349e173a8e71cd76f17c7ed720813b8d69b ] + +Back in the day a lot of logic was implemented inline in dma-mapping.h and +needed various includes. Move of this has long been moved out of line, +so we can drop various includes to improve kernel rebuild times. + +Signed-off-by: Christoph Hellwig +Stable-dep-of: 3023c050af36 ("hwmon: (powerz) Avoid cacheline sharing for DMA buffer") +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + arch/powerpc/platforms/pseries/svm.c | 1 + + include/linux/dma-mapping.h | 4 ---- + 2 files changed, 1 insertion(+), 4 deletions(-) + +--- a/arch/powerpc/platforms/pseries/svm.c ++++ b/arch/powerpc/platforms/pseries/svm.c +@@ -8,6 +8,7 @@ + + #include + #include ++#include + #include + #include + #include +--- a/include/linux/dma-mapping.h ++++ b/include/linux/dma-mapping.h +@@ -2,15 +2,11 @@ + #ifndef _LINUX_DMA_MAPPING_H + #define _LINUX_DMA_MAPPING_H + +-#include +-#include +-#include + #include + #include + #include + #include + #include +-#include + + /** + * List of possible attributes associated with a DMA mapping. The semantics diff --git a/queue-6.12/erofs-fix-unsigned-underflow-in-z_erofs_lz4_handle_overlap.patch b/queue-6.12/erofs-fix-unsigned-underflow-in-z_erofs_lz4_handle_overlap.patch new file mode 100644 index 0000000000..863edb6ac1 --- /dev/null +++ b/queue-6.12/erofs-fix-unsigned-underflow-in-z_erofs_lz4_handle_overlap.patch @@ -0,0 +1,57 @@ +From stable+bounces-244903-greg=kroah.com@vger.kernel.org Sat May 9 05:32:06 2026 +From: Sasha Levin +Date: Fri, 8 May 2026 23:31:59 -0400 +Subject: erofs: fix unsigned underflow in z_erofs_lz4_handle_overlap() +To: stable@vger.kernel.org +Cc: Junrui Luo , Yuhao Jiang , Gao Xiang , Sasha Levin +Message-ID: <20260509033159.3082967-3-sashal@kernel.org> + +From: Junrui Luo + +[ Upstream commit 21e161de2dc660b1bb70ef5b156ab8e6e1cca3ab ] + +Some crafted images can have illegal (!partial_decoding && +m_llen < m_plen) extents, and the LZ4 inplace decompression path +can be wrongly hit, but it cannot handle (outpages < inpages) +properly: "outpages - inpages" wraps to a large value and +the subsequent rq->out[] access reads past the decompressed_pages +array. 
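+
+With unsigned page counts the subtraction cannot go negative, it wraps,
+as this reduced illustration shows (not the driver code):
+
+    unsigned int outpages = 1, inpages = 2;
+    unsigned int base = outpages - inpages;  /* wraps to UINT_MAX */
+    /* rq->out[base + i] then indexes far outside the array */
+
+The added check simply refuses the in-place path up front:
+
+    if (rq->outpages >= rq->inpages)
+        /* only then consider in-place decompression */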
+ +However, such crafted cases can correctly result in a corruption +report in the normal LZ4 non-inplace path. + +Let's add an additional check to fix this for backporting. + +Reproducible image (base64-encoded gzipped blob): + +H4sIAJGR12kCA+3SPUoDQRgG4MkmkkZk8QRbRFIIi9hbpEjrHQI5ghfwCN5BLCzTGtLbBI+g +dilSJo1CnIm7GEXFxhT6PDDwfrs73/ywIQD/1ePD4r7Ou6ETsrq4mu7XcWfj++Pb58nJU/9i +PNtbjhan04/9GtX4qVYc814WDqt6FaX5s+ZwXXeq52lndT6IuVvlblytLMvh4Gzwaf90nsvz +2DF/21+20T/ldgp5s1jXRaN4t/8izsy/OUB6e/Qa79r+JwAAAAAAAL52vQVuGQAAAP6+my1w +ywAAAAAAAADwu14ATsEYtgBQAAA= + +$ mount -t erofs -o cache_strategy=disabled foo.erofs /mnt +$ dd if=/mnt/data of=/dev/null bs=4096 count=1 + +Fixes: 598162d05080 ("erofs: support decompress big pcluster for lz4 backend") +Reported-by: Yuhao Jiang +Cc: stable@vger.kernel.org +Signed-off-by: Junrui Luo +Reviewed-by: Gao Xiang +Signed-off-by: Gao Xiang +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + fs/erofs/decompressor.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/fs/erofs/decompressor.c ++++ b/fs/erofs/decompressor.c +@@ -149,6 +149,7 @@ static void *z_erofs_lz4_handle_overlap( + oend = rq->pageofs_out + rq->outputsize; + omargin = PAGE_ALIGN(oend) - oend; + if (!rq->partial_decoding && may_inplace && ++ rq->outpages >= rq->inpages && + omargin >= LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize)) { + for (i = 0; i < rq->inpages; ++i) + if (rq->out[rq->outpages - rq->inpages + i] != diff --git a/queue-6.12/erofs-move-in-out-pages-into-struct-z_erofs_decompress_req.patch b/queue-6.12/erofs-move-in-out-pages-into-struct-z_erofs_decompress_req.patch new file mode 100644 index 0000000000..4949d68463 --- /dev/null +++ b/queue-6.12/erofs-move-in-out-pages-into-struct-z_erofs_decompress_req.patch @@ -0,0 +1,334 @@ +From stable+bounces-244901-greg=kroah.com@vger.kernel.org Sat May 9 05:32:05 2026 +From: Sasha Levin +Date: Fri, 8 May 2026 23:31:57 -0400 +Subject: erofs: move {in,out}pages into struct z_erofs_decompress_req +To: stable@vger.kernel.org +Cc: Gao Xiang , Chao Yu , Sasha Levin +Message-ID: <20260509033159.3082967-1-sashal@kernel.org> + +From: Gao Xiang + +[ Upstream commit 0243cc257ffa6d8cb210a3070b687fb510f113c7 ] + +It seems that all compressors need those two values, so just move +them into the common structure. + +`struct z_erofs_lz4_decompress_ctx` can be dropped too. 
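+
+The counts are the number of pages spanned by the compressed and
+decompressed buffers; a sketch of the arithmetic that each stream
+decompressor used to repeat locally before this change:
+
+    rq->inpages  = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
+    rq->outpages = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;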
+ +Reviewed-by: Chao Yu +Signed-off-by: Gao Xiang +Link: https://lore.kernel.org/r/20250305124007.1810731-1-hsiangkao@linux.alibaba.com +Stable-dep-of: 21e161de2dc6 ("erofs: fix unsigned underflow in z_erofs_lz4_handle_overlap()") +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + fs/erofs/compress.h | 2 + fs/erofs/decompressor.c | 93 +++++++++++++++------------------------- + fs/erofs/decompressor_deflate.c | 8 --- + fs/erofs/decompressor_lzma.c | 8 --- + fs/erofs/decompressor_zstd.c | 8 --- + fs/erofs/zdata.c | 2 + 6 files changed, 41 insertions(+), 80 deletions(-) + +--- a/fs/erofs/compress.h ++++ b/fs/erofs/compress.h +@@ -11,6 +11,7 @@ + struct z_erofs_decompress_req { + struct super_block *sb; + struct page **in, **out; ++ unsigned int inpages, outpages; + unsigned short pageofs_in, pageofs_out; + unsigned int inputsize, outputsize; + +@@ -80,7 +81,6 @@ extern const struct z_erofs_decompressor + + struct z_erofs_stream_dctx { + struct z_erofs_decompress_req *rq; +- unsigned int inpages, outpages; /* # of {en,de}coded pages */ + int no, ni; /* the current {en,de}coded page # */ + + unsigned int avail_out; /* remaining bytes in the decoded buffer */ +--- a/fs/erofs/decompressor.c ++++ b/fs/erofs/decompressor.c +@@ -16,14 +16,6 @@ + #define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize) (((srcsize) >> 8) + 32) + #endif + +-struct z_erofs_lz4_decompress_ctx { +- struct z_erofs_decompress_req *rq; +- /* # of encoded, decoded pages */ +- unsigned int inpages, outpages; +- /* decoded block total length (used for in-place decompression) */ +- unsigned int oend; +-}; +- + static int z_erofs_load_lz4_config(struct super_block *sb, + struct erofs_super_block *dsb, void *data, int size) + { +@@ -62,10 +54,9 @@ static int z_erofs_load_lz4_config(struc + * Fill all gaps with bounce pages if it's a sparse page list. Also check if + * all physical pages are consecutive, which can be seen for moderate CR. + */ +-static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx, ++static int z_erofs_lz4_prepare_dstpages(struct z_erofs_decompress_req *rq, + struct page **pagepool) + { +- struct z_erofs_decompress_req *rq = ctx->rq; + struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL }; + unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES, + BITS_PER_LONG)] = { 0 }; +@@ -75,7 +66,7 @@ static int z_erofs_lz4_prepare_dstpages( + unsigned int i, j, top; + + top = 0; +- for (i = j = 0; i < ctx->outpages; ++i, ++j) { ++ for (i = j = 0; i < rq->outpages; ++i, ++j) { + struct page *const page = rq->out[i]; + struct page *victim; + +@@ -121,36 +112,36 @@ static int z_erofs_lz4_prepare_dstpages( + return kaddr ? 
1 : 0; + } + +-static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx, ++static void *z_erofs_lz4_handle_overlap(struct z_erofs_decompress_req *rq, + void *inpage, void *out, unsigned int *inputmargin, + int *maptype, bool may_inplace) + { +- struct z_erofs_decompress_req *rq = ctx->rq; +- unsigned int omargin, total, i; ++ unsigned int oend, omargin, total, i; + struct page **in; + void *src, *tmp; + + if (rq->inplace_io) { +- omargin = PAGE_ALIGN(ctx->oend) - ctx->oend; ++ oend = rq->pageofs_out + rq->outputsize; ++ omargin = PAGE_ALIGN(oend) - oend; + if (rq->partial_decoding || !may_inplace || + omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize)) + goto docopy; + +- for (i = 0; i < ctx->inpages; ++i) +- if (rq->out[ctx->outpages - ctx->inpages + i] != ++ for (i = 0; i < rq->inpages; ++i) ++ if (rq->out[rq->outpages - rq->inpages + i] != + rq->in[i]) + goto docopy; + kunmap_local(inpage); + *maptype = 3; +- return out + ((ctx->outpages - ctx->inpages) << PAGE_SHIFT); ++ return out + ((rq->outpages - rq->inpages) << PAGE_SHIFT); + } + +- if (ctx->inpages <= 1) { ++ if (rq->inpages <= 1) { + *maptype = 0; + return inpage; + } + kunmap_local(inpage); +- src = erofs_vm_map_ram(rq->in, ctx->inpages); ++ src = erofs_vm_map_ram(rq->in, rq->inpages); + if (!src) + return ERR_PTR(-ENOMEM); + *maptype = 1; +@@ -159,7 +150,7 @@ static void *z_erofs_lz4_handle_overlap( + docopy: + /* Or copy compressed data which can be overlapped to per-CPU buffer */ + in = rq->in; +- src = z_erofs_get_gbuf(ctx->inpages); ++ src = z_erofs_get_gbuf(rq->inpages); + if (!src) { + DBG_BUGON(1); + kunmap_local(inpage); +@@ -204,10 +195,8 @@ int z_erofs_fixup_insize(struct z_erofs_ + return 0; + } + +-static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx, +- u8 *dst) ++static int z_erofs_lz4_decompress_mem(struct z_erofs_decompress_req *rq, u8 *dst) + { +- struct z_erofs_decompress_req *rq = ctx->rq; + bool support_0padding = false, may_inplace = false; + unsigned int inputmargin; + u8 *out, *headpage, *src; +@@ -231,7 +220,7 @@ static int z_erofs_lz4_decompress_mem(st + } + + inputmargin = rq->pageofs_in; +- src = z_erofs_lz4_handle_overlap(ctx, headpage, dst, &inputmargin, ++ src = z_erofs_lz4_handle_overlap(rq, headpage, dst, &inputmargin, + &maptype, may_inplace); + if (IS_ERR(src)) + return PTR_ERR(src); +@@ -258,7 +247,7 @@ static int z_erofs_lz4_decompress_mem(st + if (maptype == 0) { + kunmap_local(headpage); + } else if (maptype == 1) { +- vm_unmap_ram(src, ctx->inpages); ++ vm_unmap_ram(src, rq->inpages); + } else if (maptype == 2) { + z_erofs_put_gbuf(src); + } else if (maptype != 3) { +@@ -271,54 +260,42 @@ static int z_erofs_lz4_decompress_mem(st + static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, + struct page **pagepool) + { +- struct z_erofs_lz4_decompress_ctx ctx; + unsigned int dst_maptype; + void *dst; + int ret; + +- ctx.rq = rq; +- ctx.oend = rq->pageofs_out + rq->outputsize; +- ctx.outpages = PAGE_ALIGN(ctx.oend) >> PAGE_SHIFT; +- ctx.inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT; +- + /* one optimized fast path only for non bigpcluster cases yet */ +- if (ctx.inpages == 1 && ctx.outpages == 1 && !rq->inplace_io) { ++ if (rq->inpages == 1 && rq->outpages == 1 && !rq->inplace_io) { + DBG_BUGON(!*rq->out); + dst = kmap_local_page(*rq->out); + dst_maptype = 0; +- goto dstmap_out; +- } +- +- /* general decoding path which can be used for all cases */ +- ret = z_erofs_lz4_prepare_dstpages(&ctx, pagepool); +- if (ret < 0) { +- 
return ret; +- } else if (ret > 0) { +- dst = page_address(*rq->out); +- dst_maptype = 1; + } else { +- dst = erofs_vm_map_ram(rq->out, ctx.outpages); +- if (!dst) +- return -ENOMEM; +- dst_maptype = 2; ++ /* general decoding path which can be used for all cases */ ++ ret = z_erofs_lz4_prepare_dstpages(rq, pagepool); ++ if (ret < 0) ++ return ret; ++ if (ret > 0) { ++ dst = page_address(*rq->out); ++ dst_maptype = 1; ++ } else { ++ dst = erofs_vm_map_ram(rq->out, rq->outpages); ++ if (!dst) ++ return -ENOMEM; ++ dst_maptype = 2; ++ } + } +- +-dstmap_out: +- ret = z_erofs_lz4_decompress_mem(&ctx, dst); ++ ret = z_erofs_lz4_decompress_mem(rq, dst); + if (!dst_maptype) + kunmap_local(dst); + else if (dst_maptype == 2) +- vm_unmap_ram(dst, ctx.outpages); ++ vm_unmap_ram(dst, rq->outpages); + return ret; + } + + static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq, + struct page **pagepool) + { +- const unsigned int nrpages_in = +- PAGE_ALIGN(rq->pageofs_in + rq->inputsize) >> PAGE_SHIFT; +- const unsigned int nrpages_out = +- PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; ++ const unsigned int nrpages_in = rq->inpages, nrpages_out = rq->outpages; + const unsigned int bs = rq->sb->s_blocksize; + unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt; + u8 *kin; +@@ -376,7 +353,7 @@ int z_erofs_stream_switch_bufs(struct z_ + unsigned int j; + + if (!dctx->avail_out) { +- if (++dctx->no >= dctx->outpages || !rq->outputsize) { ++ if (++dctx->no >= rq->outpages || !rq->outputsize) { + erofs_err(sb, "insufficient space for decompressed data"); + return -EFSCORRUPTED; + } +@@ -404,7 +381,7 @@ int z_erofs_stream_switch_bufs(struct z_ + } + + if (dctx->inbuf_pos == dctx->inbuf_sz && rq->inputsize) { +- if (++dctx->ni >= dctx->inpages) { ++ if (++dctx->ni >= rq->inpages) { + erofs_err(sb, "invalid compressed data"); + return -EFSCORRUPTED; + } +@@ -437,7 +414,7 @@ int z_erofs_stream_switch_bufs(struct z_ + dctx->bounced = true; + } + +- for (j = dctx->ni + 1; j < dctx->inpages; ++j) { ++ for (j = dctx->ni + 1; j < rq->inpages; ++j) { + if (rq->out[dctx->no] != rq->in[j]) + continue; + tmppage = erofs_allocpage(pgpl, rq->gfp); +--- a/fs/erofs/decompressor_deflate.c ++++ b/fs/erofs/decompressor_deflate.c +@@ -101,13 +101,7 @@ static int z_erofs_deflate_decompress(st + struct page **pgpl) + { + struct super_block *sb = rq->sb; +- struct z_erofs_stream_dctx dctx = { +- .rq = rq, +- .inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT, +- .outpages = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) +- >> PAGE_SHIFT, +- .no = -1, .ni = 0, +- }; ++ struct z_erofs_stream_dctx dctx = { .rq = rq, .no = -1, .ni = 0 }; + struct z_erofs_deflate *strm; + int zerr, err; + +--- a/fs/erofs/decompressor_lzma.c ++++ b/fs/erofs/decompressor_lzma.c +@@ -150,13 +150,7 @@ static int z_erofs_lzma_decompress(struc + struct page **pgpl) + { + struct super_block *sb = rq->sb; +- struct z_erofs_stream_dctx dctx = { +- .rq = rq, +- .inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT, +- .outpages = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) +- >> PAGE_SHIFT, +- .no = -1, .ni = 0, +- }; ++ struct z_erofs_stream_dctx dctx = { .rq = rq, .no = -1, .ni = 0 }; + struct xz_buf buf = {}; + struct z_erofs_lzma *strm; + enum xz_ret xz_err; +--- a/fs/erofs/decompressor_zstd.c ++++ b/fs/erofs/decompressor_zstd.c +@@ -139,13 +139,7 @@ static int z_erofs_zstd_decompress(struc + struct page **pgpl) + { + struct super_block *sb = rq->sb; +- struct z_erofs_stream_dctx dctx = { +- .rq = rq, +- .inpages = 
PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT, +- .outpages = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) +- >> PAGE_SHIFT, +- .no = -1, .ni = 0, +- }; ++ struct z_erofs_stream_dctx dctx = { .rq = rq, .no = -1, .ni = 0 }; + zstd_in_buffer in_buf = { NULL, 0, 0 }; + zstd_out_buffer out_buf = { NULL, 0, 0 }; + struct z_erofs_zstd *strm; +--- a/fs/erofs/zdata.c ++++ b/fs/erofs/zdata.c +@@ -1292,6 +1292,8 @@ static int z_erofs_decompress_pcluster(s + .sb = be->sb, + .in = be->compressed_pages, + .out = be->decompressed_pages, ++ .inpages = pclusterpages, ++ .outpages = be->nr_pages, + .pageofs_in = pcl->pageofs_in, + .pageofs_out = pcl->pageofs_out, + .inputsize = pcl->pclustersize, diff --git a/queue-6.12/erofs-tidy-up-z_erofs_lz4_handle_overlap.patch b/queue-6.12/erofs-tidy-up-z_erofs_lz4_handle_overlap.patch new file mode 100644 index 0000000000..da2bf34047 --- /dev/null +++ b/queue-6.12/erofs-tidy-up-z_erofs_lz4_handle_overlap.patch @@ -0,0 +1,139 @@ +From stable+bounces-244902-greg=kroah.com@vger.kernel.org Sat May 9 05:32:08 2026 +From: Sasha Levin +Date: Fri, 8 May 2026 23:31:58 -0400 +Subject: erofs: tidy up z_erofs_lz4_handle_overlap() +To: stable@vger.kernel.org +Cc: Gao Xiang , Chao Yu , Sasha Levin +Message-ID: <20260509033159.3082967-2-sashal@kernel.org> + +From: Gao Xiang + +[ Upstream commit 9ae77198d4815c63fc8ebacc659c71d150d1e51b ] + + - Add some useful comments to explain inplace I/Os and decompression; + + - Rearrange the code to get rid of one unnecessary goto. + +Reviewed-by: Chao Yu +Signed-off-by: Gao Xiang +Stable-dep-of: 21e161de2dc6 ("erofs: fix unsigned underflow in z_erofs_lz4_handle_overlap()") +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + fs/erofs/decompressor.c | 85 +++++++++++++++++++++++++----------------------- + 1 file changed, 46 insertions(+), 39 deletions(-) + +--- a/fs/erofs/decompressor.c ++++ b/fs/erofs/decompressor.c +@@ -112,44 +112,58 @@ static int z_erofs_lz4_prepare_dstpages( + return kaddr ? 1 : 0; + } + +-static void *z_erofs_lz4_handle_overlap(struct z_erofs_decompress_req *rq, ++static void *z_erofs_lz4_handle_overlap(const struct z_erofs_decompress_req *rq, + void *inpage, void *out, unsigned int *inputmargin, + int *maptype, bool may_inplace) + { +- unsigned int oend, omargin, total, i; ++ unsigned int oend, omargin, cnt, i; + struct page **in; +- void *src, *tmp; +- +- if (rq->inplace_io) { +- oend = rq->pageofs_out + rq->outputsize; +- omargin = PAGE_ALIGN(oend) - oend; +- if (rq->partial_decoding || !may_inplace || +- omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize)) +- goto docopy; ++ void *src; + ++ /* ++ * If in-place I/O isn't used, for example, the bounce compressed cache ++ * can hold data for incomplete read requests. Just map the compressed ++ * buffer as well and decompress directly. ++ */ ++ if (!rq->inplace_io) { ++ if (rq->inpages <= 1) { ++ *maptype = 0; ++ return inpage; ++ } ++ kunmap_local(inpage); ++ src = erofs_vm_map_ram(rq->in, rq->inpages); ++ if (!src) ++ return ERR_PTR(-ENOMEM); ++ *maptype = 1; ++ return src; ++ } ++ /* ++ * Then, deal with in-place I/Os. The reasons why in-place I/O is useful ++ * are: (1) It minimizes memory footprint during the I/O submission, ++ * which is useful for slow storage (including network devices and ++ * low-end HDDs/eMMCs) but with a lot inflight I/Os; (2) If in-place ++ * decompression can also be applied, it will reuse the unique buffer so ++ * that no extra CPU D-cache is polluted with temporary compressed data ++ * for extreme performance. 
++ */ ++ oend = rq->pageofs_out + rq->outputsize; ++ omargin = PAGE_ALIGN(oend) - oend; ++ if (!rq->partial_decoding && may_inplace && ++ omargin >= LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize)) { + for (i = 0; i < rq->inpages; ++i) + if (rq->out[rq->outpages - rq->inpages + i] != + rq->in[i]) +- goto docopy; +- kunmap_local(inpage); +- *maptype = 3; +- return out + ((rq->outpages - rq->inpages) << PAGE_SHIFT); ++ break; ++ if (i >= rq->inpages) { ++ kunmap_local(inpage); ++ *maptype = 3; ++ return out + ((rq->outpages - rq->inpages) << PAGE_SHIFT); ++ } + } +- +- if (rq->inpages <= 1) { +- *maptype = 0; +- return inpage; +- } +- kunmap_local(inpage); +- src = erofs_vm_map_ram(rq->in, rq->inpages); +- if (!src) +- return ERR_PTR(-ENOMEM); +- *maptype = 1; +- return src; +- +-docopy: +- /* Or copy compressed data which can be overlapped to per-CPU buffer */ +- in = rq->in; ++ /* ++ * If in-place decompression can't be applied, copy compressed data that ++ * may potentially overlap during decompression to a per-CPU buffer. ++ */ + src = z_erofs_get_gbuf(rq->inpages); + if (!src) { + DBG_BUGON(1); +@@ -157,20 +171,13 @@ docopy: + return ERR_PTR(-EFAULT); + } + +- tmp = src; +- total = rq->inputsize; +- while (total) { +- unsigned int page_copycnt = +- min_t(unsigned int, total, PAGE_SIZE - *inputmargin); +- ++ for (i = 0, in = rq->in; i < rq->inputsize; i += cnt, ++in) { ++ cnt = min_t(u32, rq->inputsize - i, PAGE_SIZE - *inputmargin); + if (!inpage) + inpage = kmap_local_page(*in); +- memcpy(tmp, inpage + *inputmargin, page_copycnt); ++ memcpy(src + i, inpage + *inputmargin, cnt); + kunmap_local(inpage); + inpage = NULL; +- tmp += page_copycnt; +- total -= page_copycnt; +- ++in; + *inputmargin = 0; + } + *maptype = 2; diff --git a/queue-6.12/fbdev-defio-disconnect-deferred-i-o-from-the-lifetime-of-struct-fb_info.patch b/queue-6.12/fbdev-defio-disconnect-deferred-i-o-from-the-lifetime-of-struct-fb_info.patch new file mode 100644 index 0000000000..6611213298 --- /dev/null +++ b/queue-6.12/fbdev-defio-disconnect-deferred-i-o-from-the-lifetime-of-struct-fb_info.patch @@ -0,0 +1,371 @@ +From stable+bounces-243937-greg=kroah.com@vger.kernel.org Tue May 5 02:15:33 2026 +From: Sasha Levin +Date: Mon, 4 May 2026 20:14:53 -0400 +Subject: fbdev: defio: Disconnect deferred I/O from the lifetime of struct fb_info +To: stable@vger.kernel.org +Cc: Thomas Zimmermann , Helge Deller , linux-fbdev@vger.kernel.org, dri-devel@lists.freedesktop.org, Sasha Levin +Message-ID: <20260505001453.124124-1-sashal@kernel.org> + +From: Thomas Zimmermann + +[ Upstream commit 9ded47ad003f09a94b6a710b5c47f4aa5ceb7429 ] + +Hold state of deferred I/O in struct fb_deferred_io_state. Allocate an +instance as part of initializing deferred I/O and remove it only after +the final mapping has been closed. If the fb_info and the contained +deferred I/O meanwhile goes away, clear struct fb_deferred_io_state.info +to invalidate the mapping. Any access will then result in a SIGBUS +signal. + +Fixes a long-standing problem, where a device hot-unplug happens while +user space still has an active mapping of the graphics memory. The hot- +unplug frees the instance of struct fb_info. Accessing the memory will +operate on undefined state. 
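+
+The shape of the fix, condensed (the full version is in fb_defio.c):
+
+    struct fb_deferred_io_state {
+        struct kref ref;
+        struct mutex lock;
+        struct fb_info *info;  /* cleared once the device is gone */
+    };
+
+    /* fault/mkwrite paths */
+    mutex_lock(&state->lock);
+    if (!state->info)
+        return VM_FAULT_SIGBUS;  /* device unplugged under us */
+
+    /* fb_deferred_io_cleanup() */
+    mutex_lock(&state->lock);
+    state->info = NULL;
+    mutex_unlock(&state->lock);
+    fb_deferred_io_state_put(state);  /* last mapping drops the rest */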
+ +Signed-off-by: Thomas Zimmermann +Fixes: 60b59beafba8 ("fbdev: mm: Deferred IO support") +Cc: Helge Deller +Cc: linux-fbdev@vger.kernel.org +Cc: dri-devel@lists.freedesktop.org +Cc: stable@vger.kernel.org # v2.6.22+ +Signed-off-by: Helge Deller +[ replaced `kzalloc_obj()` with `kzalloc(sizeof(*fbdefio_state), GFP_KERNEL)` ] +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + drivers/video/fbdev/core/fb_defio.c | 179 ++++++++++++++++++++++++++++-------- + include/linux/fb.h | 4 + 2 files changed, 145 insertions(+), 38 deletions(-) + +--- a/drivers/video/fbdev/core/fb_defio.c ++++ b/drivers/video/fbdev/core/fb_defio.c +@@ -23,6 +23,75 @@ + #include + #include + ++/* ++ * struct fb_deferred_io_state ++ */ ++ ++struct fb_deferred_io_state { ++ struct kref ref; ++ ++ struct mutex lock; /* mutex that protects the pageref list */ ++ /* fields protected by lock */ ++ struct fb_info *info; ++}; ++ ++static struct fb_deferred_io_state *fb_deferred_io_state_alloc(void) ++{ ++ struct fb_deferred_io_state *fbdefio_state; ++ ++ fbdefio_state = kzalloc(sizeof(*fbdefio_state), GFP_KERNEL); ++ if (!fbdefio_state) ++ return NULL; ++ ++ kref_init(&fbdefio_state->ref); ++ mutex_init(&fbdefio_state->lock); ++ ++ return fbdefio_state; ++} ++ ++static void fb_deferred_io_state_release(struct fb_deferred_io_state *fbdefio_state) ++{ ++ mutex_destroy(&fbdefio_state->lock); ++ ++ kfree(fbdefio_state); ++} ++ ++static void fb_deferred_io_state_get(struct fb_deferred_io_state *fbdefio_state) ++{ ++ kref_get(&fbdefio_state->ref); ++} ++ ++static void __fb_deferred_io_state_release(struct kref *ref) ++{ ++ struct fb_deferred_io_state *fbdefio_state = ++ container_of(ref, struct fb_deferred_io_state, ref); ++ ++ fb_deferred_io_state_release(fbdefio_state); ++} ++ ++static void fb_deferred_io_state_put(struct fb_deferred_io_state *fbdefio_state) ++{ ++ kref_put(&fbdefio_state->ref, __fb_deferred_io_state_release); ++} ++ ++/* ++ * struct vm_operations_struct ++ */ ++ ++static void fb_deferred_io_vm_open(struct vm_area_struct *vma) ++{ ++ struct fb_deferred_io_state *fbdefio_state = vma->vm_private_data; ++ ++ fb_deferred_io_state_get(fbdefio_state); ++} ++ ++static void fb_deferred_io_vm_close(struct vm_area_struct *vma) ++{ ++ struct fb_deferred_io_state *fbdefio_state = vma->vm_private_data; ++ ++ fb_deferred_io_state_put(fbdefio_state); ++} ++ + static struct page *fb_deferred_io_get_page(struct fb_info *info, unsigned long offs) + { + struct fb_deferred_io *fbdefio = info->fbdefio; +@@ -128,17 +197,31 @@ static void fb_deferred_io_pageref_put(s + /* this is to find and return the vmalloc-ed fb pages */ + static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf) + { ++ struct fb_info *info; + unsigned long offset; + struct page *page; +- struct fb_info *info = vmf->vma->vm_private_data; ++ vm_fault_t ret; ++ struct fb_deferred_io_state *fbdefio_state = vmf->vma->vm_private_data; ++ ++ mutex_lock(&fbdefio_state->lock); ++ ++ info = fbdefio_state->info; ++ if (!info) { ++ ret = VM_FAULT_SIGBUS; /* our device is gone */ ++ goto err_mutex_unlock; ++ } + + offset = vmf->pgoff << PAGE_SHIFT; +- if (offset >= info->fix.smem_len) +- return VM_FAULT_SIGBUS; ++ if (offset >= info->fix.smem_len) { ++ ret = VM_FAULT_SIGBUS; ++ goto err_mutex_unlock; ++ } + + page = fb_deferred_io_get_page(info, offset); +- if (!page) +- return VM_FAULT_SIGBUS; ++ if (!page) { ++ ret = VM_FAULT_SIGBUS; ++ goto err_mutex_unlock; ++ } + + if (vmf->vma->vm_file) + page->mapping = vmf->vma->vm_file->f_mapping; +@@ -148,8 
+231,15 @@ static vm_fault_t fb_deferred_io_fault(s + BUG_ON(!page->mapping); + page->index = vmf->pgoff; /* for folio_mkclean() */ + ++ mutex_unlock(&fbdefio_state->lock); ++ + vmf->page = page; ++ + return 0; ++ ++err_mutex_unlock: ++ mutex_unlock(&fbdefio_state->lock); ++ return ret; + } + + int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync) +@@ -176,15 +266,24 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_fsync); + * Adds a page to the dirty list. Call this from struct + * vm_operations_struct.page_mkwrite. + */ +-static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long offset, +- struct page *page) ++static vm_fault_t fb_deferred_io_track_page(struct fb_deferred_io_state *fbdefio_state, ++ unsigned long offset, struct page *page) + { +- struct fb_deferred_io *fbdefio = info->fbdefio; ++ struct fb_info *info; ++ struct fb_deferred_io *fbdefio; + struct fb_deferred_io_pageref *pageref; + vm_fault_t ret; + + /* protect against the workqueue changing the page list */ +- mutex_lock(&fbdefio->lock); ++ mutex_lock(&fbdefio_state->lock); ++ ++ info = fbdefio_state->info; ++ if (!info) { ++ ret = VM_FAULT_SIGBUS; /* our device is gone */ ++ goto err_mutex_unlock; ++ } ++ ++ fbdefio = info->fbdefio; + + pageref = fb_deferred_io_pageref_get(info, offset, page); + if (WARN_ON_ONCE(!pageref)) { +@@ -202,50 +301,38 @@ static vm_fault_t fb_deferred_io_track_p + */ + lock_page(pageref->page); + +- mutex_unlock(&fbdefio->lock); ++ mutex_unlock(&fbdefio_state->lock); + + /* come back after delay to process the deferred IO */ + schedule_delayed_work(&info->deferred_work, fbdefio->delay); + return VM_FAULT_LOCKED; + + err_mutex_unlock: +- mutex_unlock(&fbdefio->lock); ++ mutex_unlock(&fbdefio_state->lock); + return ret; + } + +-/* +- * fb_deferred_io_page_mkwrite - Mark a page as written for deferred I/O +- * @fb_info: The fbdev info structure +- * @vmf: The VM fault +- * +- * This is a callback we get when userspace first tries to +- * write to the page. We schedule a workqueue. That workqueue +- * will eventually mkclean the touched pages and execute the +- * deferred framebuffer IO. Then if userspace touches a page +- * again, we repeat the same scheme. +- * +- * Returns: +- * VM_FAULT_LOCKED on success, or a VM_FAULT error otherwise. 
+- */ +-static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf) ++static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_deferred_io_state *fbdefio_state, ++ struct vm_fault *vmf) + { + unsigned long offset = vmf->pgoff << PAGE_SHIFT; + struct page *page = vmf->page; + + file_update_time(vmf->vma->vm_file); + +- return fb_deferred_io_track_page(info, offset, page); ++ return fb_deferred_io_track_page(fbdefio_state, offset, page); + } + +-/* vm_ops->page_mkwrite handler */ + static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf) + { +- struct fb_info *info = vmf->vma->vm_private_data; ++ struct fb_deferred_io_state *fbdefio_state = vmf->vma->vm_private_data; + +- return fb_deferred_io_page_mkwrite(info, vmf); ++ return fb_deferred_io_page_mkwrite(fbdefio_state, vmf); + } + + static const struct vm_operations_struct fb_deferred_io_vm_ops = { ++ .open = fb_deferred_io_vm_open, ++ .close = fb_deferred_io_vm_close, + .fault = fb_deferred_io_fault, + .page_mkwrite = fb_deferred_io_mkwrite, + }; +@@ -262,7 +349,10 @@ int fb_deferred_io_mmap(struct fb_info * + vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); + if (!(info->flags & FBINFO_VIRTFB)) + vm_flags_set(vma, VM_IO); +- vma->vm_private_data = info; ++ vma->vm_private_data = info->fbdefio_state; ++ ++ fb_deferred_io_state_get(info->fbdefio_state); /* released in vma->vm_ops->close() */ ++ + return 0; + } + EXPORT_SYMBOL_GPL(fb_deferred_io_mmap); +@@ -273,9 +363,10 @@ static void fb_deferred_io_work(struct w + struct fb_info *info = container_of(work, struct fb_info, deferred_work.work); + struct fb_deferred_io_pageref *pageref, *next; + struct fb_deferred_io *fbdefio = info->fbdefio; ++ struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state; + + /* here we mkclean the pages, then do all deferred IO */ +- mutex_lock(&fbdefio->lock); ++ mutex_lock(&fbdefio_state->lock); + list_for_each_entry(pageref, &fbdefio->pagereflist, list) { + struct folio *folio = page_folio(pageref->page); + +@@ -291,12 +382,13 @@ static void fb_deferred_io_work(struct w + list_for_each_entry_safe(pageref, next, &fbdefio->pagereflist, list) + fb_deferred_io_pageref_put(pageref, info); + +- mutex_unlock(&fbdefio->lock); ++ mutex_unlock(&fbdefio_state->lock); + } + + int fb_deferred_io_init(struct fb_info *info) + { + struct fb_deferred_io *fbdefio = info->fbdefio; ++ struct fb_deferred_io_state *fbdefio_state; + struct fb_deferred_io_pageref *pagerefs; + unsigned long npagerefs; + int ret; +@@ -306,7 +398,11 @@ int fb_deferred_io_init(struct fb_info * + if (WARN_ON(!info->fix.smem_len)) + return -EINVAL; + +- mutex_init(&fbdefio->lock); ++ fbdefio_state = fb_deferred_io_state_alloc(); ++ if (!fbdefio_state) ++ return -ENOMEM; ++ fbdefio_state->info = info; ++ + INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work); + INIT_LIST_HEAD(&fbdefio->pagereflist); + if (fbdefio->delay == 0) /* set a default of 1 s */ +@@ -323,10 +419,12 @@ int fb_deferred_io_init(struct fb_info * + info->npagerefs = npagerefs; + info->pagerefs = pagerefs; + ++ info->fbdefio_state = fbdefio_state; ++ + return 0; + + err: +- mutex_destroy(&fbdefio->lock); ++ fb_deferred_io_state_release(fbdefio_state); + return ret; + } + EXPORT_SYMBOL_GPL(fb_deferred_io_init); +@@ -364,11 +462,18 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_release + + void fb_deferred_io_cleanup(struct fb_info *info) + { +- struct fb_deferred_io *fbdefio = info->fbdefio; ++ struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state; + + fb_deferred_io_lastclose(info); + ++ 
info->fbdefio_state = NULL; ++ ++ mutex_lock(&fbdefio_state->lock); ++ fbdefio_state->info = NULL; ++ mutex_unlock(&fbdefio_state->lock); ++ ++ fb_deferred_io_state_put(fbdefio_state); ++ + kvfree(info->pagerefs); +- mutex_destroy(&fbdefio->lock); + } + EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup); +--- a/include/linux/fb.h ++++ b/include/linux/fb.h +@@ -222,12 +222,13 @@ struct fb_deferred_io { + unsigned long delay; + bool sort_pagereflist; /* sort pagelist by offset */ + int open_count; /* number of opened files; protected by fb_info lock */ +- struct mutex lock; /* mutex that protects the pageref list */ + struct list_head pagereflist; /* list of pagerefs for touched pages */ + /* callback */ + struct page *(*get_page)(struct fb_info *info, unsigned long offset); + void (*deferred_io)(struct fb_info *info, struct list_head *pagelist); + }; ++ ++struct fb_deferred_io_state; + #endif + + /* +@@ -485,6 +486,7 @@ struct fb_info { + unsigned long npagerefs; + struct fb_deferred_io_pageref *pagerefs; + struct fb_deferred_io *fbdefio; ++ struct fb_deferred_io_state *fbdefio_state; + #endif + + const struct fb_ops *fbops; diff --git a/queue-6.12/fs-prepare-for-adding-lsm-blob-to-backing_file.patch b/queue-6.12/fs-prepare-for-adding-lsm-blob-to-backing_file.patch new file mode 100644 index 0000000000..103dd9302d --- /dev/null +++ b/queue-6.12/fs-prepare-for-adding-lsm-blob-to-backing_file.patch @@ -0,0 +1,83 @@ +From stable+bounces-243940-greg=kroah.com@vger.kernel.org Tue May 5 02:16:25 2026 +From: Sasha Levin +Date: Mon, 4 May 2026 20:16:14 -0400 +Subject: fs: prepare for adding LSM blob to backing_file +To: stable@vger.kernel.org +Cc: Amir Goldstein , linux-fsdevel@vger.kernel.org, linux-unionfs@vger.kernel.org, linux-erofs@lists.ozlabs.org, Serge Hallyn , Paul Moore , Sasha Levin +Message-ID: <20260505001614.127730-1-sashal@kernel.org> + +From: Amir Goldstein + +[ Upstream commit 880bd496ec72a6dcb00cb70c430ef752ba242ae7 ] + +In preparation to adding LSM blob to backing_file struct, factor out +helpers init_backing_file() and backing_file_free(). 
+ +Cc: stable@vger.kernel.org +Cc: linux-fsdevel@vger.kernel.org +Cc: linux-unionfs@vger.kernel.org +Cc: linux-erofs@lists.ozlabs.org +Signed-off-by: Amir Goldstein +Reviewed-by: Serge Hallyn +[PM: use the term "LSM blob", fix comment style to match file] +Signed-off-by: Paul Moore +[ Used kfree() instead of kmem_cache_free(bfilp_cachep, ff) ] +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + fs/file_table.c | 22 ++++++++++++++++++++-- + 1 file changed, 20 insertions(+), 2 deletions(-) + +--- a/fs/file_table.c ++++ b/fs/file_table.c +@@ -60,6 +60,12 @@ struct path *backing_file_user_path(stru + } + EXPORT_SYMBOL_GPL(backing_file_user_path); + ++static inline void backing_file_free(struct backing_file *ff) ++{ ++ path_put(&ff->user_path); ++ kfree(ff); ++} ++ + static inline void file_free(struct file *f) + { + security_file_free(f); +@@ -67,8 +73,7 @@ static inline void file_free(struct file + percpu_counter_dec(&nr_files); + put_cred(f->f_cred); + if (unlikely(f->f_mode & FMODE_BACKING)) { +- path_put(backing_file_user_path(f)); +- kfree(backing_file(f)); ++ backing_file_free(backing_file(f)); + } else { + kmem_cache_free(filp_cachep, f); + } +@@ -255,6 +260,12 @@ struct file *alloc_empty_file_noaccount( + return f; + } + ++static int init_backing_file(struct backing_file *ff) ++{ ++ memset(&ff->user_path, 0, sizeof(ff->user_path)); ++ return 0; ++} ++ + /* + * Variant of alloc_empty_file() that allocates a backing_file container + * and doesn't check and modify nr_files. +@@ -277,7 +288,14 @@ struct file *alloc_empty_backing_file(in + return ERR_PTR(error); + } + ++ /* The f_mode flags must be set before fput(). */ + ff->file.f_mode |= FMODE_BACKING | FMODE_NOACCOUNT; ++ error = init_backing_file(ff); ++ if (unlikely(error)) { ++ fput(&ff->file); ++ return ERR_PTR(error); ++ } ++ + return &ff->file; + } + diff --git a/queue-6.12/gtp-disable-bh-before-calling-udp_tunnel_xmit_skb.patch b/queue-6.12/gtp-disable-bh-before-calling-udp_tunnel_xmit_skb.patch new file mode 100644 index 0000000000..0609a67333 --- /dev/null +++ b/queue-6.12/gtp-disable-bh-before-calling-udp_tunnel_xmit_skb.patch @@ -0,0 +1,61 @@ +From stable+bounces-244956-greg=kroah.com@vger.kernel.org Sat May 9 16:22:03 2026 +From: Sasha Levin +Date: Sat, 9 May 2026 10:21:55 -0400 +Subject: gtp: disable BH before calling udp_tunnel_xmit_skb() +To: stable@vger.kernel.org +Cc: David Carlier , Jakub Kicinski , Sasha Levin +Message-ID: <20260509142155.3462128-1-sashal@kernel.org> + +From: David Carlier + +[ Upstream commit 5638504a2aa9e1b9d72af9060df1a160cce2d379 ] + +gtp_genl_send_echo_req() runs as a generic netlink doit handler in +process context with BH not disabled. It calls udp_tunnel_xmit_skb(), +which eventually invokes iptunnel_xmit() — that uses __this_cpu_inc/dec +on softnet_data.xmit.recursion to track the tunnel xmit recursion level. + +Without local_bh_disable(), the task may migrate between +dev_xmit_recursion_inc() and dev_xmit_recursion_dec(), breaking the +per-CPU counter pairing. The result is stale or negative recursion +levels that can later produce false-positive +SKB_DROP_REASON_RECURSION_LIMIT drops on either CPU. + +The other udp_tunnel_xmit_skb() call sites in gtp.c are unaffected: +the data path runs under ndo_start_xmit and the echo response handlers +run from the UDP encap rx softirq, both with BH already disabled. + +Fix it by disabling BH around the udp_tunnel_xmit_skb() call, mirroring +commit 2cd7e6971fc2 ("sctp: disable BH before calling +udp_tunnel_xmit_skb()"). 
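+
+As a rough sketch of the pairing problem (not taken from the driver, just
+the per-CPU counter named above), the recursion accounting amounts to:
+
+  __this_cpu_inc(softnet_data.xmit.recursion);  /* runs on CPU A */
+  /* process context: preemption and migration are possible here */
+  __this_cpu_dec(softnet_data.xmit.recursion);  /* may run on CPU B */
+
+Disabling BH also disables preemption, so both operations are guaranteed
+to hit the same per-CPU counter.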
+ +Fixes: 6f1a9140ecda ("net: add xmit recursion limit to tunnel xmit functions") +Cc: stable@vger.kernel.org +Signed-off-by: David Carlier +Link: https://patch.msgid.link/20260417055408.4667-1-devnexen@gmail.com +Signed-off-by: Jakub Kicinski +[ Context ] +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/gtp.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/drivers/net/gtp.c ++++ b/drivers/net/gtp.c +@@ -2401,6 +2401,7 @@ static int gtp_genl_send_echo_req(struct + return -ENODEV; + } + ++ local_bh_disable(); + udp_tunnel_xmit_skb(rt, sk, skb_to_send, + fl4.saddr, fl4.daddr, + fl4.flowi4_tos, +@@ -2410,6 +2411,7 @@ static int gtp_genl_send_echo_req(struct + !net_eq(sock_net(sk), + dev_net(gtp->dev)), + false); ++ local_bh_enable(); + return 0; + } + diff --git a/queue-6.12/hfsplus-fix-held-lock-freed-on-hfsplus_fill_super.patch b/queue-6.12/hfsplus-fix-held-lock-freed-on-hfsplus_fill_super.patch new file mode 100644 index 0000000000..993187bb58 --- /dev/null +++ b/queue-6.12/hfsplus-fix-held-lock-freed-on-hfsplus_fill_super.patch @@ -0,0 +1,142 @@ +From stable+bounces-244860-greg=kroah.com@vger.kernel.org Sat May 9 02:40:46 2026 +From: Sasha Levin +Date: Fri, 8 May 2026 20:40:35 -0400 +Subject: hfsplus: fix held lock freed on hfsplus_fill_super() +To: stable@vger.kernel.org +Cc: Zilin Guan , Viacheslav Dubeyko , Sasha Levin +Message-ID: <20260509004035.2365441-2-sashal@kernel.org> + +From: Zilin Guan + +[ Upstream commit 90c500e4fd83fa33c09bc7ee23b6d9cc487ac733 ] + +hfsplus_fill_super() calls hfs_find_init() to initialize a search +structure, which acquires tree->tree_lock. If the subsequent call to +hfsplus_cat_build_key() fails, the function jumps to the out_put_root +error label without releasing the lock. The later cleanup path then +frees the tree data structure with the lock still held, triggering a +held lock freed warning. + +Fix this by adding the missing hfs_find_exit(&fd) call before jumping +to the out_put_root error label. This ensures that tree->tree_lock is +properly released on the error path. + +The bug was originally detected on v6.13-rc1 using an experimental +static analysis tool we are developing, and we have verified that the +issue persists in the latest mainline kernel. The tool is specifically +designed to detect memory management issues. It is currently under active +development and not yet publicly available. + +We confirmed the bug by runtime testing under QEMU with x86_64 defconfig, +lockdep enabled, and CONFIG_HFSPLUS_FS=y. To trigger the error path, we +used GDB to dynamically shrink the max_unistr_len parameter to 1 before +hfsplus_asc2uni() is called. This forces hfsplus_asc2uni() to naturally +return -ENAMETOOLONG, which propagates to hfsplus_cat_build_key() and +exercises the faulty error path. The following warning was observed +during mount: + + ========================= + WARNING: held lock freed! + 7.0.0-rc3-00016-gb4f0dd314b39 #4 Not tainted + ------------------------- + mount/174 is freeing memory ffff888103f92000-ffff888103f92fff, with a lock still held there! 
+ ffff888103f920b0 (&tree->tree_lock){+.+.}-{4:4}, at: hfsplus_find_init+0x154/0x1e0 + 2 locks held by mount/174: + #0: ffff888103f960e0 (&type->s_umount_key#42/1){+.+.}-{4:4}, at: alloc_super.constprop.0+0x167/0xa40 + #1: ffff888103f920b0 (&tree->tree_lock){+.+.}-{4:4}, at: hfsplus_find_init+0x154/0x1e0 + + stack backtrace: + CPU: 2 UID: 0 PID: 174 Comm: mount Not tainted 7.0.0-rc3-00016-gb4f0dd314b39 #4 PREEMPT(lazy) + Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.15.0-1 04/01/2014 + Call Trace: + + dump_stack_lvl+0x82/0xd0 + debug_check_no_locks_freed+0x13a/0x180 + kfree+0x16b/0x510 + ? hfsplus_fill_super+0xcb4/0x18a0 + hfsplus_fill_super+0xcb4/0x18a0 + ? __pfx_hfsplus_fill_super+0x10/0x10 + ? srso_return_thunk+0x5/0x5f + ? bdev_open+0x65f/0xc30 + ? srso_return_thunk+0x5/0x5f + ? pointer+0x4ce/0xbf0 + ? trace_contention_end+0x11c/0x150 + ? __pfx_pointer+0x10/0x10 + ? srso_return_thunk+0x5/0x5f + ? bdev_open+0x79b/0xc30 + ? srso_return_thunk+0x5/0x5f + ? srso_return_thunk+0x5/0x5f + ? vsnprintf+0x6da/0x1270 + ? srso_return_thunk+0x5/0x5f + ? __mutex_unlock_slowpath+0x157/0x740 + ? __pfx_vsnprintf+0x10/0x10 + ? srso_return_thunk+0x5/0x5f + ? srso_return_thunk+0x5/0x5f + ? mark_held_locks+0x49/0x80 + ? srso_return_thunk+0x5/0x5f + ? srso_return_thunk+0x5/0x5f + ? irqentry_exit+0x17b/0x5e0 + ? trace_irq_disable.constprop.0+0x116/0x150 + ? __pfx_hfsplus_fill_super+0x10/0x10 + ? __pfx_hfsplus_fill_super+0x10/0x10 + get_tree_bdev_flags+0x302/0x580 + ? __pfx_get_tree_bdev_flags+0x10/0x10 + ? vfs_parse_fs_qstr+0x129/0x1a0 + ? __pfx_vfs_parse_fs_qstr+0x3/0x10 + vfs_get_tree+0x89/0x320 + fc_mount+0x10/0x1d0 + path_mount+0x5c5/0x21c0 + ? __pfx_path_mount+0x10/0x10 + ? trace_irq_enable.constprop.0+0x116/0x150 + ? trace_irq_enable.constprop.0+0x116/0x150 + ? srso_return_thunk+0x5/0x5f + ? srso_return_thunk+0x5/0x5f + ? kmem_cache_free+0x307/0x540 + ? user_path_at+0x51/0x60 + ? __x64_sys_mount+0x212/0x280 + ? srso_return_thunk+0x5/0x5f + __x64_sys_mount+0x212/0x280 + ? __pfx___x64_sys_mount+0x10/0x10 + ? srso_return_thunk+0x5/0x5f + ? trace_irq_enable.constprop.0+0x116/0x150 + ? srso_return_thunk+0x5/0x5f + do_syscall_64+0x111/0x680 + entry_SYSCALL_64_after_hwframe+0x77/0x7f + RIP: 0033:0x7ffacad55eae + Code: 48 8b 0d 85 1f 0f 00 f7 d8 64 89 01 48 83 c8 ff c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 49 89 ca b8 a5 00 00 8 + RSP: 002b:00007fff1ab55718 EFLAGS: 00000246 ORIG_RAX: 00000000000000a5 + RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007ffacad55eae + RDX: 000055740c64e5b0 RSI: 000055740c64e630 RDI: 000055740c651ab0 + RBP: 000055740c64e380 R08: 0000000000000000 R09: 0000000000000001 + R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000 + R13: 000055740c64e5b0 R14: 000055740c651ab0 R15: 000055740c64e380 + + +After applying this patch, the warning no longer appears. 
+ +Fixes: 89ac9b4d3d1a ("hfsplus: fix longname handling") +CC: stable@vger.kernel.org +Signed-off-by: Zilin Guan +Reviewed-by: Viacheslav Dubeyko +Tested-by: Viacheslav Dubeyko +Signed-off-by: Viacheslav Dubeyko +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + fs/hfsplus/super.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/fs/hfsplus/super.c ++++ b/fs/hfsplus/super.c +@@ -545,8 +545,10 @@ static int hfsplus_fill_super(struct sup + if (err) + goto out_put_root; + err = hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str); +- if (unlikely(err < 0)) ++ if (unlikely(err < 0)) { ++ hfs_find_exit(&fd); + goto out_put_root; ++ } + if (!hfsplus_brec_read_cat(&fd, &entry)) { + hfs_find_exit(&fd); + if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) { diff --git a/queue-6.12/hfsplus-fix-uninit-value-by-validating-catalog-record-size.patch b/queue-6.12/hfsplus-fix-uninit-value-by-validating-catalog-record-size.patch new file mode 100644 index 0000000000..4dc3018d56 --- /dev/null +++ b/queue-6.12/hfsplus-fix-uninit-value-by-validating-catalog-record-size.patch @@ -0,0 +1,189 @@ +From stable+bounces-244859-greg=kroah.com@vger.kernel.org Sat May 9 02:40:43 2026 +From: Sasha Levin +Date: Fri, 8 May 2026 20:40:34 -0400 +Subject: hfsplus: fix uninit-value by validating catalog record size +To: stable@vger.kernel.org +Cc: Deepanshu Kartikey , syzbot+d80abb5b890d39261e72@syzkaller.appspotmail.com, Viacheslav Dubeyko , Charalampos Mitrodimas , Sasha Levin +Message-ID: <20260509004035.2365441-1-sashal@kernel.org> + +From: Deepanshu Kartikey + +[ Upstream commit b6b592275aeff184aa82fcf6abccd833fb71b393 ] + +Syzbot reported a KMSAN uninit-value issue in hfsplus_strcasecmp(). The +root cause is that hfs_brec_read() doesn't validate that the on-disk +record size matches the expected size for the record type being read. + +When mounting a corrupted filesystem, hfs_brec_read() may read less data +than expected. For example, when reading a catalog thread record, the +debug output showed: + + HFSPLUS_BREC_READ: rec_len=520, fd->entrylength=26 + HFSPLUS_BREC_READ: WARNING - entrylength (26) < rec_len (520) - PARTIAL READ! + +hfs_brec_read() only validates that entrylength is not greater than the +buffer size, but doesn't check if it's less than expected. It successfully +reads 26 bytes into a 520-byte structure and returns success, leaving 494 +bytes uninitialized. + +This uninitialized data in tmp.thread.nodeName then gets copied by +hfsplus_cat_build_key_uni() and used by hfsplus_strcasecmp(), triggering +the KMSAN warning when the uninitialized bytes are used as array indices +in case_fold(). + +Fix by introducing hfsplus_brec_read_cat() wrapper that: +1. Calls hfs_brec_read() to read the data +2. Validates the record size based on the type field: + - Fixed size for folder and file records + - Variable size for thread records (depends on string length) +3. Returns -EIO if size doesn't match expected + +For thread records, check against HFSPLUS_MIN_THREAD_SZ before reading +nodeName.length to avoid reading uninitialized data at call sites that +don't zero-initialize the entry structure. + +Also initialize the tmp variable in hfsplus_find_cat() as defensive +programming to ensure no uninitialized data even if validation is +bypassed. 
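+
+Call sites then all follow the same shape (a sketch using the names
+introduced by this patch; the zero initializer is the defensive measure
+mentioned above):
+
+  hfsplus_cat_entry tmp = {0};
+
+  err = hfsplus_brec_read_cat(fd, &tmp);
+  if (err)
+    return err; /* -EIO on any type or size mismatch */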
+ +Reported-by: syzbot+d80abb5b890d39261e72@syzkaller.appspotmail.com +Closes: https://syzkaller.appspot.com/bug?extid=d80abb5b890d39261e72 +Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") +Tested-by: syzbot+d80abb5b890d39261e72@syzkaller.appspotmail.com +Reviewed-by: Viacheslav Dubeyko +Tested-by: Viacheslav Dubeyko +Suggested-by: Charalampos Mitrodimas +Link: https://lore.kernel.org/all/20260120051114.1281285-1-kartikey406@gmail.com/ [v1] +Link: https://lore.kernel.org/all/20260121063109.1830263-1-kartikey406@gmail.com/ [v2] +Link: https://lore.kernel.org/all/20260212014233.2422046-1-kartikey406@gmail.com/ [v3] +Link: https://lore.kernel.org/all/20260214002100.436125-1-kartikey406@gmail.com/T/ [v4] +Link: https://lore.kernel.org/all/20260221061626.15853-1-kartikey406@gmail.com/T/ [v5] +Signed-off-by: Deepanshu Kartikey +Signed-off-by: Viacheslav Dubeyko +Link: https://lore.kernel.org/r/20260307010302.41547-1-kartikey406@gmail.com +Signed-off-by: Viacheslav Dubeyko +Stable-dep-of: 90c500e4fd83 ("hfsplus: fix held lock freed on hfsplus_fill_super()") +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + fs/hfsplus/bfind.c | 51 ++++++++++++++++++++++++++++++++++++++++++++++++ + fs/hfsplus/catalog.c | 4 +-- + fs/hfsplus/dir.c | 2 - + fs/hfsplus/hfsplus_fs.h | 9 ++++++++ + fs/hfsplus/super.c | 2 - + 5 files changed, 64 insertions(+), 4 deletions(-) + +--- a/fs/hfsplus/bfind.c ++++ b/fs/hfsplus/bfind.c +@@ -287,3 +287,54 @@ out: + fd->bnode = bnode; + return res; + } ++ ++/** ++ * hfsplus_brec_read_cat - read and validate a catalog record ++ * @fd: find data structure ++ * @entry: pointer to catalog entry to read into ++ * ++ * Reads a catalog record and validates its size matches the expected ++ * size based on the record type. ++ * ++ * Returns 0 on success, or negative error code on failure. 
++ */ ++int hfsplus_brec_read_cat(struct hfs_find_data *fd, hfsplus_cat_entry *entry) ++{ ++ int res; ++ u32 expected_size; ++ ++ res = hfs_brec_read(fd, entry, sizeof(hfsplus_cat_entry)); ++ if (res) ++ return res; ++ ++ /* Validate catalog record size based on type */ ++ switch (be16_to_cpu(entry->type)) { ++ case HFSPLUS_FOLDER: ++ expected_size = sizeof(struct hfsplus_cat_folder); ++ break; ++ case HFSPLUS_FILE: ++ expected_size = sizeof(struct hfsplus_cat_file); ++ break; ++ case HFSPLUS_FOLDER_THREAD: ++ case HFSPLUS_FILE_THREAD: ++ /* Ensure we have at least the fixed fields before reading nodeName.length */ ++ if (fd->entrylength < HFSPLUS_MIN_THREAD_SZ) { ++ pr_err("thread record too short (got %u)\n", fd->entrylength); ++ return -EIO; ++ } ++ expected_size = hfsplus_cat_thread_size(&entry->thread); ++ break; ++ default: ++ pr_err("unknown catalog record type %d\n", ++ be16_to_cpu(entry->type)); ++ return -EIO; ++ } ++ ++ if (fd->entrylength != expected_size) { ++ pr_err("catalog record size mismatch (type %d, got %u, expected %u)\n", ++ be16_to_cpu(entry->type), fd->entrylength, expected_size); ++ return -EIO; ++ } ++ ++ return 0; ++} +--- a/fs/hfsplus/catalog.c ++++ b/fs/hfsplus/catalog.c +@@ -194,12 +194,12 @@ static int hfsplus_fill_cat_thread(struc + int hfsplus_find_cat(struct super_block *sb, u32 cnid, + struct hfs_find_data *fd) + { +- hfsplus_cat_entry tmp; ++ hfsplus_cat_entry tmp = {0}; + int err; + u16 type; + + hfsplus_cat_build_key_with_cnid(sb, fd->search_key, cnid); +- err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry)); ++ err = hfsplus_brec_read_cat(fd, &tmp); + if (err) + return err; + +--- a/fs/hfsplus/dir.c ++++ b/fs/hfsplus/dir.c +@@ -49,7 +49,7 @@ static struct dentry *hfsplus_lookup(str + if (unlikely(err < 0)) + goto fail; + again: +- err = hfs_brec_read(&fd, &entry, sizeof(entry)); ++ err = hfsplus_brec_read_cat(&fd, &entry); + if (err) { + if (err == -ENOENT) { + hfs_find_exit(&fd); +--- a/fs/hfsplus/hfsplus_fs.h ++++ b/fs/hfsplus/hfsplus_fs.h +@@ -536,6 +536,15 @@ int hfsplus_submit_bio(struct super_bloc + void **data, blk_opf_t opf); + int hfsplus_read_wrapper(struct super_block *sb); + ++static inline u32 hfsplus_cat_thread_size(const struct hfsplus_cat_thread *thread) ++{ ++ return offsetof(struct hfsplus_cat_thread, nodeName) + ++ offsetof(struct hfsplus_unistr, unicode) + ++ be16_to_cpu(thread->nodeName.length) * sizeof(hfsplus_unichr); ++} ++ ++int hfsplus_brec_read_cat(struct hfs_find_data *fd, hfsplus_cat_entry *entry); ++ + /* + * time helpers: convert between 1904-base and 1970-base timestamps + * +--- a/fs/hfsplus/super.c ++++ b/fs/hfsplus/super.c +@@ -547,7 +547,7 @@ static int hfsplus_fill_super(struct sup + err = hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str); + if (unlikely(err < 0)) + goto out_put_root; +- if (!hfs_brec_read(&fd, &entry, sizeof(entry))) { ++ if (!hfsplus_brec_read_cat(&fd, &entry)) { + hfs_find_exit(&fd); + if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) { + err = -EIO; diff --git a/queue-6.12/hwmon-powerz-avoid-cacheline-sharing-for-dma-buffer.patch b/queue-6.12/hwmon-powerz-avoid-cacheline-sharing-for-dma-buffer.patch new file mode 100644 index 0000000000..72fad5f8f5 --- /dev/null +++ b/queue-6.12/hwmon-powerz-avoid-cacheline-sharing-for-dma-buffer.patch @@ -0,0 +1,55 @@ +From stable+bounces-243986-greg=kroah.com@vger.kernel.org Tue May 5 08:01:57 2026 +From: Sasha Levin +Date: Tue, 5 May 2026 01:59:21 -0400 +Subject: hwmon: (powerz) Avoid cacheline sharing for DMA buffer +To: 
stable@vger.kernel.org +Cc: "Thomas Weißschuh" , "Guenter Roeck" , "Sasha Levin" +Message-ID: <20260505055921.224904-3-sashal@kernel.org> + +From: Thomas Weißschuh + +[ Upstream commit 3023c050af3600bf451153335dea5e073c9a3088 ] + +Depending on the architecture the transfer buffer may share a cacheline +with the following mutex. As the buffer may be used for DMA, that is +problematic. + +Use the high-level DMA helpers to make sure that cacheline sharing can +not happen. + +Also drop the comment, as the helpers are documentation enough. + +https://sashiko.dev/#/message/20260408175814.934BFC19421%40smtp.kernel.org + +Fixes: 4381a36abdf1c ("hwmon: add POWER-Z driver") +Cc: stable@vger.kernel.org # ca085faabb42: dma-mapping: add __dma_from_device_group_begin()/end() +Signed-off-by: Thomas Weißschuh +Link: https://lore.kernel.org/r/20260408-powerz-cacheline-alias-v1-1-1254891be0dd@weissschuh.net +Signed-off-by: Guenter Roeck +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + drivers/hwmon/powerz.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +--- a/drivers/hwmon/powerz.c ++++ b/drivers/hwmon/powerz.c +@@ -6,6 +6,7 @@ + + #include + #include ++#include + #include + #include + #include +@@ -33,7 +34,9 @@ struct powerz_sensor_data { + } __packed; + + struct powerz_priv { +- char transfer_buffer[64]; /* first member to satisfy DMA alignment */ ++ __dma_from_device_group_begin(); ++ char transfer_buffer[64]; ++ __dma_from_device_group_end(); + struct mutex mutex; + struct completion completion; + struct urb *urb; diff --git a/queue-6.12/kvm-arm64-wake-up-from-wfi-when-iqrchip-is-in-userspace.patch b/queue-6.12/kvm-arm64-wake-up-from-wfi-when-iqrchip-is-in-userspace.patch new file mode 100644 index 0000000000..ddd18bb298 --- /dev/null +++ b/queue-6.12/kvm-arm64-wake-up-from-wfi-when-iqrchip-is-in-userspace.patch @@ -0,0 +1,46 @@ +From stable+bounces-245814-greg=kroah.com@vger.kernel.org Tue May 12 16:56:46 2026 +From: Marc Zyngier +Date: Tue, 12 May 2026 15:49:34 +0100 +Subject: KVM: arm64: Wake-up from WFI when iqrchip is in userspace +To: stable@vger.kernel.org +Message-ID: <20260512144934.3676827-1-maz@kernel.org> + +From: Marc Zyngier + +commit 4ce98bf0865c349e7026ad9c14f48da264920953 upstream + +It appears that there is nothing in the wake-up path that +evaluates whether the in-kernel interrupts are pending unless +we have a vgic. + +This means that the userspace irqchip support has been broken for +about four years, and nobody noticed. It was also broken before +as we wouldn't wake-up on a PMU interrupt, but hey, who cares... + +It is probably time to remove the feature altogether, because it +was a terrible idea 10 years ago, and it still is. 
+ +Fixes: b57de4ffd7c6d ("KVM: arm64: Simplify kvm_cpu_has_pending_timer()") +Link: https://patch.msgid.link/20260423163607.486345-1-maz@kernel.org +Signed-off-by: Marc Zyngier +Cc: stable@vger.kernel.org +Signed-off-by: Marc Zyngier +Signed-off-by: Greg Kroah-Hartman +--- + arch/arm64/kvm/arm.c | 5 +++++ + 1 file changed, 5 insertions(+) + +--- a/arch/arm64/kvm/arm.c ++++ b/arch/arm64/kvm/arm.c +@@ -729,6 +729,11 @@ int kvm_arch_vcpu_ioctl_set_mpstate(stru + int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) + { + bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF); ++ ++ irq_lines |= (!irqchip_in_kernel(v->kvm) && ++ (kvm_timer_should_notify_user(v) || ++ kvm_pmu_should_notify_user(v))); ++ + return ((irq_lines || kvm_vgic_vcpu_pending_irq(v)) + && !kvm_arm_vcpu_stopped(v) && !v->arch.pause); + } diff --git a/queue-6.12/mmc-core-optimize-time-for-secure-erase-trim-for-some-kingston-emmcs.patch b/queue-6.12/mmc-core-optimize-time-for-secure-erase-trim-for-some-kingston-emmcs.patch new file mode 100644 index 0000000000..3cdeffe583 --- /dev/null +++ b/queue-6.12/mmc-core-optimize-time-for-secure-erase-trim-for-some-kingston-emmcs.patch @@ -0,0 +1,94 @@ +From stable+bounces-244757-greg=kroah.com@vger.kernel.org Fri May 8 15:28:11 2026 +From: Sasha Levin +Date: Fri, 8 May 2026 09:22:34 -0400 +Subject: mmc: core: Optimize time for secure erase/trim for some Kingston eMMCs +To: stable@vger.kernel.org +Cc: Luke Wang , Ulf Hansson , Sasha Levin +Message-ID: <20260508132234.1478437-1-sashal@kernel.org> + +From: Luke Wang + +[ Upstream commit d6bf2e64dec87322f2b11565ddb59c0e967f96e3 ] + +Kingston eMMC IY2964 and IB2932 takes a fixed ~2 seconds for each secure +erase/trim operation regardless of size - that is, a single secure +erase/trim operation of 1MB takes the same time as 1GB. With default +calculated 3.5MB max discard size, secure erase 1GB requires ~300 separate +operations taking ~10 minutes total. + +Add a card quirk, MMC_QUIRK_FIXED_SECURE_ERASE_TRIM_TIME, to set maximum +secure erase size for those devices. This allows 1GB secure erase to +complete in a single operation, reducing time from 10 minutes to just 2 +seconds. 
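+
+Back-of-the-envelope, using the numbers above: 1GB / 3.5MB is ~293
+operations, and at ~2 seconds each that is ~586 seconds, i.e. roughly
+the 10 minutes observed. With the quirk, the same range completes in a
+single ~2 second operation.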
+ +Signed-off-by: Luke Wang +Cc: stable@vger.kernel.org +Signed-off-by: Ulf Hansson +[ adapted to use mmc_can_secure_erase_trim()/mmc_can_trim() and placed helper after mmc_card_no_uhs_ddr50_tuning() ] +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + drivers/mmc/core/card.h | 5 +++++ + drivers/mmc/core/queue.c | 9 +++++++-- + drivers/mmc/core/quirks.h | 9 +++++++++ + include/linux/mmc/card.h | 1 + + 4 files changed, 22 insertions(+), 2 deletions(-) + +--- a/drivers/mmc/core/card.h ++++ b/drivers/mmc/core/card.h +@@ -300,4 +300,9 @@ static inline int mmc_card_no_uhs_ddr50_ + return c->quirks & MMC_QUIRK_NO_UHS_DDR50_TUNING; + } + ++static inline int mmc_card_fixed_secure_erase_trim_time(const struct mmc_card *c) ++{ ++ return c->quirks & MMC_QUIRK_FIXED_SECURE_ERASE_TRIM_TIME; ++} ++ + #endif +--- a/drivers/mmc/core/queue.c ++++ b/drivers/mmc/core/queue.c +@@ -184,8 +184,13 @@ static void mmc_queue_setup_discard(stru + return; + + lim->max_hw_discard_sectors = max_discard; +- if (mmc_can_secure_erase_trim(card)) +- lim->max_secure_erase_sectors = max_discard; ++ if (mmc_can_secure_erase_trim(card)) { ++ if (mmc_card_fixed_secure_erase_trim_time(card)) ++ lim->max_secure_erase_sectors = UINT_MAX >> card->erase_shift; ++ else ++ lim->max_secure_erase_sectors = max_discard; ++ } ++ + if (mmc_can_trim(card) && card->erased_byte == 0) + lim->max_write_zeroes_sectors = max_discard; + +--- a/drivers/mmc/core/quirks.h ++++ b/drivers/mmc/core/quirks.h +@@ -153,6 +153,15 @@ static const struct mmc_fixup __maybe_un + MMC_FIXUP("M62704", CID_MANFID_KINGSTON, 0x0100, add_quirk_mmc, + MMC_QUIRK_TRIM_BROKEN), + ++ /* ++ * On Some Kingston eMMCs, secure erase/trim time is independent ++ * of erase size, fixed at approximately 2 seconds. ++ */ ++ MMC_FIXUP("IY2964", CID_MANFID_KINGSTON, 0x0100, add_quirk_mmc, ++ MMC_QUIRK_FIXED_SECURE_ERASE_TRIM_TIME), ++ MMC_FIXUP("IB2932", CID_MANFID_KINGSTON, 0x0100, add_quirk_mmc, ++ MMC_QUIRK_FIXED_SECURE_ERASE_TRIM_TIME), ++ + END_FIXUP + }; + +--- a/include/linux/mmc/card.h ++++ b/include/linux/mmc/card.h +@@ -296,6 +296,7 @@ struct mmc_card { + #define MMC_QUIRK_BROKEN_CACHE_FLUSH (1<<16) /* Don't flush cache until the write has occurred */ + #define MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY (1<<17) /* Disable broken SD poweroff notify support */ + #define MMC_QUIRK_NO_UHS_DDR50_TUNING (1<<18) /* Disable DDR50 tuning */ ++#define MMC_QUIRK_FIXED_SECURE_ERASE_TRIM_TIME (1<<20) /* Secure erase/trim time is fixed regardless of size */ + + bool written_flag; /* Indicates eMMC has been written since power on */ + bool reenable_cmdq; /* Re-enable Command Queue */ diff --git a/queue-6.12/mtd-spinand-winbond-declare-the-qe-bit-on-w25nxxjw.patch b/queue-6.12/mtd-spinand-winbond-declare-the-qe-bit-on-w25nxxjw.patch new file mode 100644 index 0000000000..c359bcf3eb --- /dev/null +++ b/queue-6.12/mtd-spinand-winbond-declare-the-qe-bit-on-w25nxxjw.patch @@ -0,0 +1,46 @@ +From stable+bounces-244819-greg=kroah.com@vger.kernel.org Fri May 8 21:52:24 2026 +From: Sasha Levin +Date: Fri, 8 May 2026 15:52:17 -0400 +Subject: mtd: spinand: winbond: Declare the QE bit on W25NxxJW +To: stable@vger.kernel.org +Cc: Miquel Raynal , Sasha Levin +Message-ID: <20260508195217.1877968-1-sashal@kernel.org> + +From: Miquel Raynal + +[ Upstream commit 7866ce992cf0d3c3b50fe8bf4acb1dbb173a2304 ] + +Factory default for this bit is "set" (at least on the chips I have), +but we must make sure it is actually set by Linux explicitly, as the +bit is writable by an earlier stage. 
+ +Fixes: 6a804fb72de5 ("mtd: spinand: winbond: add support for serial NAND flash") +Cc: stable@vger.kernel.org +Signed-off-by: Miquel Raynal +[ adapted chip name W25N02JW to W25N02JWZEIF and applied flag change via read_cache_variants context instead of read_cache_dual_quad_dtr_variants ] +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + drivers/mtd/nand/spi/winbond.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/mtd/nand/spi/winbond.c ++++ b/drivers/mtd/nand/spi/winbond.c +@@ -240,7 +240,7 @@ static const struct spinand_info winbond + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), +- 0, ++ SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&w25n01jw_ooblayout, NULL)), + SPINAND_INFO("W25N02JWZEIF", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbf, 0x22), +@@ -249,7 +249,7 @@ static const struct spinand_info winbond + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), +- 0, ++ SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)), + SPINAND_INFO("W25N512GW", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xba, 0x20), diff --git a/queue-6.12/net-stmmac-avoid-shadowing-global-buf_sz.patch b/queue-6.12/net-stmmac-avoid-shadowing-global-buf_sz.patch new file mode 100644 index 0000000000..80c3eb53cc --- /dev/null +++ b/queue-6.12/net-stmmac-avoid-shadowing-global-buf_sz.patch @@ -0,0 +1,51 @@ +From stable+bounces-245025-greg=kroah.com@vger.kernel.org Sun May 10 16:22:57 2026 +From: Sasha Levin +Date: Sun, 10 May 2026 10:22:45 -0400 +Subject: net: stmmac: avoid shadowing global buf_sz +To: stable@vger.kernel.org +Cc: "Russell King (Oracle)" , Furong Xu <0x1207@gmail.com>, Jakub Kicinski , Sasha Levin +Message-ID: <20260510142247.4179438-1-sashal@kernel.org> + +From: "Russell King (Oracle)" + +[ Upstream commit 876cfb20e8892143c0c967b3657074f9131f9b5f ] + +stmmac_rx() declares a local variable named "buf_sz" but there is also +a global variable for a module parameter which is called the same. To +avoid confusion, rename the local variable. 
+ +Signed-off-by: Russell King (Oracle) +Reviewed-by: Furong Xu <0x1207@gmail.com> +Link: https://patch.msgid.link/E1tpswi-005U6C-Py@rmk-PC.armlinux.org.uk +Signed-off-by: Jakub Kicinski +Stable-dep-of: 0bb05e6adfa9 ("net: stmmac: Prevent NULL deref when RX memory exhausted") +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -5406,10 +5406,10 @@ static int stmmac_rx(struct stmmac_priv + struct sk_buff *skb = NULL; + struct stmmac_xdp_buff ctx; + int xdp_status = 0; +- int buf_sz; ++ int bufsz; + + dma_dir = page_pool_get_dma_dir(rx_q->page_pool); +- buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; ++ bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; + limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit); + + if (netif_msg_rx_status(priv)) { +@@ -5524,7 +5524,7 @@ read_again: + dma_sync_single_for_cpu(priv->device, buf->addr, + buf1_len, dma_dir); + +- xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq); ++ xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq); + xdp_prepare_buff(&ctx.xdp, page_address(buf->page), + buf->page_offset, buf1_len, true); + diff --git a/queue-6.12/net-stmmac-prevent-null-deref-when-rx-memory-exhausted.patch b/queue-6.12/net-stmmac-prevent-null-deref-when-rx-memory-exhausted.patch new file mode 100644 index 0000000000..97d8cf6829 --- /dev/null +++ b/queue-6.12/net-stmmac-prevent-null-deref-when-rx-memory-exhausted.patch @@ -0,0 +1,120 @@ +From stable+bounces-245027-greg=kroah.com@vger.kernel.org Sun May 10 16:22:54 2026 +From: Sasha Levin +Date: Sun, 10 May 2026 10:22:47 -0400 +Subject: net: stmmac: Prevent NULL deref when RX memory exhausted +To: stable@vger.kernel.org +Cc: Sam Edwards , Russell King , Sam Edwards , Paolo Abeni , Sasha Levin +Message-ID: <20260510142247.4179438-3-sashal@kernel.org> + +From: Sam Edwards + +[ Upstream commit 0bb05e6adfa99a2ea1fee1125cc0953409f83ed8 ] + +The CPU receives frames from the MAC through conventional DMA: the CPU +allocates buffers for the MAC, then the MAC fills them and returns +ownership to the CPU. For each hardware RX queue, the CPU and MAC +coordinate through a shared ring array of DMA descriptors: one +descriptor per DMA buffer. Each descriptor includes the buffer's +physical address and a status flag ("OWN") indicating which side owns +the buffer: OWN=0 for CPU, OWN=1 for MAC. The CPU is only allowed to set +the flag and the MAC is only allowed to clear it, and both must move +through the ring in sequence: thus the ring is used for both +"submissions" and "completions." + +In the stmmac driver, stmmac_rx() bookmarks its position in the ring +with the `cur_rx` index. The main receive loop in that function checks +for rx_descs[cur_rx].own=0, gives the corresponding buffer to the +network stack (NULLing the pointer), and increments `cur_rx` modulo the +ring size. After the loop exits, stmmac_rx_refill(), which bookmarks its +position with `dirty_rx`, allocates fresh buffers and rearms the +descriptors (setting OWN=1). If it fails any allocation, it simply stops +early (leaving OWN=0) and will retry where it left off when next called. 
+ +This means descriptors have a three-stage lifecycle (terms my own): +- `empty` (OWN=1, buffer valid) +- `full` (OWN=0, buffer valid and populated) +- `dirty` (OWN=0, buffer NULL) + +But because stmmac_rx() only checks OWN, it confuses `full`/`dirty`. In +the past (see 'Fixes:'), there was a bug where the loop could cycle +`cur_rx` all the way back to the first descriptor it dirtied, resulting +in a NULL dereference when mistaken for `full`. The aforementioned +commit resolved that *specific* failure by capping the loop's iteration +limit at `dma_rx_size - 1`, but this is only a partial fix: if the +previous stmmac_rx_refill() didn't complete, then there are leftover +`dirty` descriptors that the loop might encounter without needing to +cycle fully around. The current code therefore panics (see 'Closes:') +when stmmac_rx_refill() is memory-starved long enough for `cur_rx` to +catch up to `dirty_rx`. + +Fix this by explicitly checking, before advancing `cur_rx`, if the next +entry is dirty; exit the loop if so. This prevents processing of the +final, used descriptor until stmmac_rx_refill() succeeds, but +fully prevents the `cur_rx == dirty_rx` ambiguity as the previous bugfix +intended: so remove the clamp as well. Since stmmac_rx_zc() is a +copy-paste-and-tweak of stmmac_rx() and the code structure is identical, +any fix to stmmac_rx() will also need a corresponding fix for +stmmac_rx_zc(). Therefore, apply the same check there. + +In stmmac_rx() (not stmmac_rx_zc()), a related bug remains: after the +MAC sets OWN=0 on the final descriptor, it will be unable to send any +further DMA-complete IRQs until it's given more `empty` descriptors. +Currently, the driver simply *hopes* that the next stmmac_rx_refill() +succeeds, risking an indefinite stall of the receive process if not. But +this is not a regression, so it can be addressed in a future change. 
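+
+A minimal sketch of the added check (names as in the driver; the actual
+hunks are below):
+
+  next_entry = STMMAC_NEXT_ENTRY(rx_q->cur_rx,
+                                 priv->dma_conf.dma_rx_size);
+  if (unlikely(next_entry == rx_q->dirty_rx))
+    break; /* next descriptor is `dirty`, not `full`: stop here */
+  rx_q->cur_rx = next_entry;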
+ +Fixes: b6cb4541853c7 ("net: stmmac: avoid rx queue overrun") +Closes: https://bugzilla.kernel.org/show_bug.cgi?id=221010 +Cc: stable@vger.kernel.org +Suggested-by: Russell King +Signed-off-by: Sam Edwards +Link: https://patch.msgid.link/20260422044503.5349-1-CFSworks@gmail.com +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 19 ++++++++++++------- + 1 file changed, 12 insertions(+), 7 deletions(-) + +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -5270,9 +5270,12 @@ read_again: + break; + + /* Prefetch the next RX descriptor */ +- rx_q->cur_rx = STMMAC_NEXT_ENTRY(rx_q->cur_rx, +- priv->dma_conf.dma_rx_size); +- next_entry = rx_q->cur_rx; ++ next_entry = STMMAC_NEXT_ENTRY(rx_q->cur_rx, ++ priv->dma_conf.dma_rx_size); ++ if (unlikely(next_entry == rx_q->dirty_rx)) ++ break; ++ ++ rx_q->cur_rx = next_entry; + + if (priv->extend_desc) + np = (struct dma_desc *)(rx_q->dma_erx + next_entry); +@@ -5410,7 +5413,6 @@ static int stmmac_rx(struct stmmac_priv + + dma_dir = page_pool_get_dma_dir(rx_q->page_pool); + bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; +- limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit); + + if (netif_msg_rx_status(priv)) { + void *rx_head; +@@ -5466,9 +5468,12 @@ read_again: + if (unlikely(status & dma_own)) + break; + +- rx_q->cur_rx = STMMAC_NEXT_ENTRY(rx_q->cur_rx, +- priv->dma_conf.dma_rx_size); +- next_entry = rx_q->cur_rx; ++ next_entry = STMMAC_NEXT_ENTRY(rx_q->cur_rx, ++ priv->dma_conf.dma_rx_size); ++ if (unlikely(next_entry == rx_q->dirty_rx)) ++ break; ++ ++ rx_q->cur_rx = next_entry; + + if (priv->extend_desc) + np = (struct dma_desc *)(rx_q->dma_erx + next_entry); diff --git a/queue-6.12/net-stmmac-rename-stmmac_get_entry-stmmac_next_entry.patch b/queue-6.12/net-stmmac-rename-stmmac_get_entry-stmmac_next_entry.patch new file mode 100644 index 0000000000..2bb1e8e535 --- /dev/null +++ b/queue-6.12/net-stmmac-rename-stmmac_get_entry-stmmac_next_entry.patch @@ -0,0 +1,181 @@ +From stable+bounces-245026-greg=kroah.com@vger.kernel.org Sun May 10 16:22:56 2026 +From: Sasha Levin +Date: Sun, 10 May 2026 10:22:46 -0400 +Subject: net: stmmac: rename STMMAC_GET_ENTRY() -> STMMAC_NEXT_ENTRY() +To: stable@vger.kernel.org +Cc: "Russell King (Oracle)" , Jakub Kicinski , Sasha Levin +Message-ID: <20260510142247.4179438-2-sashal@kernel.org> + +From: "Russell King (Oracle)" + +[ Upstream commit 6b4286e0550814cdc4b897f881ec1fa8b0313227 ] + +STMMAC_GET_ENTRY() doesn't describe what this macro is doing - it is +incrementing the provided index for the circular array of descriptors. +Replace "GET" with "NEXT" as this better describes the action here. 
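+
+The macro body is unchanged; it advances an index around a power-of-two
+ring via masking:
+
+  #define STMMAC_NEXT_ENTRY(x, size) ((x + 1) & (size - 1))
+
+so, for example, with size = 512, STMMAC_NEXT_ENTRY(5, 512) == 6 and
+STMMAC_NEXT_ENTRY(511, 512) == 0 (wrapping back to the start).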
+ +Signed-off-by: Russell King (Oracle) +Link: https://patch.msgid.link/E1w2vba-0000000DbWo-1oL5@rmk-PC.armlinux.org.uk +Signed-off-by: Jakub Kicinski +Stable-dep-of: 0bb05e6adfa9 ("net: stmmac: Prevent NULL deref when RX memory exhausted") +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/ethernet/stmicro/stmmac/chain_mode.c | 2 - + drivers/net/ethernet/stmicro/stmmac/common.h | 2 - + drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 2 - + drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 26 +++++++++++----------- + 4 files changed, 16 insertions(+), 16 deletions(-) + +--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c ++++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +@@ -47,7 +47,7 @@ static int jumbo_frm(struct stmmac_tx_qu + + while (len != 0) { + tx_q->tx_skbuff[entry] = NULL; +- entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); ++ entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size); + desc = tx_q->dma_tx + entry; + + if (len > bmax) { +--- a/drivers/net/ethernet/stmicro/stmmac/common.h ++++ b/drivers/net/ethernet/stmicro/stmmac/common.h +@@ -55,7 +55,7 @@ + #define DMA_MIN_RX_SIZE 64 + #define DMA_MAX_RX_SIZE 1024 + #define DMA_DEFAULT_RX_SIZE 512 +-#define STMMAC_GET_ENTRY(x, size) ((x + 1) & (size - 1)) ++#define STMMAC_NEXT_ENTRY(x, size) ((x + 1) & (size - 1)) + + #undef FRAME_FILTER_DEBUG + /* #define FRAME_FILTER_DEBUG */ +--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c ++++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +@@ -51,7 +51,7 @@ static int jumbo_frm(struct stmmac_tx_qu + stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, + STMMAC_RING_MODE, 0, false, skb->len); + tx_q->tx_skbuff[entry] = NULL; +- entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); ++ entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size); + + if (priv->extend_desc) + desc = (struct dma_desc *)(tx_q->dma_etx + entry); +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -2583,7 +2583,7 @@ static bool stmmac_xdp_xmit_zc(struct st + xsk_tx_metadata_to_compl(meta, + &tx_q->tx_skbuff_dma[entry].xsk_meta); + +- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); ++ tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); + entry = tx_q->cur_tx; + } + u64_stats_update_begin(&txq_stats->napi_syncp); +@@ -2754,7 +2754,7 @@ static int stmmac_tx_clean(struct stmmac + + stmmac_release_tx_desc(priv, p, priv->mode); + +- entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); ++ entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size); + } + tx_q->dirty_tx = entry; + +@@ -4076,7 +4076,7 @@ static bool stmmac_vlan_insert(struct st + return false; + + stmmac_set_tx_owner(priv, p); +- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); ++ tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); + return true; + } + +@@ -4104,7 +4104,7 @@ static void stmmac_tso_allocator(struct + while (tmp_len > 0) { + dma_addr_t curr_addr; + +- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, ++ tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, + priv->dma_conf.dma_tx_size); + WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); + +@@ -4250,7 +4250,7 @@ static netdev_tx_t stmmac_tso_xmit(struc + + stmmac_set_mss(priv, mss_desc, mss); + tx_q->mss = mss; +- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, ++ tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, + priv->dma_conf.dma_tx_size); + 
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); + } +@@ -4369,7 +4369,7 @@ static netdev_tx_t stmmac_tso_xmit(struc + * ndo_start_xmit will fill this descriptor the next time it's + * called and stmmac_tx_clean may clean up to this descriptor. + */ +- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); ++ tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); + + if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { + netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", +@@ -4568,7 +4568,7 @@ static netdev_tx_t stmmac_xmit(struct sk + int len = skb_frag_size(frag); + bool last_segment = (i == (nfrags - 1)); + +- entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); ++ entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size); + WARN_ON(tx_q->tx_skbuff[entry]); + + if (likely(priv->extend_desc)) +@@ -4638,7 +4638,7 @@ static netdev_tx_t stmmac_xmit(struct sk + * ndo_start_xmit will fill this descriptor the next time it's + * called and stmmac_tx_clean may clean up to this descriptor. + */ +- entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); ++ entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size); + tx_q->cur_tx = entry; + + if (netif_msg_pktdata(priv)) { +@@ -4807,7 +4807,7 @@ static inline void stmmac_rx_refill(stru + dma_wmb(); + stmmac_set_rx_owner(priv, p, use_rx_wd); + +- entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); ++ entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_rx_size); + } + rx_q->dirty_rx = entry; + rx_q->rx_tail_addr = rx_q->dma_rx_phy + +@@ -4941,7 +4941,7 @@ static int stmmac_xdp_xmit_xdpf(struct s + + stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); + +- entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); ++ entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size); + tx_q->cur_tx = entry; + + return STMMAC_XDP_TX; +@@ -5175,7 +5175,7 @@ static bool stmmac_rx_refill_zc(struct s + dma_wmb(); + stmmac_set_rx_owner(priv, rx_desc, use_rx_wd); + +- entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); ++ entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_rx_size); + } + + if (rx_desc) { +@@ -5270,7 +5270,7 @@ read_again: + break; + + /* Prefetch the next RX descriptor */ +- rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, ++ rx_q->cur_rx = STMMAC_NEXT_ENTRY(rx_q->cur_rx, + priv->dma_conf.dma_rx_size); + next_entry = rx_q->cur_rx; + +@@ -5466,7 +5466,7 @@ read_again: + if (unlikely(status & dma_own)) + break; + +- rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, ++ rx_q->cur_rx = STMMAC_NEXT_ENTRY(rx_q->cur_rx, + priv->dma_conf.dma_rx_size); + next_entry = rx_q->cur_rx; + diff --git a/queue-6.12/octeon_ep_vf-add-null-check-for-napi_build_skb.patch b/queue-6.12/octeon_ep_vf-add-null-check-for-napi_build_skb.patch new file mode 100644 index 0000000000..baa0ec655d --- /dev/null +++ b/queue-6.12/octeon_ep_vf-add-null-check-for-napi_build_skb.patch @@ -0,0 +1,86 @@ +From stable+bounces-244069-greg=kroah.com@vger.kernel.org Tue May 5 12:06:42 2026 +From: Sasha Levin +Date: Tue, 5 May 2026 05:54:50 -0400 +Subject: octeon_ep_vf: add NULL check for napi_build_skb() +To: stable@vger.kernel.org +Cc: David Carlier , Jakub Kicinski , Sasha Levin +Message-ID: <20260505095450.517892-1-sashal@kernel.org> + +From: David Carlier + +[ Upstream commit dd66b42854705e4e4ee7f14d260f86c578bed3e3 ] + +napi_build_skb() can return NULL on allocation failure. 
In +__octep_vf_oq_process_rx(), the result is used directly without a NULL +check in both the single-buffer and multi-fragment paths, leading to a +NULL pointer dereference. + +Add NULL checks after both napi_build_skb() calls, properly advancing +descriptors and consuming remaining fragments on failure. + +Fixes: 1cd3b407977c ("octeon_ep_vf: add Tx/Rx processing and interrupt support") +Cc: stable@vger.kernel.org +Signed-off-by: David Carlier +Link: https://patch.msgid.link/20260409184009.930359-3-devnexen@gmail.com +Signed-off-by: Jakub Kicinski +[ inlined missing octep_vf_oq_next_idx() helper as read_idx++ with wraparound ] +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c | 36 +++++++++++++++- + 1 file changed, 34 insertions(+), 2 deletions(-) + +--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c ++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c +@@ -409,10 +409,17 @@ static int __octep_vf_oq_process_rx(stru + data_offset = OCTEP_VF_OQ_RESP_HW_SIZE; + rx_ol_flags = 0; + } +- rx_bytes += buff_info->len; +- + if (buff_info->len <= oq->max_single_buffer_size) { + skb = napi_build_skb((void *)resp_hw, PAGE_SIZE); ++ if (!skb) { ++ oq->stats->alloc_failures++; ++ desc_used++; ++ read_idx++; ++ if (read_idx == oq->max_count) ++ read_idx = 0; ++ continue; ++ } ++ rx_bytes += buff_info->len; + skb_reserve(skb, data_offset); + skb_put(skb, buff_info->len); + read_idx++; +@@ -424,6 +431,31 @@ static int __octep_vf_oq_process_rx(stru + u16 data_len; + + skb = napi_build_skb((void *)resp_hw, PAGE_SIZE); ++ if (!skb) { ++ oq->stats->alloc_failures++; ++ desc_used++; ++ read_idx++; ++ if (read_idx == oq->max_count) ++ read_idx = 0; ++ data_len = buff_info->len - oq->max_single_buffer_size; ++ while (data_len) { ++ dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr, ++ PAGE_SIZE, DMA_FROM_DEVICE); ++ buff_info = (struct octep_vf_rx_buffer *) ++ &oq->buff_info[read_idx]; ++ buff_info->page = NULL; ++ if (data_len < oq->buffer_size) ++ data_len = 0; ++ else ++ data_len -= oq->buffer_size; ++ desc_used++; ++ read_idx++; ++ if (read_idx == oq->max_count) ++ read_idx = 0; ++ } ++ continue; ++ } ++ rx_bytes += buff_info->len; + skb_reserve(skb, data_offset); + /* Head fragment includes response header(s); + * subsequent fragments contains only data. diff --git a/queue-6.12/printk-add-print_hex_dump_devel.patch b/queue-6.12/printk-add-print_hex_dump_devel.patch new file mode 100644 index 0000000000..9d095a113d --- /dev/null +++ b/queue-6.12/printk-add-print_hex_dump_devel.patch @@ -0,0 +1,49 @@ +From stable+bounces-244992-greg=kroah.com@vger.kernel.org Sun May 10 04:34:48 2026 +From: Sasha Levin +Date: Sat, 9 May 2026 22:34:41 -0400 +Subject: printk: add print_hex_dump_devel() +To: stable@vger.kernel.org +Cc: Thorsten Blum , Herbert Xu , John Ogness , Sasha Levin +Message-ID: <20260510023442.3940261-1-sashal@kernel.org> + +From: Thorsten Blum + +[ Upstream commit d134feeb5df33fbf77f482f52a366a44642dba09 ] + +Add print_hex_dump_devel() as the hex dump equivalent of pr_devel(), +which emits output only when DEBUG is enabled, but keeps call sites +compiled otherwise. 
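+
+A typical call site would look like this (illustrative only; it mirrors
+the print_hex_dump_debug() calling convention, with DUMP_PREFIX_OFFSET
+being one of the standard prefix types):
+
+  print_hex_dump_devel("key: ", DUMP_PREFIX_OFFSET, 16, 1,
+                       buf, len, false);
+
+With DEBUG defined this prints at KERN_DEBUG; otherwise the empty inline
+stub compiles away while still type-checking the call site.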
+ +Suggested-by: Herbert Xu +Signed-off-by: Thorsten Blum +Reviewed-by: John Ogness +Signed-off-by: Herbert Xu +Stable-dep-of: 177730a273b1 ("crypto: caam - guard HMAC key hex dumps in hash_digest_key") +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + include/linux/printk.h | 13 +++++++++++++ + 1 file changed, 13 insertions(+) + +--- a/include/linux/printk.h ++++ b/include/linux/printk.h +@@ -786,6 +786,19 @@ static inline void print_hex_dump_debug( + } + #endif + ++#if defined(DEBUG) ++#define print_hex_dump_devel(prefix_str, prefix_type, rowsize, \ ++ groupsize, buf, len, ascii) \ ++ print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \ ++ groupsize, buf, len, ascii) ++#else ++static inline void print_hex_dump_devel(const char *prefix_str, int prefix_type, ++ int rowsize, int groupsize, ++ const void *buf, size_t len, bool ascii) ++{ ++} ++#endif ++ + /** + * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params + * @prefix_str: string to prefix each line with; diff --git a/queue-6.12/rust-pin-init-fix-incorrect-accessor-reference-lifetime.patch b/queue-6.12/rust-pin-init-fix-incorrect-accessor-reference-lifetime.patch new file mode 100644 index 0000000000..d63e1d2bfd --- /dev/null +++ b/queue-6.12/rust-pin-init-fix-incorrect-accessor-reference-lifetime.patch @@ -0,0 +1,269 @@ +From gary@garyguo.net Tue May 12 17:00:58 2026 +From: Gary Guo +Date: Tue, 12 May 2026 16:00:28 +0100 +Subject: rust: pin-init: fix incorrect accessor reference lifetime +To: gregkh@linuxfoundation.org, ojeda@kernel.org +Cc: stable@vger.kernel.org, Gary Guo +Message-ID: <20260512150028.3231198-1-gary@garyguo.net> + +From: Gary Guo + +commit 68bf102226cf2199dc609b67c1e847cad4de4b57 upstream + +When a field has been initialized, `init!`/`pin_init!` create a reference +or pinned reference to the field so it can be accessed later during the +initialization of other fields. However, the reference it created is +incorrectly `&'static` rather than just the scope of the initializer. + +This means that you can do + + init!(Foo { + a: 1, + _: { + let b: &'static u32 = a; + } + }) + +which is unsound. + +This is caused by `&mut (*$slot).$ident`, which actually allows arbitrary +lifetime, so this is effectively `'static`. + +Fix it by adding `let_binding` method on `DropGuard` to shorten lifetime. +This results in exactly what we want for these accessors. The safety and +invariant comments of `DropGuard` have been reworked; instead of reasoning +about what caller can do with the guard, express it in a way that the +ownership is transferred to the guard and `forget` takes it back, so the +unsafe operations within the `DropGuard` can be more easily justified. + +Assisted-by: Claude:claude-3-opus +Signed-off-by: Gary Guo +Signed-off-by: Greg Kroah-Hartman +--- + rust/kernel/init/__internal.rs | 28 ++++++++---- + rust/kernel/init/macros.rs | 91 ++++++++++++++++++++++++----------------- + 2 files changed, 73 insertions(+), 46 deletions(-) + +--- a/rust/kernel/init/__internal.rs ++++ b/rust/kernel/init/__internal.rs +@@ -189,32 +189,42 @@ impl StackInit { + /// When a value of this type is dropped, it drops a `T`. + /// + /// Can be forgotten to prevent the drop. ++/// ++/// # Invariants ++/// ++/// - `ptr` is valid and properly aligned. ++/// - `*ptr` is initialized and owned by this guard. + pub struct DropGuard { + ptr: *mut T, + } + + impl DropGuard { +- /// Creates a new [`DropGuard`]. It will [`ptr::drop_in_place`] `ptr` when it gets dropped. 
++ /// Creates a drop guard and transfer the ownership of the pointer content. + /// +- /// # Safety ++ /// The ownership is only relinquished if the guard is forgotten via [`core::mem::forget`]. + /// +- /// `ptr` must be a valid pointer. ++ /// # Safety + /// +- /// It is the callers responsibility that `self` will only get dropped if the pointee of `ptr`: +- /// - has not been dropped, +- /// - is not accessible by any other means, +- /// - will not be dropped by any other means. ++ /// - `ptr` is valid and properly aligned. ++ /// - `*ptr` is initialized, and the ownership is transferred to this guard. + #[inline] + pub unsafe fn new(ptr: *mut T) -> Self { ++ // INVARIANT: By safety requirement. + Self { ptr } + } ++ ++ /// Create a let binding for accessor use. ++ #[inline] ++ pub fn let_binding(&mut self) -> &mut T { ++ // SAFETY: Per type invariant. ++ unsafe { &mut *self.ptr } ++ } + } + + impl Drop for DropGuard { + #[inline] + fn drop(&mut self) { +- // SAFETY: A `DropGuard` can only be constructed using the unsafe `new` function +- // ensuring that this operation is safe. ++ // SAFETY: `self.ptr` is valid, properly aligned and `*self.ptr` is owned by this guard. + unsafe { ptr::drop_in_place(self.ptr) } + } + } +--- a/rust/kernel/init/macros.rs ++++ b/rust/kernel/init/macros.rs +@@ -1232,27 +1232,33 @@ macro_rules! __init_internal { + // return when an error/panic occurs. + // We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`. + unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? }; +- // NOTE: the field accessor ensures that the initialized field is properly aligned. ++ // NOTE: this ensures that the initialized field is properly aligned. + // Unaligned fields will cause the compiler to emit E0793. We do not support + // unaligned fields since `Init::__init` requires an aligned pointer; the call to + // `ptr::write` below has the same requirement. +- #[allow(unused_variables, unused_assignments)] +- // SAFETY: +- // - the project function does the correct field projection, +- // - the field has been initialized, +- // - the reference is only valid until the end of the initializer. +- let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) }); ++ // SAFETY: the field has been initialized. ++ let _ = unsafe { &mut (*$slot).$field }; + + // Create the drop guard: + // + // We rely on macro hygiene to make it impossible for users to access this local variable. + // We use `paste!` to create new hygiene for `$field`. + ::kernel::macros::paste! { +- // SAFETY: We forget the guard later when initialization has succeeded. +- let [< __ $field _guard >] = unsafe { ++ // SAFETY: ++ // - `addr_of_mut!((*$slot).$field)` is valid. ++ // - `(*$slot).$field` has been initialized above. ++ // - We only need the ownership to the pointee back when initialization has ++ // succeeded, where we `forget` the guard. ++ let mut [< __ $field _guard >] = unsafe { + $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field)) + }; + ++ // NOTE: The reference is derived from the guard so that it only lives as long as ++ // the guard does and cannot escape the scope. ++ #[allow(unused_variables)] ++ // SAFETY: the project function does the correct field projection. ++ let $field = unsafe { $data.[< __project_ $field >]([< __ $field _guard >].let_binding()) }; ++ + $crate::__init_internal!(init_slot($use_data): + @data($data), + @slot($slot), +@@ -1275,27 +1281,30 @@ macro_rules! 
__init_internal { + // return when an error/panic occurs. + unsafe { $crate::init::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? }; + +- // NOTE: the field accessor ensures that the initialized field is properly aligned. ++ // NOTE: this ensures that the initialized field is properly aligned. + // Unaligned fields will cause the compiler to emit E0793. We do not support + // unaligned fields since `Init::__init` requires an aligned pointer; the call to + // `ptr::write` below has the same requirement. +- #[allow(unused_variables, unused_assignments)] +- // SAFETY: +- // - the field is not structurally pinned, since the line above must compile, +- // - the field has been initialized, +- // - the reference is only valid until the end of the initializer. +- let $field = unsafe { &mut (*$slot).$field }; ++ // SAFETY: the field has been initialized. ++ let _ = unsafe { &mut (*$slot).$field }; + + // Create the drop guard: + // + // We rely on macro hygiene to make it impossible for users to access this local variable. + // We use `paste!` to create new hygiene for `$field`. + ::kernel::macros::paste! { +- // SAFETY: We forget the guard later when initialization has succeeded. +- let [< __ $field _guard >] = unsafe { ++ // SAFETY: ++ // - `addr_of_mut!((*$slot).$field)` is valid. ++ // - `(*$slot).$field` has been initialized above. ++ // - We only need the ownership to the pointee back when initialization has ++ // succeeded, where we `forget` the guard. ++ let mut [< __ $field _guard >] = unsafe { + $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field)) + }; + ++ #[allow(unused_variables)] ++ let $field = [< __ $field _guard >].let_binding(); ++ + $crate::__init_internal!(init_slot(): + @data($data), + @slot($slot), +@@ -1319,28 +1328,30 @@ macro_rules! __init_internal { + unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) }; + } + +- // NOTE: the field accessor ensures that the initialized field is properly aligned. ++ // NOTE: this ensures that the initialized field is properly aligned. + // Unaligned fields will cause the compiler to emit E0793. We do not support + // unaligned fields since `Init::__init` requires an aligned pointer; the call to + // `ptr::write` below has the same requirement. +- #[allow(unused_variables, unused_assignments)] +- // SAFETY: +- // - the field is not structurally pinned, since no `use_data` was required to create this +- // initializer, +- // - the field has been initialized, +- // - the reference is only valid until the end of the initializer. +- let $field = unsafe { &mut (*$slot).$field }; ++ // SAFETY: the field has been initialized. ++ let _ = unsafe { &mut (*$slot).$field }; + + // Create the drop guard: + // + // We rely on macro hygiene to make it impossible for users to access this local variable. + // We use `paste!` to create new hygiene for `$field`. + ::kernel::macros::paste! { +- // SAFETY: We forget the guard later when initialization has succeeded. +- let [< __ $field _guard >] = unsafe { ++ // SAFETY: ++ // - `addr_of_mut!((*$slot).$field)` is valid. ++ // - `(*$slot).$field` has been initialized above. ++ // - We only need the ownership to the pointee back when initialization has ++ // succeeded, where we `forget` the guard. 
++ let mut [< __ $field _guard >] = unsafe { + $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field)) + }; + ++ #[allow(unused_variables)] ++ let $field = [< __ $field _guard >].let_binding(); ++ + $crate::__init_internal!(init_slot(): + @data($data), + @slot($slot), +@@ -1363,27 +1374,33 @@ macro_rules! __init_internal { + // SAFETY: The memory at `slot` is uninitialized. + unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) }; + } +- // NOTE: the field accessor ensures that the initialized field is properly aligned. ++ // NOTE: this ensures that the initialized field is properly aligned. + // Unaligned fields will cause the compiler to emit E0793. We do not support + // unaligned fields since `Init::__init` requires an aligned pointer; the call to + // `ptr::write` below has the same requirement. +- #[allow(unused_variables, unused_assignments)] +- // SAFETY: +- // - the project function does the correct field projection, +- // - the field has been initialized, +- // - the reference is only valid until the end of the initializer. +- let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) }); ++ // SAFETY: the field has been initialized. ++ let _ = unsafe { &mut (*$slot).$field }; + + // Create the drop guard: + // + // We rely on macro hygiene to make it impossible for users to access this local variable. + // We use `paste!` to create new hygiene for `$field`. + $crate::macros::paste! { +- // SAFETY: We forget the guard later when initialization has succeeded. +- let [< __ $field _guard >] = unsafe { ++ // SAFETY: ++ // - `addr_of_mut!((*$slot).$field)` is valid. ++ // - `(*$slot).$field` has been initialized above. ++ // - We only need the ownership to the pointee back when initialization has ++ // succeeded, where we `forget` the guard. ++ let mut [< __ $field _guard >] = unsafe { + $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field)) + }; + ++ // NOTE: The reference is derived from the guard so that it only lives as long as ++ // the guard does and cannot escape the scope. ++ #[allow(unused_variables)] ++ // SAFETY: the project function does the correct field projection. 
++ let $field = unsafe { $data.[< __project_ $field >]([< __ $field _guard >].let_binding()) }; ++ + $crate::__init_internal!(init_slot($use_data): + @data($data), + @slot($slot), diff --git a/queue-6.12/series b/queue-6.12/series index cc683df651..4b79bc728e 100644 --- a/queue-6.12/series +++ b/queue-6.12/series @@ -176,3 +176,30 @@ loongarch-kvm-move-unconditional-delay-into-timer-clear-scenery.patch loongarch-kvm-use-kvm_set_pte-in-kvm_flush_pte.patch loongarch-use-per-root-bridge-pcih-flag-to-skip-mem-resource-fixup.patch bpf-fix-use-after-free-in-arena_vm_close-on-fork.patch +fbdev-defio-disconnect-deferred-i-o-from-the-lifetime-of-struct-fb_info.patch +fs-prepare-for-adding-lsm-blob-to-backing_file.patch +dma-mapping-drop-unneeded-includes-from-dma-mapping.h.patch +dma-mapping-add-__dma_from_device_group_begin-end.patch +hwmon-powerz-avoid-cacheline-sharing-for-dma-buffer.patch +octeon_ep_vf-add-null-check-for-napi_build_skb.patch +mmc-core-optimize-time-for-secure-erase-trim-for-some-kingston-emmcs.patch +udf-fix-partition-descriptor-append-bookkeeping.patch +mtd-spinand-winbond-declare-the-qe-bit-on-w25nxxjw.patch +hfsplus-fix-uninit-value-by-validating-catalog-record-size.patch +hfsplus-fix-held-lock-freed-on-hfsplus_fill_super.patch +crypto-nx-migrate-to-scomp-api.patch +crypto-nx-fix-bounce-buffer-leaks-in-nx842_crypto_-alloc-free-_ctx.patch +erofs-move-in-out-pages-into-struct-z_erofs_decompress_req.patch +erofs-tidy-up-z_erofs_lz4_handle_overlap.patch +erofs-fix-unsigned-underflow-in-z_erofs_lz4_handle_overlap.patch +gtp-disable-bh-before-calling-udp_tunnel_xmit_skb.patch +printk-add-print_hex_dump_devel.patch +crypto-caam-guard-hmac-key-hex-dumps-in-hash_digest_key.patch +alsa-aloop-fix-peer-runtime-uaf-during-format-change-stop.patch +net-stmmac-avoid-shadowing-global-buf_sz.patch +net-stmmac-rename-stmmac_get_entry-stmmac_next_entry.patch +net-stmmac-prevent-null-deref-when-rx-memory-exhausted.patch +wifi-mt76-mt7925-fix-incorrect-tlv-length-in-clc-command.patch +tracepoint-balance-regfunc-on-func_add-failure-in-tracepoint_add_func.patch +rust-pin-init-fix-incorrect-accessor-reference-lifetime.patch +kvm-arm64-wake-up-from-wfi-when-iqrchip-is-in-userspace.patch diff --git a/queue-6.12/tracepoint-balance-regfunc-on-func_add-failure-in-tracepoint_add_func.patch b/queue-6.12/tracepoint-balance-regfunc-on-func_add-failure-in-tracepoint_add_func.patch new file mode 100644 index 0000000000..b0862dbbaf --- /dev/null +++ b/queue-6.12/tracepoint-balance-regfunc-on-func_add-failure-in-tracepoint_add_func.patch @@ -0,0 +1,56 @@ +From stable+bounces-245057-greg=kroah.com@vger.kernel.org Sun May 10 19:21:37 2026 +From: Sasha Levin +Date: Sun, 10 May 2026 13:20:59 -0400 +Subject: tracepoint: balance regfunc() on func_add() failure in tracepoint_add_func() +To: stable@vger.kernel.org +Cc: David Carlier , Masami Hiramatsu , Mathieu Desnoyers , "Steven Rostedt (Google)" , Sasha Levin +Message-ID: <20260510172059.533474-1-sashal@kernel.org> + +From: David Carlier + +[ Upstream commit fad217e16fded7f3c09f8637b0f6a224d58b5f2e ] + +When a tracepoint goes through the 0 -> 1 transition, tracepoint_add_func() +invokes the subsystem's ext->regfunc() before attempting to install the +new probe via func_add(). 
If func_add() then fails (for example, when +allocate_probes() cannot allocate a new probe array under memory pressure +and returns -ENOMEM), the function returns the error without calling the +matching ext->unregfunc(), leaving the side effects of regfunc() behind +with no installed probe to justify them. + +For syscall tracepoints this is particularly unpleasant: syscall_regfunc() +bumps sys_tracepoint_refcount and sets SYSCALL_TRACEPOINT on every task. +After a leaked failure, the refcount is stuck at a non-zero value with no +consumer, and every task continues paying the syscall trace entry/exit +overhead until reboot. Other subsystems providing regfunc()/unregfunc() +pairs exhibit similarly scoped persistent state. + +Mirror the existing 1 -> 0 cleanup and call ext->unregfunc() in the +func_add() error path, gated on the same condition used there so the +unwind is symmetric with the registration. + +Fixes: 8cf868affdc4 ("tracing: Have the reg function allow to fail") +Cc: stable@vger.kernel.org +Cc: Masami Hiramatsu +Cc: Mathieu Desnoyers +Link: https://patch.msgid.link/20260413190601.21993-1-devnexen@gmail.com +Signed-off-by: David Carlier +Signed-off-by: Steven Rostedt (Google) +[ changed `tp->ext->unregfunc` to `tp->unregfunc` to match older struct layout ] +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + kernel/tracepoint.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/kernel/tracepoint.c ++++ b/kernel/tracepoint.c +@@ -337,6 +337,8 @@ static int tracepoint_add_func(struct tr + lockdep_is_held(&tracepoints_mutex)); + old = func_add(&tp_funcs, func, prio); + if (IS_ERR(old)) { ++ if (tp->unregfunc && !static_key_enabled(&tp->key)) ++ tp->unregfunc(); + WARN_ON_ONCE(warn && PTR_ERR(old) != -ENOMEM); + return PTR_ERR(old); + } diff --git a/queue-6.12/udf-fix-partition-descriptor-append-bookkeeping.patch b/queue-6.12/udf-fix-partition-descriptor-append-bookkeeping.patch new file mode 100644 index 0000000000..0d6b355648 --- /dev/null +++ b/queue-6.12/udf-fix-partition-descriptor-append-bookkeeping.patch @@ -0,0 +1,61 @@ +From stable+bounces-244820-greg=kroah.com@vger.kernel.org Fri May 8 21:52:28 2026 +From: Sasha Levin +Date: Fri, 8 May 2026 15:52:20 -0400 +Subject: udf: fix partition descriptor append bookkeeping +To: stable@vger.kernel.org +Cc: Seohyeon Maeng , Jan Kara , Sasha Levin +Message-ID: <20260508195220.1878050-1-sashal@kernel.org> + +From: Seohyeon Maeng + +[ Upstream commit 08841b06fa64d8edbd1a21ca6e613420c90cc4b8 ] + +Mounting a crafted UDF image with repeated partition descriptors can +trigger a heap out-of-bounds write in part_descs_loc[]. + +handle_partition_descriptor() deduplicates entries by partition number, +but appended slots never record partnum. As a result duplicate +Partition Descriptors are appended repeatedly and num_part_descs keeps +growing. + +Once the table is full, the growth path still sizes the allocation from +partnum even though inserts are indexed by num_part_descs. If partnum is +already aligned to PART_DESC_ALLOC_STEP, ALIGN(partnum, step) can keep +the old capacity and the next append writes past the end of the table. + +Store partnum in the appended slot and size growth from the next append +count so deduplication and capacity tracking follow the same model. 
+ +Fixes: ee4af50ca94f ("udf: Fix mounting of Win7 created UDF filesystems") +Cc: stable@vger.kernel.org +Signed-off-by: Seohyeon Maeng +Link: https://patch.msgid.link/20260310081652.21220-1-bioloidgp@gmail.com +Signed-off-by: Jan Kara +[ replaced kzalloc_objs() helper with equivalent kcalloc() ] +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + fs/udf/super.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/fs/udf/super.c ++++ b/fs/udf/super.c +@@ -1695,8 +1695,9 @@ static struct udf_vds_record *handle_par + return &(data->part_descs_loc[i].rec); + if (data->num_part_descs >= data->size_part_descs) { + struct part_desc_seq_scan_data *new_loc; +- unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP); ++ unsigned int new_size; + ++ new_size = data->num_part_descs + PART_DESC_ALLOC_STEP; + new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL); + if (!new_loc) + return ERR_PTR(-ENOMEM); +@@ -1706,6 +1707,7 @@ static struct udf_vds_record *handle_par + data->part_descs_loc = new_loc; + data->size_part_descs = new_size; + } ++ data->part_descs_loc[data->num_part_descs].partnum = partnum; + return &(data->part_descs_loc[data->num_part_descs++].rec); + } + diff --git a/queue-6.12/wifi-mt76-mt7925-fix-incorrect-tlv-length-in-clc-command.patch b/queue-6.12/wifi-mt76-mt7925-fix-incorrect-tlv-length-in-clc-command.patch new file mode 100644 index 0000000000..e509ccbe7b --- /dev/null +++ b/queue-6.12/wifi-mt76-mt7925-fix-incorrect-tlv-length-in-clc-command.patch @@ -0,0 +1,53 @@ +From stable+bounces-245056-greg=kroah.com@vger.kernel.org Sun May 10 19:20:54 2026 +From: Sasha Levin +Date: Sun, 10 May 2026 13:20:48 -0400 +Subject: wifi: mt76: mt7925: fix incorrect TLV length in CLC command +To: stable@vger.kernel.org +Cc: Quan Zhou , Sean Wang , Felix Fietkau , Sasha Levin +Message-ID: <20260510172048.533185-1-sashal@kernel.org> + +From: Quan Zhou + +[ Upstream commit 62e037aa8cf5a69b7ea63336705a35c897b9db2b ] + +The previous implementation of __mt7925_mcu_set_clc() set the TLV length +field (.len) incorrectly during CLC command construction. The length was +initialized as sizeof(req) - 4, regardless of the actual segment length. +This could cause the WiFi firmware to misinterpret the command payload, +resulting in command execution errors. + +This patch moves the TLV length assignment to after the segment is +selected, and sets .len to sizeof(req) + seg->len - 4, matching the +actual command content. This ensures the firmware receives the +correct TLV length and parses the command properly. 
+ +Fixes: c948b5da6bbe ("wifi: mt76: mt7925: add Mediatek Wi-Fi7 driver for mt7925 chips") +Cc: stable@vger.kernel.org +Signed-off-by: Quan Zhou +Acked-by: Sean Wang +Link: https://patch.msgid.link/f56ae0e705774dfa8aab3b99e5bbdc92cd93523e.1772011204.git.quan.zhou@mediatek.com +Signed-off-by: Felix Fietkau +Signed-off-by: Sasha Levin +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/wireless/mediatek/mt76/mt7925/mcu.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c ++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c +@@ -3261,7 +3261,6 @@ __mt7925_mcu_set_clc(struct mt792x_dev * + u8 rsvd[64]; + } __packed req = { + .tag = cpu_to_le16(0x3), +- .len = cpu_to_le16(sizeof(req) - 4), + + .idx = idx, + .env = env_cap, +@@ -3289,6 +3288,7 @@ __mt7925_mcu_set_clc(struct mt792x_dev * + memcpy(req.type, rule->type, 2); + + req.size = cpu_to_le16(seg->len); ++ req.len = cpu_to_le16(sizeof(req) + seg->len - 4); + skb = __mt76_mcu_msg_alloc(&dev->mt76, &req, + le16_to_cpu(req.size) + sizeof(req), + sizeof(req), GFP_KERNEL);
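
[ Editor's addendum, not part of the queue itself: two illustrations of the
  fixes above. Everything below is illustrative — worked numbers and a
  standalone userspace sketch, not kernel code. ]

The udf overflow is easiest to see with concrete numbers, assuming
PART_DESC_ALLOC_STEP is 2 (its value in fs/udf/super.c at the time of
writing). With the table already full at num_part_descs == size_part_descs
== 2, a third copy of a duplicate Partition Descriptor with partnum == 2
made the old code compute new_size = ALIGN(2, 2) == 2, so the reallocated
table kept its old capacity and the append at index num_part_descs == 2
wrote one element past the end. Sizing from the next append count,
num_part_descs + PART_DESC_ALLOC_STEP == 4, always leaves room.

The pin-init change is the hardest to follow because it lives inside macro
expansions. The sketch below is a minimal userspace Rust model of the
pattern — plain std, no kernel crate, and the names only mirror the patch —
showing why deriving the field reference from the guard ties the
reference's lifetime to the guard, so it cannot escape and dangle:

use core::mem::MaybeUninit;
use core::ptr;

struct DropGuard<T> {
    ptr: *mut T,
}

impl<T> DropGuard<T> {
    /// # Safety
    ///
    /// - `ptr` is valid and properly aligned.
    /// - `*ptr` is initialized, and ownership is transferred to this guard.
    unsafe fn new(ptr: *mut T) -> Self {
        Self { ptr }
    }

    /// Returns a reference whose lifetime is tied to `&mut self`, so the
    /// borrow checker rejects any use of it once the guard is gone.
    fn let_binding(&mut self) -> &mut T {
        // SAFETY: per the contract of `new`, the pointee is initialized.
        unsafe { &mut *self.ptr }
    }
}

impl<T> Drop for DropGuard<T> {
    fn drop(&mut self) {
        // SAFETY: the guard owns the initialized pointee.
        unsafe { ptr::drop_in_place(self.ptr) }
    }
}

fn main() {
    let mut slot = MaybeUninit::<String>::uninit();
    slot.write(String::from("hello"));
    // SAFETY: `slot` was just initialized; ownership moves to the guard.
    let mut guard = unsafe { DropGuard::new(slot.as_mut_ptr()) };
    let field = guard.let_binding();
    field.push_str(", world");
    println!("{field}");
    // On success the init macros `forget` the guard; letting it drop here
    // instead runs drop_in_place, dropping the String exactly once.
}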