--- /dev/null
+From d5325810814ee995debfa0b6c4a22e0391598bef Mon Sep 17 00:00:00 2001
+From: Francesco Dolcini <francesco.dolcini@toradex.com>
+Date: Fri, 20 Mar 2026 08:30:30 +0100
+Subject: arm64: dts: ti: am62-verdin: Enable pullup for eMMC data pins
+
+From: Francesco Dolcini <francesco.dolcini@toradex.com>
+
+commit d5325810814ee995debfa0b6c4a22e0391598bef upstream.
+
+Verdin AM62 board does not have external pullups on eMMC DAT1-DAT7 pins.
+Enable internal pullups on DAT1-DAT7 considering:
+
+ - without a host-side pullup, these lines rely solely on the eMMC
+ device's internal pullup (R_int, 10kohm-150kohm per JEDEC), which may
+ exceed the recommended 50kohm max for 1.8V VCCQ
+ - JEDEC JESD84-B51 Table 200 requires host-side pullups (R_DAT,
+ 10kohm-100kohm) on all data lines to prevent bus floating
+
+Fixes: 316b80246b16 ("arm64: dts: ti: add verdin am62")
+Cc: stable@vger.kernel.org
+Signed-off-by: Francesco Dolcini <francesco.dolcini@toradex.com>
+Link: https://patch.msgid.link/20260320073032.10427-1-francesco@dolcini.it
+Signed-off-by: Vignesh Raghavendra <vigneshr@ti.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
+@@ -507,16 +507,16 @@
+ /* On-module eMMC */
+ pinctrl_sdhci0: main-mmc0-default-pins {
+ pinctrl-single,pins = <
+- AM62X_IOPAD(0x220, PIN_INPUT, 0) /* (Y3) MMC0_CMD */
+- AM62X_IOPAD(0x218, PIN_INPUT, 0) /* (AB1) MMC0_CLK */
+- AM62X_IOPAD(0x214, PIN_INPUT, 0) /* (AA2) MMC0_DAT0 */
+- AM62X_IOPAD(0x210, PIN_INPUT, 0) /* (AA1) MMC0_DAT1 */
+- AM62X_IOPAD(0x20c, PIN_INPUT, 0) /* (AA3) MMC0_DAT2 */
+- AM62X_IOPAD(0x208, PIN_INPUT, 0) /* (Y4) MMC0_DAT3 */
+- AM62X_IOPAD(0x204, PIN_INPUT, 0) /* (AB2) MMC0_DAT4 */
+- AM62X_IOPAD(0x200, PIN_INPUT, 0) /* (AC1) MMC0_DAT5 */
+- AM62X_IOPAD(0x1fc, PIN_INPUT, 0) /* (AD2) MMC0_DAT6 */
+- AM62X_IOPAD(0x1f8, PIN_INPUT, 0) /* (AC2) MMC0_DAT7 */
++ AM62X_IOPAD(0x220, PIN_INPUT, 0) /* (Y3) MMC0_CMD */
++ AM62X_IOPAD(0x218, PIN_INPUT, 0) /* (AB1) MMC0_CLK */
++ AM62X_IOPAD(0x214, PIN_INPUT, 0) /* (AA2) MMC0_DAT0 */
++ AM62X_IOPAD(0x210, PIN_INPUT_PULLUP, 0) /* (AA1) MMC0_DAT1 */
++ AM62X_IOPAD(0x20c, PIN_INPUT_PULLUP, 0) /* (AA3) MMC0_DAT2 */
++ AM62X_IOPAD(0x208, PIN_INPUT_PULLUP, 0) /* (Y4) MMC0_DAT3 */
++ AM62X_IOPAD(0x204, PIN_INPUT_PULLUP, 0) /* (AB2) MMC0_DAT4 */
++ AM62X_IOPAD(0x200, PIN_INPUT_PULLUP, 0) /* (AC1) MMC0_DAT5 */
++ AM62X_IOPAD(0x1fc, PIN_INPUT_PULLUP, 0) /* (AD2) MMC0_DAT6 */
++ AM62X_IOPAD(0x1f8, PIN_INPUT_PULLUP, 0) /* (AC2) MMC0_DAT7 */
+ >;
+ };
+
--- /dev/null
+From 655ef638a2bc3cd0a9eff99a02f83cab94a3a917 Mon Sep 17 00:00:00 2001
+From: Paul Louvel <paul.louvel@bootlin.com>
+Date: Mon, 30 Mar 2026 12:28:18 +0200
+Subject: crypto: talitos - fix SEC1 32k ahash request limitation
+
+From: Paul Louvel <paul.louvel@bootlin.com>
+
+commit 655ef638a2bc3cd0a9eff99a02f83cab94a3a917 upstream.
+
+Since commit c662b043cdca ("crypto: af_alg/hash: Support
+MSG_SPLICE_PAGES"), the crypto core may pass large scatterlists spanning
+multiple pages to drivers supporting ahash operations. As a result, a
+driver can now receive large ahash requests.
+
+The SEC1 engine has a limitation where a single descriptor cannot
+process more than 32k of data. The current implementation attempts to
+handle the entire request within a single descriptor, which leads to
+failures raised by the driver:
+
+ "length exceeds h/w max limit"
+
+Address this limitation by splitting large ahash requests into multiple
+descriptors, each respecting the 32k hardware limit. This allows
+processing arbitrarily large requests.
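+
+As a rough illustration of the chunking (a minimal sketch, with
+submit_one_descriptor() and tmp_sg as hypothetical stand-ins; the driver
+itself does this asynchronously, rescheduling the remainder from the
+completion callback through a workqueue):
+
+	while (nbytes) {
+		unsigned int chunk = min_t(unsigned int, nbytes,
+					   TALITOS1_MAX_DATA_LEN);
+
+		submit_one_descriptor(sg, chunk);	/* <= 32k per descriptor */
+		sg = scatterwalk_ffwd(tmp_sg, sg, chunk); /* tmp_sg: 2-entry array */
+		nbytes -= chunk;
+	}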
+
+Cc: stable@vger.kernel.org
+Fixes: c662b043cdca ("crypto: af_alg/hash: Support MSG_SPLICE_PAGES")
+Signed-off-by: Paul Louvel <paul.louvel@bootlin.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/talitos.c | 216 +++++++++++++++++++++++++++++++----------------
+ 1 file changed, 147 insertions(+), 69 deletions(-)
+
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -12,6 +12,7 @@
+ * All rights reserved.
+ */
+
++#include <linux/workqueue.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/mod_devicetable.h>
+@@ -870,10 +871,18 @@ struct talitos_ahash_req_ctx {
+ unsigned int swinit;
+ unsigned int first;
+ unsigned int last;
++ unsigned int last_request;
+ unsigned int to_hash_later;
+ unsigned int nbuf;
+ struct scatterlist bufsl[2];
+ struct scatterlist *psrc;
++
++ struct scatterlist request_bufsl[2];
++ struct ahash_request *areq;
++ struct scatterlist *request_sl;
++ unsigned int remaining_ahash_request_bytes;
++ unsigned int current_ahash_request_bytes;
++ struct work_struct sec1_ahash_process_remaining;
+ };
+
+ struct talitos_export_state {
+@@ -1759,7 +1768,20 @@ static void ahash_done(struct device *de
+
+ kfree(edesc);
+
+- ahash_request_complete(areq, err);
++ if (err) {
++ ahash_request_complete(areq, err);
++ return;
++ }
++
++ req_ctx->remaining_ahash_request_bytes -=
++ req_ctx->current_ahash_request_bytes;
++
++ if (!req_ctx->remaining_ahash_request_bytes) {
++ ahash_request_complete(areq, 0);
++ return;
++ }
++
++ schedule_work(&req_ctx->sec1_ahash_process_remaining);
+ }
+
+ /*
+@@ -1925,60 +1947,7 @@ static struct talitos_edesc *ahash_edesc
+ nbytes, 0, 0, 0, areq->base.flags, false);
+ }
+
+-static int ahash_init(struct ahash_request *areq)
+-{
+- struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+- struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+- struct device *dev = ctx->dev;
+- struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+- unsigned int size;
+- dma_addr_t dma;
+-
+- /* Initialize the context */
+- req_ctx->buf_idx = 0;
+- req_ctx->nbuf = 0;
+- req_ctx->first = 1; /* first indicates h/w must init its context */
+- req_ctx->swinit = 0; /* assume h/w init of context */
+- size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
+- ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
+- : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+- req_ctx->hw_context_size = size;
+-
+- dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
+- DMA_TO_DEVICE);
+- dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
+-
+- return 0;
+-}
+-
+-/*
+- * on h/w without explicit sha224 support, we initialize h/w context
+- * manually with sha224 constants, and tell it to run sha256.
+- */
+-static int ahash_init_sha224_swinit(struct ahash_request *areq)
+-{
+- struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+-
+- req_ctx->hw_context[0] = SHA224_H0;
+- req_ctx->hw_context[1] = SHA224_H1;
+- req_ctx->hw_context[2] = SHA224_H2;
+- req_ctx->hw_context[3] = SHA224_H3;
+- req_ctx->hw_context[4] = SHA224_H4;
+- req_ctx->hw_context[5] = SHA224_H5;
+- req_ctx->hw_context[6] = SHA224_H6;
+- req_ctx->hw_context[7] = SHA224_H7;
+-
+- /* init 64-bit count */
+- req_ctx->hw_context[8] = 0;
+- req_ctx->hw_context[9] = 0;
+-
+- ahash_init(areq);
+- req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
+-
+- return 0;
+-}
+-
+-static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
++static int ahash_process_req_one(struct ahash_request *areq, unsigned int nbytes)
+ {
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+@@ -1997,12 +1966,12 @@ static int ahash_process_req(struct ahas
+
+ if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
+ /* Buffer up to one whole block */
+- nents = sg_nents_for_len(areq->src, nbytes);
++ nents = sg_nents_for_len(req_ctx->request_sl, nbytes);
+ if (nents < 0) {
+ dev_err(dev, "Invalid number of src SG.\n");
+ return nents;
+ }
+- sg_copy_to_buffer(areq->src, nents,
++ sg_copy_to_buffer(req_ctx->request_sl, nents,
+ ctx_buf + req_ctx->nbuf, nbytes);
+ req_ctx->nbuf += nbytes;
+ return 0;
+@@ -2029,7 +1998,7 @@ static int ahash_process_req(struct ahas
+ sg_init_table(req_ctx->bufsl, nsg);
+ sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
+ if (nsg > 1)
+- sg_chain(req_ctx->bufsl, 2, areq->src);
++ sg_chain(req_ctx->bufsl, 2, req_ctx->request_sl);
+ req_ctx->psrc = req_ctx->bufsl;
+ } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
+ int offset;
+@@ -2038,26 +2007,26 @@ static int ahash_process_req(struct ahas
+ offset = blocksize - req_ctx->nbuf;
+ else
+ offset = nbytes_to_hash - req_ctx->nbuf;
+- nents = sg_nents_for_len(areq->src, offset);
++ nents = sg_nents_for_len(req_ctx->request_sl, offset);
+ if (nents < 0) {
+ dev_err(dev, "Invalid number of src SG.\n");
+ return nents;
+ }
+- sg_copy_to_buffer(areq->src, nents,
++ sg_copy_to_buffer(req_ctx->request_sl, nents,
+ ctx_buf + req_ctx->nbuf, offset);
+ req_ctx->nbuf += offset;
+- req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
++ req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, req_ctx->request_sl,
+ offset);
+ } else
+- req_ctx->psrc = areq->src;
++ req_ctx->psrc = req_ctx->request_sl;
+
+ if (to_hash_later) {
+- nents = sg_nents_for_len(areq->src, nbytes);
++ nents = sg_nents_for_len(req_ctx->request_sl, nbytes);
+ if (nents < 0) {
+ dev_err(dev, "Invalid number of src SG.\n");
+ return nents;
+ }
+- sg_pcopy_to_buffer(areq->src, nents,
++ sg_pcopy_to_buffer(req_ctx->request_sl, nents,
+ req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
+ to_hash_later,
+ nbytes - to_hash_later);
+@@ -2065,7 +2034,7 @@ static int ahash_process_req(struct ahas
+ req_ctx->to_hash_later = to_hash_later;
+
+ /* Allocate extended descriptor */
+- edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
++ edesc = ahash_edesc_alloc(req_ctx->areq, nbytes_to_hash);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+@@ -2087,14 +2056,123 @@ static int ahash_process_req(struct ahas
+ if (ctx->keylen && (req_ctx->first || req_ctx->last))
+ edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
+
+- return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
++ return common_nonsnoop_hash(edesc, req_ctx->areq, nbytes_to_hash, ahash_done);
+ }
+
+-static int ahash_update(struct ahash_request *areq)
++static void sec1_ahash_process_remaining(struct work_struct *work)
+ {
++ struct talitos_ahash_req_ctx *req_ctx =
++ container_of(work, struct talitos_ahash_req_ctx,
++ sec1_ahash_process_remaining);
++ int err = 0;
++
++ req_ctx->request_sl = scatterwalk_ffwd(req_ctx->request_bufsl,
++ req_ctx->request_sl, TALITOS1_MAX_DATA_LEN);
++
++ if (req_ctx->remaining_ahash_request_bytes > TALITOS1_MAX_DATA_LEN)
++ req_ctx->current_ahash_request_bytes = TALITOS1_MAX_DATA_LEN;
++ else {
++ req_ctx->current_ahash_request_bytes =
++ req_ctx->remaining_ahash_request_bytes;
++
++ if (req_ctx->last_request)
++ req_ctx->last = 1;
++ }
++
++ err = ahash_process_req_one(req_ctx->areq,
++ req_ctx->current_ahash_request_bytes);
++
++ if (err != -EINPROGRESS)
++ ahash_request_complete(req_ctx->areq, err);
++}
++
++static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
++{
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
++ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
++ struct device *dev = ctx->dev;
++ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
++ struct talitos_private *priv = dev_get_drvdata(dev);
++ bool is_sec1 = has_ftr_sec1(priv);
++
++ req_ctx->areq = areq;
++ req_ctx->request_sl = areq->src;
++ req_ctx->remaining_ahash_request_bytes = nbytes;
++
++ if (is_sec1) {
++ if (nbytes > TALITOS1_MAX_DATA_LEN)
++ nbytes = TALITOS1_MAX_DATA_LEN;
++ else if (req_ctx->last_request)
++ req_ctx->last = 1;
++ }
++
++ req_ctx->current_ahash_request_bytes = nbytes;
++
++ return ahash_process_req_one(req_ctx->areq,
++ req_ctx->current_ahash_request_bytes);
++}
++
++static int ahash_init(struct ahash_request *areq)
++{
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
++ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
++ struct device *dev = ctx->dev;
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
++ unsigned int size;
++ dma_addr_t dma;
+
++ /* Initialize the context */
++ req_ctx->buf_idx = 0;
++ req_ctx->nbuf = 0;
++ req_ctx->first = 1; /* first indicates h/w must init its context */
++ req_ctx->swinit = 0; /* assume h/w init of context */
++ size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
++ ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
++ : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
++ req_ctx->hw_context_size = size;
++ req_ctx->last_request = 0;
+ req_ctx->last = 0;
++ INIT_WORK(&req_ctx->sec1_ahash_process_remaining, sec1_ahash_process_remaining);
++
++ dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
++ DMA_TO_DEVICE);
++ dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
++
++ return 0;
++}
++
++/*
++ * on h/w without explicit sha224 support, we initialize h/w context
++ * manually with sha224 constants, and tell it to run sha256.
++ */
++static int ahash_init_sha224_swinit(struct ahash_request *areq)
++{
++ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
++
++ req_ctx->hw_context[0] = SHA224_H0;
++ req_ctx->hw_context[1] = SHA224_H1;
++ req_ctx->hw_context[2] = SHA224_H2;
++ req_ctx->hw_context[3] = SHA224_H3;
++ req_ctx->hw_context[4] = SHA224_H4;
++ req_ctx->hw_context[5] = SHA224_H5;
++ req_ctx->hw_context[6] = SHA224_H6;
++ req_ctx->hw_context[7] = SHA224_H7;
++
++ /* init 64-bit count */
++ req_ctx->hw_context[8] = 0;
++ req_ctx->hw_context[9] = 0;
++
++ ahash_init(areq);
++ req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
++
++ return 0;
++}
++
++static int ahash_update(struct ahash_request *areq)
++{
++ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
++
++ req_ctx->last_request = 0;
+
+ return ahash_process_req(areq, areq->nbytes);
+ }
+@@ -2103,7 +2181,7 @@ static int ahash_final(struct ahash_requ
+ {
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+- req_ctx->last = 1;
++ req_ctx->last_request = 1;
+
+ return ahash_process_req(areq, 0);
+ }
+@@ -2112,7 +2190,7 @@ static int ahash_finup(struct ahash_requ
+ {
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+- req_ctx->last = 1;
++ req_ctx->last_request = 1;
+
+ return ahash_process_req(areq, areq->nbytes);
+ }
--- /dev/null
+From a1b80018b8cec27fc06a8b04a7f8b5f6cfe86eae Mon Sep 17 00:00:00 2001
+From: Paul Louvel <paul.louvel@bootlin.com>
+Date: Mon, 30 Mar 2026 12:28:19 +0200
+Subject: crypto: talitos - rename first/last to first_desc/last_desc
+
+From: Paul Louvel <paul.louvel@bootlin.com>
+
+commit a1b80018b8cec27fc06a8b04a7f8b5f6cfe86eae upstream.
+
+The previous commit introduced a new last_request variable in the context
+structure.
+
+Rename the existing first/last member variables in the context structure
+to improve readability.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Paul Louvel <paul.louvel@bootlin.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/talitos.c | 46 +++++++++++++++++++++++-----------------------
+ 1 file changed, 23 insertions(+), 23 deletions(-)
+
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -869,8 +869,8 @@ struct talitos_ahash_req_ctx {
+ u8 buf[2][HASH_MAX_BLOCK_SIZE];
+ int buf_idx;
+ unsigned int swinit;
+- unsigned int first;
+- unsigned int last;
++ unsigned int first_desc;
++ unsigned int last_desc;
+ unsigned int last_request;
+ unsigned int to_hash_later;
+ unsigned int nbuf;
+@@ -889,8 +889,8 @@ struct talitos_export_state {
+ u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
+ u8 buf[HASH_MAX_BLOCK_SIZE];
+ unsigned int swinit;
+- unsigned int first;
+- unsigned int last;
++ unsigned int first_desc;
++ unsigned int last_desc;
+ unsigned int to_hash_later;
+ unsigned int nbuf;
+ };
+@@ -1722,7 +1722,7 @@ static void common_nonsnoop_hash_unmap(s
+ if (desc->next_desc &&
+ desc->ptr[5].ptr != desc2->ptr[5].ptr)
+ unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
+- if (req_ctx->last)
++ if (req_ctx->last_desc)
+ memcpy(areq->result, req_ctx->hw_context,
+ crypto_ahash_digestsize(tfm));
+
+@@ -1759,7 +1759,7 @@ static void ahash_done(struct device *de
+ container_of(desc, struct talitos_edesc, desc);
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+- if (!req_ctx->last && req_ctx->to_hash_later) {
++ if (!req_ctx->last_desc && req_ctx->to_hash_later) {
+ /* Position any partial block for next update/final/finup */
+ req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
+ req_ctx->nbuf = req_ctx->to_hash_later;
+@@ -1825,7 +1825,7 @@ static int common_nonsnoop_hash(struct t
+ /* first DWORD empty */
+
+ /* hash context in */
+- if (!req_ctx->first || req_ctx->swinit) {
++ if (!req_ctx->first_desc || req_ctx->swinit) {
+ map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
+ req_ctx->hw_context_size,
+ req_ctx->hw_context,
+@@ -1833,7 +1833,7 @@ static int common_nonsnoop_hash(struct t
+ req_ctx->swinit = 0;
+ }
+ /* Indicate next op is not the first. */
+- req_ctx->first = 0;
++ req_ctx->first_desc = 0;
+
+ /* HMAC key */
+ if (ctx->keylen)
+@@ -1866,7 +1866,7 @@ static int common_nonsnoop_hash(struct t
+ /* fifth DWORD empty */
+
+ /* hash/HMAC out -or- hash context out */
+- if (req_ctx->last)
++ if (req_ctx->last_desc)
+ map_single_talitos_ptr(dev, &desc->ptr[5],
+ crypto_ahash_digestsize(tfm),
+ req_ctx->hw_context, DMA_FROM_DEVICE);
+@@ -1908,7 +1908,7 @@ static int common_nonsnoop_hash(struct t
+ if (sg_count > 1)
+ sync_needed = true;
+ copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
+- if (req_ctx->last)
++ if (req_ctx->last_desc)
+ map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
+ req_ctx->hw_context_size,
+ req_ctx->hw_context,
+@@ -1964,7 +1964,7 @@ static int ahash_process_req_one(struct
+ bool is_sec1 = has_ftr_sec1(priv);
+ u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
+
+- if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
++ if (!req_ctx->last_desc && (nbytes + req_ctx->nbuf <= blocksize)) {
+ /* Buffer up to one whole block */
+ nents = sg_nents_for_len(req_ctx->request_sl, nbytes);
+ if (nents < 0) {
+@@ -1981,7 +1981,7 @@ static int ahash_process_req_one(struct
+ nbytes_to_hash = nbytes + req_ctx->nbuf;
+ to_hash_later = nbytes_to_hash & (blocksize - 1);
+
+- if (req_ctx->last)
++ if (req_ctx->last_desc)
+ to_hash_later = 0;
+ else if (to_hash_later)
+ /* There is a partial block. Hash the full block(s) now */
+@@ -2041,19 +2041,19 @@ static int ahash_process_req_one(struct
+ edesc->desc.hdr = ctx->desc_hdr_template;
+
+ /* On last one, request SEC to pad; otherwise continue */
+- if (req_ctx->last)
++ if (req_ctx->last_desc)
+ edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
+ else
+ edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
+
+ /* request SEC to INIT hash. */
+- if (req_ctx->first && !req_ctx->swinit)
++ if (req_ctx->first_desc && !req_ctx->swinit)
+ edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
+
+ /* When the tfm context has a keylen, it's an HMAC.
+ * A first or last (ie. not middle) descriptor must request HMAC.
+ */
+- if (ctx->keylen && (req_ctx->first || req_ctx->last))
++ if (ctx->keylen && (req_ctx->first_desc || req_ctx->last_desc))
+ edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
+
+ return common_nonsnoop_hash(edesc, req_ctx->areq, nbytes_to_hash, ahash_done);
+@@ -2076,7 +2076,7 @@ static void sec1_ahash_process_remaining
+ req_ctx->remaining_ahash_request_bytes;
+
+ if (req_ctx->last_request)
+- req_ctx->last = 1;
++ req_ctx->last_desc = 1;
+ }
+
+ err = ahash_process_req_one(req_ctx->areq,
+@@ -2103,7 +2103,7 @@ static int ahash_process_req(struct ahas
+ if (nbytes > TALITOS1_MAX_DATA_LEN)
+ nbytes = TALITOS1_MAX_DATA_LEN;
+ else if (req_ctx->last_request)
+- req_ctx->last = 1;
++ req_ctx->last_desc = 1;
+ }
+
+ req_ctx->current_ahash_request_bytes = nbytes;
+@@ -2124,14 +2124,14 @@ static int ahash_init(struct ahash_reque
+ /* Initialize the context */
+ req_ctx->buf_idx = 0;
+ req_ctx->nbuf = 0;
+- req_ctx->first = 1; /* first indicates h/w must init its context */
++ req_ctx->first_desc = 1; /* first_desc indicates h/w must init its context */
+ req_ctx->swinit = 0; /* assume h/w init of context */
+ size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
+ ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
+ : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+ req_ctx->hw_context_size = size;
+ req_ctx->last_request = 0;
+- req_ctx->last = 0;
++ req_ctx->last_desc = 0;
+ INIT_WORK(&req_ctx->sec1_ahash_process_remaining, sec1_ahash_process_remaining);
+
+ dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
+@@ -2223,8 +2223,8 @@ static int ahash_export(struct ahash_req
+ req_ctx->hw_context_size);
+ memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
+ export->swinit = req_ctx->swinit;
+- export->first = req_ctx->first;
+- export->last = req_ctx->last;
++ export->first_desc = req_ctx->first_desc;
++ export->last_desc = req_ctx->last_desc;
+ export->to_hash_later = req_ctx->to_hash_later;
+ export->nbuf = req_ctx->nbuf;
+
+@@ -2249,8 +2249,8 @@ static int ahash_import(struct ahash_req
+ memcpy(req_ctx->hw_context, export->hw_context, size);
+ memcpy(req_ctx->buf[0], export->buf, export->nbuf);
+ req_ctx->swinit = export->swinit;
+- req_ctx->first = export->first;
+- req_ctx->last = export->last;
++ req_ctx->first_desc = export->first_desc;
++ req_ctx->last_desc = export->last_desc;
+ req_ctx->to_hash_later = export->to_hash_later;
+ req_ctx->nbuf = export->nbuf;
+
--- /dev/null
+From 96bd3e76a171a8e21a6387e54e4c420a81968492 Mon Sep 17 00:00:00 2001
+From: Yosry Ahmed <yosry@kernel.org>
+Date: Tue, 3 Mar 2026 00:34:10 +0000
+Subject: KVM: nSVM: Add missing consistency check for EFER, CR0, CR4, and CS
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Yosry Ahmed <yosry@kernel.org>
+
+commit 96bd3e76a171a8e21a6387e54e4c420a81968492 upstream.
+
+According to the APM Volume #2, 15.5, Canonicalization and Consistency
+Checks (24593—Rev. 3.42—March 2024), the following condition (among
+others) results in a #VMEXIT with VMEXIT_INVALID (aka SVM_EXIT_ERR):
+
+ EFER.LME, CR0.PG, CR4.PAE, CS.L, and CS.D are all non-zero.
+
+In the list of consistency checks done when EFER.LME and CR0.PG are set,
+add a check that CS.L and CS.D are not both set, after the existing
+check that CR4.PAE is set.
+
+This is functionally a nop because the nested VMRUN results in
+SVM_EXIT_ERR in HW, which is forwarded to L1, but KVM performs all
+consistency checks before a VMRUN is actually attempted.
+
+Fixes: 3d6368ef580a ("KVM: SVM: Add VMRUN handler")
+Cc: stable@vger.kernel.org
+Signed-off-by: Yosry Ahmed <yosry@kernel.org>
+Link: https://patch.msgid.link/20260303003421.2185681-17-yosry@kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/nested.c | 6 ++++++
+ arch/x86/kvm/svm/svm.h | 1 +
+ 2 files changed, 7 insertions(+)
+
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -304,6 +304,10 @@ static bool __nested_vmcb_check_save(str
+ CC(!(save->cr0 & X86_CR0_PE)) ||
+ CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
+ return false;
++
++ if (CC((save->cs.attrib & SVM_SELECTOR_L_MASK) &&
++ (save->cs.attrib & SVM_SELECTOR_DB_MASK)))
++ return false;
+ }
+
+ /* Note, SVM doesn't have any additional restrictions on CR4. */
+@@ -390,6 +394,8 @@ static void __nested_copy_vmcb_save_to_c
+ * Copy only fields that are validated, as we need them
+ * to avoid TOC/TOU races.
+ */
++ to->cs = from->cs;
++
+ to->efer = from->efer;
+ to->cr0 = from->cr0;
+ to->cr3 = from->cr3;
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -115,6 +115,7 @@ struct kvm_vmcb_info {
+ };
+
+ struct vmcb_save_area_cached {
++ struct vmcb_seg cs;
+ u64 efer;
+ u64 cr4;
+ u64 cr3;
--- /dev/null
+From b71138fcc362c67ebe66747bb22cb4e6b4d6a651 Mon Sep 17 00:00:00 2001
+From: Yosry Ahmed <yosry@kernel.org>
+Date: Tue, 3 Mar 2026 00:34:09 +0000
+Subject: KVM: nSVM: Add missing consistency check for nCR3 validity
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Yosry Ahmed <yosry@kernel.org>
+
+commit b71138fcc362c67ebe66747bb22cb4e6b4d6a651 upstream.
+
+From the APM Volume #2, 15.25.4 (24593—Rev. 3.42—March 2024):
+
+ When VMRUN is executed with nested paging enabled (NP_ENABLE = 1), the
+ following conditions are considered illegal state combinations, in
+ addition to those mentioned in “Canonicalization and Consistency Checks”:
+ • Any MBZ bit of nCR3 is set.
+ • Any G_PAT.PA field has an unsupported type encoding or any
+ reserved field in G_PAT has a nonzero value.
+
+Add the consistency check for nCR3 being a legal GPA with no MBZ bits
+set. Note, the G_PAT.PA check is being handled separately[*].
+
+Link: https://lore.kernel.org/kvm/20260205214326.1029278-3-jmattson@google.com [*]
+Fixes: 4b16184c1cca ("KVM: SVM: Initialize Nested Nested MMU context on VMRUN")
+Cc: stable@vger.kernel.org
+Signed-off-by: Yosry Ahmed <yosry@kernel.org>
+Link: https://patch.msgid.link/20260303003421.2185681-16-yosry@kernel.org
+[sean: capture everything in CC(), massage changelog formatting]
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/nested.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -265,6 +265,10 @@ static bool __nested_vmcb_check_controls
+ if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled))
+ return false;
+
++ if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
++ !kvm_vcpu_is_legal_gpa(vcpu, control->nested_cr3)))
++ return false;
++
+ if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
+ MSRPM_SIZE)))
+ return false;
--- /dev/null
+From 01ddcdc55e097ca38c28ae656711b8e6d1df71f8 Mon Sep 17 00:00:00 2001
+From: Yosry Ahmed <yosry@kernel.org>
+Date: Tue, 3 Mar 2026 00:33:59 +0000
+Subject: KVM: nSVM: Always inject a #GP if mapping VMCB12 fails on nested VMRUN
+
+From: Yosry Ahmed <yosry@kernel.org>
+
+commit 01ddcdc55e097ca38c28ae656711b8e6d1df71f8 upstream.
+
+nested_svm_vmrun() currently only injects a #GP if kvm_vcpu_map() fails
+with -EINVAL. But it could also fail with -EFAULT if creating a host
+mapping failed. Inject a #GP in all cases, no reason to treat failure
+modes differently.
+
+Fixes: 8c5fbf1a7231 ("KVM/nSVM: Use the new mapping API for mapping guest memory")
+CC: stable@vger.kernel.org
+Co-developed-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Yosry Ahmed <yosry@kernel.org>
+Link: https://patch.msgid.link/20260303003421.2185681-6-yosry@kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/nested.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -863,12 +863,9 @@ int nested_svm_vmrun(struct kvm_vcpu *vc
+ }
+
+ vmcb12_gpa = svm->vmcb->save.rax;
+- ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
+- if (ret == -EINVAL) {
++ if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map)) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+- } else if (ret) {
+- return kvm_skip_emulated_instruction(vcpu);
+ }
+
+ ret = kvm_skip_emulated_instruction(vcpu);
--- /dev/null
+From 69b721a86d0dcb026f6db7d111dcde7550442d2e Mon Sep 17 00:00:00 2001
+From: Yosry Ahmed <yosry@kernel.org>
+Date: Tue, 3 Mar 2026 00:34:05 +0000
+Subject: KVM: nSVM: Clear EVENTINJ fields in vmcb12 on nested #VMEXIT
+
+From: Yosry Ahmed <yosry@kernel.org>
+
+commit 69b721a86d0dcb026f6db7d111dcde7550442d2e upstream.
+
+According to the APM, from the reference of the VMRUN instruction:
+
+ Upon #VMEXIT, the processor performs the following actions in order to
+ return to the host execution context:
+
+ ...
+
+ clear EVENTINJ field in VMCB
+
+KVM already syncs EVENTINJ fields from vmcb02 to cached vmcb12 on every
+L2->L0 #VMEXIT. Since these fields are zeroed by the CPU on #VMEXIT, they
+will mostly be zeroed in vmcb12 on nested #VMEXIT by nested_svm_vmexit().
+
+However, this is not the case when:
+
+ 1. Consistency checks fail, as nested_svm_vmexit() is not called.
+ 2. Entering guest mode fails before L2 runs (e.g. due to failed load of
+ CR3).
+
+(2) was broken by commit 2d8a42be0e2b ("KVM: nSVM: synchronize VMCB
+controls updated by the processor on every vmexit"), as prior to that
+nested_svm_vmexit() always zeroed EVENTINJ fields.
+
+Explicitly clear the fields in all nested #VMEXIT code paths.
+
+Fixes: 3d6368ef580a ("KVM: SVM: Add VMRUN handler")
+Fixes: 2d8a42be0e2b ("KVM: nSVM: synchronize VMCB controls updated by the processor on every vmexit")
+Cc: stable@vger.kernel.org
+Signed-off-by: Yosry Ahmed <yosry@kernel.org>
+Link: https://patch.msgid.link/20260303003421.2185681-12-yosry@kernel.org
+[sean: massage changelog formatting]
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/nested.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -884,6 +884,8 @@ int nested_svm_vmrun(struct kvm_vcpu *vc
+ vmcb12->control.exit_code_hi = -1u;
+ vmcb12->control.exit_info_1 = 0;
+ vmcb12->control.exit_info_2 = 0;
++ vmcb12->control.event_inj = 0;
++ vmcb12->control.event_inj_err = 0;
+ svm_set_gif(svm, false);
+ goto out;
+ }
+@@ -1025,9 +1027,9 @@ int nested_svm_vmexit(struct vcpu_svm *s
+ if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
+ vmcb12->control.next_rip = vmcb02->control.next_rip;
+
++ vmcb12->control.event_inj = 0;
++ vmcb12->control.event_inj_err = 0;
+ vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
+- vmcb12->control.event_inj = svm->nested.ctl.event_inj;
+- vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;
+
+ if (!kvm_pause_in_guest(vcpu->kvm)) {
+ vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count;
--- /dev/null
+From f85a6ce06e4a0d49652f57967a649ab09e06287c Mon Sep 17 00:00:00 2001
+From: Yosry Ahmed <yosry@kernel.org>
+Date: Tue, 3 Mar 2026 00:34:04 +0000
+Subject: KVM: nSVM: Clear GIF on nested #VMEXIT(INVALID)
+
+From: Yosry Ahmed <yosry@kernel.org>
+
+commit f85a6ce06e4a0d49652f57967a649ab09e06287c upstream.
+
+According to the APM, GIF is set to 0 on any #VMEXIT, including
+a #VMEXIT(INVALID) due to failed consistency checks. Clear GIF on
+consistency check failures.
+
+Fixes: 3d6368ef580a ("KVM: SVM: Add VMRUN handler")
+Cc: stable@vger.kernel.org
+Signed-off-by: Yosry Ahmed <yosry@kernel.org>
+Link: https://patch.msgid.link/20260303003421.2185681-11-yosry@kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/nested.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -884,6 +884,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vc
+ vmcb12->control.exit_code_hi = -1u;
+ vmcb12->control.exit_info_1 = 0;
+ vmcb12->control.exit_info_2 = 0;
++ svm_set_gif(svm, false);
+ goto out;
+ }
+
--- /dev/null
+From 8998e1d012f3f45d0456f16706682cef04c3c436 Mon Sep 17 00:00:00 2001
+From: Yosry Ahmed <yosry@kernel.org>
+Date: Tue, 3 Mar 2026 00:34:06 +0000
+Subject: KVM: nSVM: Clear tracking of L1->L2 NMI and soft IRQ on nested #VMEXIT
+
+From: Yosry Ahmed <yosry@kernel.org>
+
+commit 8998e1d012f3f45d0456f16706682cef04c3c436 upstream.
+
+KVM clears tracking of L1->L2 injected NMIs (i.e. nmi_l1_to_l2) and soft
+IRQs (i.e. soft_int_injected) on a synthesized #VMEXIT(INVALID) due to
+failed VMRUN. However, they are not explicitly cleared in other
+synthesized #VMEXITs.
+
+soft_int_injected is always cleared after the first VMRUN of L2 when
+completing interrupts, as any re-injection is then tracked by KVM
+(instead of purely in vmcb02).
+
+nmi_l1_to_l2 is not cleared after the first VMRUN if NMI injection
+failed, as KVM still needs to keep track that the NMI originated from L1
+to avoid blocking NMIs for L1. It is only cleared when the NMI injection
+succeeds.
+
+KVM could synthesize a #VMEXIT to L1 before successfully injecting the
+NMI into L2 (e.g. due to a #NPF on L2's NMI handler in L1's NPTs). In
+this case, nmi_l1_to_l2 will remain true, and KVM may not correctly mask
+NMIs and intercept IRET when injecting an NMI into L1.
+
+Clear both nmi_l1_to_l2 and soft_int_injected in nested_svm_vmexit(), i.e.
+for all #VMEXITs except those that occur due to failed consistency checks,
+as those happen before nmi_l1_to_l2 or soft_int_injected are set.
+
+Fixes: 159fc6fa3b7d ("KVM: nSVM: Transparently handle L1 -> L2 NMI re-injection")
+Cc: stable@vger.kernel.org
+Signed-off-by: Yosry Ahmed <yosry@kernel.org>
+Link: https://patch.msgid.link/20260303003421.2185681-13-yosry@kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/nested.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -913,8 +913,6 @@ int nested_svm_vmrun(struct kvm_vcpu *vc
+
+ out_exit_err:
+ svm->nested.nested_run_pending = 0;
+- svm->nmi_l1_to_l2 = false;
+- svm->soft_int_injected = false;
+
+ svm->vmcb->control.exit_code = SVM_EXIT_ERR;
+ svm->vmcb->control.exit_code_hi = -1u;
+@@ -1154,6 +1152,10 @@ int nested_svm_vmexit(struct vcpu_svm *s
+ if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF))
+ kvm_queue_exception(&(svm->vcpu), DB_VECTOR);
+
++ /* Drop tracking for L1->L2 injected NMIs and soft IRQs */
++ svm->nmi_l1_to_l2 = false;
++ svm->soft_int_injected = false;
++
+ /*
+ * Un-inhibit the AVIC right away, so that other vCPUs can start
+ * to benefit from it right away.
--- /dev/null
+From 24f7d36b824b65cf1a2db3db478059187b2a37b0 Mon Sep 17 00:00:00 2001
+From: Yosry Ahmed <yosry@kernel.org>
+Date: Tue, 24 Feb 2026 22:50:17 +0000
+Subject: KVM: nSVM: Ensure AVIC is inhibited when restoring a vCPU to guest mode
+
+From: Yosry Ahmed <yosry@kernel.org>
+
+commit 24f7d36b824b65cf1a2db3db478059187b2a37b0 upstream.
+
+On nested VMRUN, KVM ensures AVIC is inhibited by requesting
+KVM_REQ_APICV_UPDATE, triggering a check of inhibit reasons, finding
+APICV_INHIBIT_REASON_NESTED, and disabling AVIC.
+
+However, when KVM_SET_NESTED_STATE is performed on a vCPU not in guest
+mode with AVIC enabled, KVM_REQ_APICV_UPDATE is not requested, and AVIC
+is not inhibited.
+
+Request KVM_REQ_APICV_UPDATE in the KVM_SET_NESTED_STATE path if AVIC is
+active, similar to the nested VMRUN path.
+
+Fixes: f44509f849fe ("KVM: x86: SVM: allow AVIC to co-exist with a nested guest running")
+Cc: stable@vger.kernel.org
+Signed-off-by: Yosry Ahmed <yosry@kernel.org>
+Link: https://patch.msgid.link/20260224225017.3303870-1-yosry@kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/nested.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -1774,6 +1774,9 @@ static int svm_set_nested_state(struct k
+
+ svm->nested.force_msr_bitmap_recalc = true;
+
++ if (kvm_vcpu_apicv_active(vcpu))
++ kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
++
+ kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
+ ret = 0;
+ out_free:
--- /dev/null
+From e63fb1379f4b9300a44739964e69549bebbcdca4 Mon Sep 17 00:00:00 2001
+From: Yosry Ahmed <yosry.ahmed@linux.dev>
+Date: Tue, 10 Feb 2026 01:08:06 +0000
+Subject: KVM: nSVM: Mark all of vmcb02 dirty when restoring nested state
+
+From: Yosry Ahmed <yosry.ahmed@linux.dev>
+
+commit e63fb1379f4b9300a44739964e69549bebbcdca4 upstream.
+
+When restoring a vCPU in guest mode, any state restored before
+KVM_SET_NESTED_STATE (e.g. KVM_SET_SREGS) will mark the corresponding
+dirty bits in vmcb01, as it is the active VMCB before switching to
+vmcb02 in svm_set_nested_state().
+
+Hence, mark all fields in vmcb02 dirty in svm_set_nested_state() to
+capture any previously restored fields.
+
+Fixes: cc440cdad5b7 ("KVM: nSVM: implement KVM_GET_NESTED_STATE and KVM_SET_NESTED_STATE")
+CC: stable@vger.kernel.org
+Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
+Link: https://patch.msgid.link/20260210010806.3204289-1-yosry.ahmed@linux.dev
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/nested.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -1753,6 +1753,12 @@ static int svm_set_nested_state(struct k
+ nested_vmcb02_prepare_control(svm, svm->vmcb->save.rip, svm->vmcb->save.cs.base);
+
+ /*
++ * Any previously restored state (e.g. KVM_SET_SREGS) would mark fields
++ * dirty in vmcb01 instead of vmcb02, so mark all of vmcb02 dirty here.
++ */
++ vmcb_mark_all_dirty(svm->vmcb);
++
++ /*
+ * While the nested guest CR3 is already checked and set by
+ * KVM_SET_SREGS, it was set when nested state was yet loaded,
+ * thus MMU might not be initialized correctly.
--- /dev/null
+From 03bee264f8ebfd39e0254c98e112d033a7aa9055 Mon Sep 17 00:00:00 2001
+From: Yosry Ahmed <yosry@kernel.org>
+Date: Wed, 25 Feb 2026 00:59:44 +0000
+Subject: KVM: nSVM: Sync interrupt shadow to cached vmcb12 after VMRUN of L2
+
+From: Yosry Ahmed <yosry@kernel.org>
+
+commit 03bee264f8ebfd39e0254c98e112d033a7aa9055 upstream.
+
+After VMRUN in guest mode, nested_sync_control_from_vmcb02() syncs
+fields written by the CPU from vmcb02 to the cached vmcb12. This is
+because the cached vmcb12 is used as the authoritative copy of some of
+the controls, and is the payload when saving/restoring nested state.
+
+int_state is also written by the CPU, specifically bit 0 (i.e.
+SVM_INTERRUPT_SHADOW_MASK) for nested VMs, but it is not sync'd to
+cached vmcb12. This does not cause a problem if KVM_SET_NESTED_STATE
+preceeds KVM_SET_VCPU_EVENTS in the restore path, as an interrupt shadow
+would be correctly restored to vmcb02 (KVM_SET_VCPU_EVENTS overwrites
+what KVM_SET_NESTED_STATE restored in int_state).
+
+However, if KVM_SET_VCPU_EVENTS preceeds KVM_SET_NESTED_STATE, an
+interrupt shadow would be restored into vmcb01 instead of vmcb02. This
+would mostly be benign for L1 (delays an interrupt), but not for L2. For
+L2, the vCPU could hang (e.g. if a wakeup interrupt is delivered before
+a HLT that should have been in an interrupt shadow).
+
+Sync int_state to the cached vmcb12 in nested_sync_control_from_vmcb02()
+to avoid this problem. With that, KVM_SET_NESTED_STATE restores the
+correct interrupt shadow state, and if KVM_SET_VCPU_EVENTS follows it
+would overwrite it with the same value.
+
+Fixes: cc440cdad5b7 ("KVM: nSVM: implement KVM_GET_NESTED_STATE and KVM_SET_NESTED_STATE")
+CC: stable@vger.kernel.org
+Signed-off-by: Yosry Ahmed <yosry@kernel.org>
+Link: https://patch.msgid.link/20260225005950.3739782-3-yosry@kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/nested.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -412,6 +412,7 @@ void nested_sync_control_from_vmcb02(str
+ u32 mask;
+ svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
+ svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;
++ svm->nested.ctl.int_state = svm->vmcb->control.int_state;
+
+ /* Only a few fields of int_ctl are written by the processor. */
+ mask = V_IRQ_MASK | V_TPR_MASK;
--- /dev/null
+From 778d8c1b2a6ffe622ddcd3bb35b620e6e41f4da0 Mon Sep 17 00:00:00 2001
+From: Yosry Ahmed <yosry@kernel.org>
+Date: Wed, 25 Feb 2026 00:59:43 +0000
+Subject: KVM: nSVM: Sync NextRIP to cached vmcb12 after VMRUN of L2
+
+From: Yosry Ahmed <yosry@kernel.org>
+
+commit 778d8c1b2a6ffe622ddcd3bb35b620e6e41f4da0 upstream.
+
+After VMRUN in guest mode, nested_sync_control_from_vmcb02() syncs
+fields written by the CPU from vmcb02 to the cached vmcb12. This is
+because the cached vmcb12 is used as the authoritative copy of some of
+the controls, and is the payload when saving/restoring nested state.
+
+NextRIP is also written by the CPU (in some cases) after VMRUN, but is
+not sync'd to the cached vmcb12. As a result, it is corrupted after
+save/restore (replaced by the original value written by L1 on nested
+VMRUN). This could cause problems for both KVM (e.g. when injecting a
+soft IRQ) and L1 (e.g. when using NextRIP to advance RIP after emulating
+an instruction).
+
+Fix this by sync'ing NextRIP to the cache after VMRUN of L2, but only
+after completing interrupts (not in nested_sync_control_from_vmcb02()),
+as KVM may update NextRIP (e.g. when re-injecting a soft IRQ).
+
+Fixes: cc440cdad5b7 ("KVM: nSVM: implement KVM_GET_NESTED_STATE and KVM_SET_NESTED_STATE")
+CC: stable@vger.kernel.org
+Co-developed-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Yosry Ahmed <yosry@kernel.org>
+Link: https://patch.msgid.link/20260225005950.3739782-2-yosry@kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/svm.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -4339,6 +4339,16 @@ static __no_kcsan fastpath_t svm_vcpu_ru
+
+ svm_complete_interrupts(vcpu);
+
++ /*
++ * Update the cache after completing interrupts to get an accurate
++ * NextRIP, e.g. when re-injecting a soft interrupt.
++ *
++ * FIXME: Rework svm_get_nested_state() to not pull data from the
++ * cache (except for maybe int_ctl).
++ */
++ if (is_guest_mode(vcpu))
++ svm->nested.ctl.next_rip = svm->vmcb->control.next_rip;
++
+ return svm_exit_handlers_fastpath(vcpu);
+ }
+
--- /dev/null
+From 5c247d08bc81bbad4c662dcf5654137a2f8483ec Mon Sep 17 00:00:00 2001
+From: Yosry Ahmed <yosry.ahmed@linux.dev>
+Date: Tue, 3 Feb 2026 20:10:10 +0000
+Subject: KVM: nSVM: Use vcpu->arch.cr2 when updating vmcb12 on nested #VMEXIT
+
+From: Yosry Ahmed <yosry.ahmed@linux.dev>
+
+commit 5c247d08bc81bbad4c662dcf5654137a2f8483ec upstream.
+
+KVM currently uses the value of CR2 from vmcb02 to update vmcb12 on
+nested #VMEXIT. This value is incorrect in some cases, causing L1 to run
+L2 with a corrupted CR2. This could lead to segfaults or data corruption
+if L2 is in the middle of handling a #PF and reads a corrupted CR2. Use
+the correct value in vcpu->arch.cr2 instead.
+
+The value in vcpu->arch.cr2 is sync'd to vmcb02 shortly before a VMRUN
+of L2, and sync'd back to vcpu->arch.cr2 shortly after. The two are only
+out of sync in two cases: after save+restore, and after a #PF is
+injected into L2. In either case, if a #VMEXIT to L1 is synthesized
+before L2 runs, using the value in vmcb02 would be incorrect.
+
+After save+restore, the value of CR2 is restored by KVM_SET_SREGS into
+vcpu->arch.cr2. It is not reflected in vmcb02 until a VMRUN of L2. Before
+that, it holds whatever was in vmcb02 before restore, which would be
+zero on a new vCPU that never ran nested. If a #VMEXIT to L1 is
+synthesized before L2 ever runs, using vcpu->arch.cr2 to update vmcb12
+is the right thing to do.
+
+The #PF injection case is more nuanced. Although the APM is a bit
+unclear about when CR2 is written during a #PF, the SDM is more clear:
+
+ Processors update CR2 whenever a page fault is detected. If a
+ second page fault occurs while an earlier page fault is being
+ delivered, the faulting linear address of the second fault will
+ overwrite the contents of CR2 (replacing the previous address).
+ These updates to CR2 occur even if the page fault results in a
+ double fault or occurs during the delivery of a double fault.
+
+KVM injecting the exception surely counts as the #PF being "detected".
+More importantly, when an exception is injected into L2 at the time of a
+synthesized #VMEXIT, KVM updates exit_int_info in vmcb12 accordingly,
+such that an L1 hypervisor can re-inject the exception. If CR2 is not
+written at that point, the L1 hypervisor have no way of correctly
+re-injecting the #PF. Hence, if a #VMEXIT to L1 is synthesized after
+the #PF is injected into L2 but before it actually runs, using
+vcpu->arch.cr2 to update vmcb12 is also the right thing to do.
+
+Note that KVM does _not_ update vcpu->arch.cr2 when a #PF is pending for
+L2, only when it is injected. The distinction is important, because only
+injected (but not intercepted) exceptions are propagated to L1 through
+exit_int_info. It would be incorrect to update CR2 in vmcb12 for a
+pending #PF, as L1 would perceive an updated CR2 value with no #PF.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
+Link: https://patch.msgid.link/20260203201010.1871056-1-yosry.ahmed@linux.dev
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/nested.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -1005,7 +1005,7 @@ int nested_svm_vmexit(struct vcpu_svm *s
+ vmcb12->save.efer = svm->vcpu.arch.efer;
+ vmcb12->save.cr0 = kvm_read_cr0(vcpu);
+ vmcb12->save.cr3 = kvm_read_cr3(vcpu);
+- vmcb12->save.cr2 = vmcb02->save.cr2;
++ vmcb12->save.cr2 = vcpu->arch.cr2;
+ vmcb12->save.cr4 = svm->vcpu.arch.cr4;
+ vmcb12->save.rflags = kvm_get_rflags(vcpu);
+ vmcb12->save.rip = kvm_rip_read(vcpu);
--- /dev/null
+From d5bde6113aed8315a2bfe708730b721be9c2f48b Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Wed, 18 Feb 2026 15:09:51 -0800
+Subject: KVM: SVM: Explicitly mark vmcb01 dirty after modifying VMCB intercepts
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit d5bde6113aed8315a2bfe708730b721be9c2f48b upstream.
+
+When reacting to an intercept update, explicitly mark vmcb01's intercepts
+dirty, as KVM always initially operates on vmcb01, and nested_svm_vmexit()
+isn't guaranteed to mark VMCB_INTERCEPTS as dirty. I.e. if L2 is active,
+KVM will modify the intercepts for L1, but might not mark them as dirty
+before the next VMRUN of L1.
+
+Fixes: 116a0a23676e ("KVM: SVM: Add clean-bit for intercetps, tsc-offset and pause filter count")
+Cc: stable@vger.kernel.org
+Reviewed-by: Yosry Ahmed <yosry.ahmed@linux.dev>
+Link: https://patch.msgid.link/20260218230958.2877682-2-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/nested.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -129,11 +129,13 @@ void recalc_intercepts(struct vcpu_svm *
+ struct vmcb_ctrl_area_cached *g;
+ unsigned int i;
+
+- vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
++ vmcb_mark_dirty(svm->vmcb01.ptr, VMCB_INTERCEPTS);
+
+ if (!is_guest_mode(&svm->vcpu))
+ return;
+
++ vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
++
+ c = &svm->vmcb->control;
+ h = &svm->vmcb01.ptr->control;
+ g = &svm->nested.ctl;
--- /dev/null
+From d99df02ff427f461102230f9c5b90a6c64ee8e23 Mon Sep 17 00:00:00 2001
+From: Kevin Cheng <chengkev@google.com>
+Date: Sat, 28 Feb 2026 03:33:26 +0000
+Subject: KVM: SVM: Inject #UD for INVLPGA if EFER.SVME=0
+
+From: Kevin Cheng <chengkev@google.com>
+
+commit d99df02ff427f461102230f9c5b90a6c64ee8e23 upstream.
+
+INVLPGA should cause a #UD when EFER.SVME is not set. Add a check to
+properly inject #UD when EFER.SVME=0.
+
+Fixes: ff092385e828 ("KVM: SVM: Implement INVLPGA")
+Cc: stable@vger.kernel.org
+Signed-off-by: Kevin Cheng <chengkev@google.com>
+Reviewed-by: Yosry Ahmed <yosry.ahmed@linux.dev>
+Link: https://patch.msgid.link/20260228033328.2285047-3-chengkev@google.com
+[sean: tag for stable@]
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/svm.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -2494,6 +2494,9 @@ static int invlpga_interception(struct k
+ gva_t gva = kvm_rax_read(vcpu);
+ u32 asid = kvm_rcx_read(vcpu);
+
++ if (nested_svm_check_permissions(vcpu))
++ return 1;
++
+ /* FIXME: Handle an address size prefix. */
+ if (!is_long_mode(vcpu))
+ gva = (u32)gva;
--- /dev/null
+From d0ad1b05bbe6f8da159a4dfb6692b3b7ce30ccc8 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Tue, 17 Feb 2026 16:54:38 -0800
+Subject: KVM: x86: Defer non-architectural deliver of exception payload to userspace read
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit d0ad1b05bbe6f8da159a4dfb6692b3b7ce30ccc8 upstream.
+
+When attempting to play nice with userspace that hasn't enabled
+KVM_CAP_EXCEPTION_PAYLOAD, defer KVM's non-architectural delivery of the
+payload until userspace actually reads relevant vCPU state, and more
+importantly, force delivery of the payload in *all* paths where userspace
+saves relevant vCPU state, not just KVM_GET_VCPU_EVENTS.
+
+Ignoring userspace save/restore for the moment, delivering the payload
+before the exception is injected is wrong regardless of whether L1 or L2
+is running. To make matters even more confusing, the flaw *currently*
+being papered over by the !is_guest_mode() check isn't even the same bug
+that commit da998b46d244 ("kvm: x86: Defer setting of CR2 until #PF
+delivery") was trying to avoid.
+
+At the time of commit da998b46d244, KVM didn't correctly handle exception
+intercepts, as KVM would wait until VM-Entry into L2 was imminent to check
+if the queued exception should morph to a nested VM-Exit. I.e. KVM would
+deliver the payload to L2 and then synthesize a VM-Exit into L1. But the
+payload was only the most blatant issue, e.g. waiting to check exception
+intercepts would also lead to KVM incorrectly escalating a
+should-be-intercepted #PF into a #DF.
+
+That underlying bug was eventually fixed by commit 7709aba8f716 ("KVM: x86:
+Morph pending exceptions to pending VM-Exits at queue time"), but in the
+interim, commit a06230b62b89 ("KVM: x86: Deliver exception payload on
+KVM_GET_VCPU_EVENTS") came along and subtly added another dependency on
+the !is_guest_mode() check.
+
+While not recorded in the changelog, the motivation for deferring the
+!exception_payload_enabled delivery was to fix a flaw where a synthesized
+MTF (Monitor Trap Flag) VM-Exit would drop a pending #DB and clobber DR6.
+On a VM-Exit, VMX CPUs save pending #DB information into the VMCS, which
+is emulated by KVM in nested_vmx_update_pending_dbg() by grabbing the
+payload from the queue/pending exception. I.e. prematurely delivering the
+payload would cause the pending #DB to not be recorded in the VMCS, and of
+course, clobber L2's DR6 as seen by L1.
+
+Jumping back to save+restore, the quirked behavior of forcing delivery of
+the payload only works if userspace does KVM_GET_VCPU_EVENTS *before*
+CR2 or DR6 is saved, i.e. before KVM_GET_SREGS{,2} and KVM_GET_DEBUGREGS.
+E.g. if userspace does KVM_GET_SREGS before KVM_GET_VCPU_EVENTS, then the
+CR2 saved by userspace won't contain the payload for the exception saved by
+KVM_GET_VCPU_EVENTS.
+
+Deliberately deliver the payload in the store_regs() path, as it's the
+least awful option even though userspace may not be doing save+restore.
+Because if userspace _is_ doing save+restore, it could elide KVM_GET_SREGS
+knowing that SREGS were already saved when the vCPU exited.
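+
+Concretely, the ordering that previously broke the quirk
+(KVM_CAP_EXCEPTION_PAYLOAD disabled) was:
+
+	KVM_GET_SREGS        /* CR2 saved, payload not yet delivered */
+	KVM_GET_VCPU_EVENTS  /* payload delivered, too late for CR2  */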
+
+Link: https://lore.kernel.org/all/20200207103608.110305-1-oupton@google.com
+Cc: Yosry Ahmed <yosry.ahmed@linux.dev>
+Cc: stable@vger.kernel.org
+Reviewed-by: Yosry Ahmed <yosry.ahmed@linux.dev>
+Tested-by: Yosry Ahmed <yosry.ahmed@linux.dev>
+Link: https://patch.msgid.link/20260218005438.2619063-1-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/x86.c | 62 +++++++++++++++++++++++++++++++++--------------------
+ 1 file changed, 39 insertions(+), 23 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -695,9 +695,6 @@ static void kvm_multiple_exception(struc
+ vcpu->arch.exception.error_code = error_code;
+ vcpu->arch.exception.has_payload = has_payload;
+ vcpu->arch.exception.payload = payload;
+- if (!is_guest_mode(vcpu))
+- kvm_deliver_exception_payload(vcpu,
+- &vcpu->arch.exception);
+ return;
+ }
+
+@@ -5147,18 +5144,8 @@ static int kvm_vcpu_ioctl_x86_set_mce(st
+ return 0;
+ }
+
+-static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
+- struct kvm_vcpu_events *events)
++static struct kvm_queued_exception *kvm_get_exception_to_save(struct kvm_vcpu *vcpu)
+ {
+- struct kvm_queued_exception *ex;
+-
+- process_nmi(vcpu);
+-
+-#ifdef CONFIG_KVM_SMM
+- if (kvm_check_request(KVM_REQ_SMI, vcpu))
+- process_smi(vcpu);
+-#endif
+-
+ /*
+ * KVM's ABI only allows for one exception to be migrated. Luckily,
+ * the only time there can be two queued exceptions is if there's a
+@@ -5169,21 +5156,46 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_
+ if (vcpu->arch.exception_vmexit.pending &&
+ !vcpu->arch.exception.pending &&
+ !vcpu->arch.exception.injected)
+- ex = &vcpu->arch.exception_vmexit;
+- else
+- ex = &vcpu->arch.exception;
++ return &vcpu->arch.exception_vmexit;
++
++ return &vcpu->arch.exception;
++}
++
++static void kvm_handle_exception_payload_quirk(struct kvm_vcpu *vcpu)
++{
++ struct kvm_queued_exception *ex = kvm_get_exception_to_save(vcpu);
+
+ /*
+- * In guest mode, payload delivery should be deferred if the exception
+- * will be intercepted by L1, e.g. KVM should not modifying CR2 if L1
+- * intercepts #PF, ditto for DR6 and #DBs. If the per-VM capability,
+- * KVM_CAP_EXCEPTION_PAYLOAD, is not set, userspace may or may not
+- * propagate the payload and so it cannot be safely deferred. Deliver
+- * the payload if the capability hasn't been requested.
++ * If KVM_CAP_EXCEPTION_PAYLOAD is disabled, then (prematurely) deliver
++ * the pending exception payload when userspace saves *any* vCPU state
++ * that interacts with exception payloads to avoid breaking userspace.
++ *
++ * Architecturally, KVM must not deliver an exception payload until the
++ * exception is actually injected, e.g. to avoid losing pending #DB
++ * information (which VMX tracks in the VMCS), and to avoid clobbering
++ * state if the exception is never injected for whatever reason. But
++ * if KVM_CAP_EXCEPTION_PAYLOAD isn't enabled, then userspace may or
++ * may not propagate the payload across save+restore, and so KVM can't
++ * safely defer delivery of the payload.
+ */
+ if (!vcpu->kvm->arch.exception_payload_enabled &&
+ ex->pending && ex->has_payload)
+ kvm_deliver_exception_payload(vcpu, ex);
++}
++
++static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
++ struct kvm_vcpu_events *events)
++{
++ struct kvm_queued_exception *ex = kvm_get_exception_to_save(vcpu);
++
++ process_nmi(vcpu);
++
++#ifdef CONFIG_KVM_SMM
++ if (kvm_check_request(KVM_REQ_SMI, vcpu))
++ process_smi(vcpu);
++#endif
++
++ kvm_handle_exception_payload_quirk(vcpu);
+
+ memset(events, 0, sizeof(*events));
+
+@@ -5364,6 +5376,8 @@ static void kvm_vcpu_ioctl_x86_get_debug
+ {
+ unsigned long val;
+
++ kvm_handle_exception_payload_quirk(vcpu);
++
+ memset(dbgregs, 0, sizeof(*dbgregs));
+ memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
+ kvm_get_dr(vcpu, 6, &val);
+@@ -11396,6 +11410,8 @@ static void __get_sregs_common(struct kv
+ if (vcpu->arch.guest_state_protected)
+ goto skip_protected_regs;
+
++ kvm_handle_exception_payload_quirk(vcpu);
++
+ kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
+ kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
+ kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
--- /dev/null
+From 049a57421dd67a28c45ae7e92c36df758033e5fa Mon Sep 17 00:00:00 2001
+From: SeongJae Park <sj@kernel.org>
+Date: Sun, 29 Mar 2026 08:23:05 -0700
+Subject: mm/damon/core: use time_in_range_open() for damos quota window start
+
+From: SeongJae Park <sj@kernel.org>
+
+commit 049a57421dd67a28c45ae7e92c36df758033e5fa upstream.
+
+damos_adjust_quota() uses time_after_eq() to show if it is time to start a
+new quota charge window, comparing the current jiffies and the scheduled
+next charge window start time. If it is, the next charge window start
+time is updated and the new charge window starts.
+
+The time check and the next window start time update are skipped while
+the scheme is deactivated by the watermarks. Suppose the deactivation
+lasts more than LONG_MAX jiffies (assuming CONFIG_HZ of 250, more than
+99 days on 32-bit systems and more than one billion years on 64-bit
+systems), so that jiffies becomes larger than the next charge window
+start time + LONG_MAX. Then the time_after_eq() call can return false
+until another LONG_MAX jiffies have passed.
+
+This means the scheme can continue working after being reactivated by
+the watermarks, but the quota will soon be exceeded and the scheme will
+again effectively stop working until the next charge window starts.
+Because the current charge window has been extended by up to LONG_MAX
+jiffies, however, from the user's perspective the scheme will appear to
+have stopped unexpectedly and indefinitely.
+
+Fix this by using !time_in_range_open() instead.
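+
+As a rough illustration (not the actual kernel flow; start_new_window()
+is a hypothetical stand-in), compare how the two checks behave once
+jiffies has advanced more than LONG_MAX ticks past the window end:
+
+	unsigned long start = quota->charged_from;
+	unsigned long end = start + msecs_to_jiffies(quota->reset_interval);
+
+	/* Old check: wraps once jiffies > end + LONG_MAX, returns false
+	 * and silently extends the current window.
+	 */
+	if (time_after_eq(jiffies, end))
+		start_new_window();
+
+	/* New check: anything not provably inside [start, end) is
+	 * treated as "window over", no matter how far time has moved.
+	 */
+	if (!time_in_range_open(jiffies, start, end))
+		start_new_window();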
+
+The issue was discovered [1] by sashiko.
+
+Link: https://lore.kernel.org/20260329152306.45796-1-sj@kernel.org
+Link: https://lore.kernel.org/20260324040722.57944-1-sj@kernel.org [1]
+Fixes: ee801b7dd782 ("mm/damon/schemes: activate schemes based on a watermarks mechanism")
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org> # 5.16.x
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/core.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -1048,7 +1048,8 @@ static void damos_adjust_quota(struct da
+ quota->charged_from = jiffies;
+
+ /* New charge window starts */
+- if (time_after_eq(jiffies, quota->charged_from +
++ if (!time_in_range_open(jiffies, quota->charged_from,
++ quota->charged_from +
+ msecs_to_jiffies(quota->reset_interval))) {
+ if (quota->esz && quota->charged_sz >= quota->esz)
+ s->stat.qt_exceeds++;
--- /dev/null
+From c7c6d4f5103864f73ee3a78bfd6da241f84197dd Mon Sep 17 00:00:00 2001
+From: Bin Liu <b-liu@ti.com>
+Date: Wed, 25 Mar 2026 08:49:47 -0500
+Subject: mmc: block: use single block write in retry
+
+From: Bin Liu <b-liu@ti.com>
+
+commit c7c6d4f5103864f73ee3a78bfd6da241f84197dd upstream.
+
+Due to erratum i2493 [0], multi-block writes would still fail during retries.
+
+With i2493, the MMC interface is susceptible to write failures when
+issuing multi-block writes in HS200 mode in the presence of excessive
+IO supply noise.
+
+While the erratum provides hardware design and layout guidance to
+minimize the IO supply noise, the write failure cannot, in theory, be
+fully resolved in hardware. The software solution to ensure data
+integrity is to add a minimum 5us delay between block writes, and
+single-block writes are the practical way to introduce that delay.
+
+This patch reuses the recovery_mode flag and switches to single-block
+writes on retry when a multi-block write fails. It covers both the CQE
+and non-CQE cases.
+
+[0] https://www.ti.com/lit/pdf/sprz582
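+
+A condensed sketch of the flow this introduces (request plumbing
+simplified; see the hunks below for the exact contexts):
+
+	/* On a failed write, flag the request before requeueing it ... */
+	if (mqrq->retries++ < MMC_MAX_RETRIES) {
+		if (rq_data_dir(req) == WRITE)
+			mqrq->flags |= MQRQ_XFER_SINGLE_BLOCK;
+		blk_mq_requeue_request(req, true);
+	}
+
+	/* ... and let data prep treat the retry as recovery, forcing
+	 * single-block transfers that space the writes apart.
+	 */
+	if (mqrq->flags & MQRQ_XFER_SINGLE_BLOCK)
+		recovery_mode = 1;
+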
+Cc: stable@vger.kernel.org
+Suggested-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Bin Liu <b-liu@ti.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/core/block.c | 12 ++++++++++--
+ drivers/mmc/core/queue.h | 3 +++
+ 2 files changed, 13 insertions(+), 2 deletions(-)
+
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -1378,6 +1378,9 @@ static void mmc_blk_data_prep(struct mmc
+ rq_data_dir(req) == WRITE &&
+ (md->flags & MMC_BLK_REL_WR);
+
++ if (mqrq->flags & MQRQ_XFER_SINGLE_BLOCK)
++ recovery_mode = 1;
++
+ memset(brq, 0, sizeof(struct mmc_blk_request));
+
+ mmc_crypto_prepare_req(mqrq);
+@@ -1517,10 +1520,13 @@ static void mmc_blk_cqe_complete_rq(stru
+ err = 0;
+
+ if (err) {
+- if (mqrq->retries++ < MMC_CQE_RETRIES)
++ if (mqrq->retries++ < MMC_CQE_RETRIES) {
++ if (rq_data_dir(req) == WRITE)
++ mqrq->flags |= MQRQ_XFER_SINGLE_BLOCK;
+ blk_mq_requeue_request(req, true);
+- else
++ } else {
+ blk_mq_end_request(req, BLK_STS_IOERR);
++ }
+ } else if (mrq->data) {
+ if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered))
+ blk_mq_requeue_request(req, true);
+@@ -2058,6 +2064,8 @@ static void mmc_blk_mq_complete_rq(struc
+ } else if (!blk_rq_bytes(req)) {
+ __blk_mq_end_request(req, BLK_STS_IOERR);
+ } else if (mqrq->retries++ < MMC_MAX_RETRIES) {
++ if (rq_data_dir(req) == WRITE)
++ mqrq->flags |= MQRQ_XFER_SINGLE_BLOCK;
+ blk_mq_requeue_request(req, true);
+ } else {
+ if (mmc_card_removed(mq->card))
+--- a/drivers/mmc/core/queue.h
++++ b/drivers/mmc/core/queue.h
+@@ -61,6 +61,8 @@ enum mmc_drv_op {
+ MMC_DRV_OP_GET_EXT_CSD,
+ };
+
++#define MQRQ_XFER_SINGLE_BLOCK BIT(0)
++
+ struct mmc_queue_req {
+ struct mmc_blk_request brq;
+ struct scatterlist *sg;
+@@ -69,6 +71,7 @@ struct mmc_queue_req {
+ void *drv_op_data;
+ unsigned int ioc_count;
+ int retries;
++ u32 flags;
+ };
+
+ struct mmc_queue {
--- /dev/null
+From 6546a49bbe656981d99a389195560999058c89c4 Mon Sep 17 00:00:00 2001
+From: Shawn Lin <shawn.lin@rock-chips.com>
+Date: Wed, 8 Apr 2026 15:18:49 +0800
+Subject: mmc: sdhci-of-dwcmshc: Disable clock before DLL configuration
+
+From: Shawn Lin <shawn.lin@rock-chips.com>
+
+commit 6546a49bbe656981d99a389195560999058c89c4 upstream.
+
+According to the ASIC design recommendations, the clock must be
+disabled before operating the DLL to prevent glitches that could
+affect the internal digital logic. In extreme cases, failing to
+do so may cause the controller to malfunction completely.
+
+Add a step to disable the clock before DLL configuration and re-enable
+it at the end.
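+
+The resulting sequence, condensed (the DLL register programming itself
+is elided; see the full diff below):
+
+	/* Gate the card clock before touching the DLL. */
+	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
+	/* ... reset/configure the DLL and wait for it to lock ... */
+
+enable_clk:
+	/*
+	 * The frequency select bits are not functional on this IP, so
+	 * passing 0 simply re-enables the already-configured clock.
+	 */
+	sdhci_enable_clk(host, 0);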
+
+Fixes: 08f3dff799d4 ("mmc: sdhci-of-dwcmshc: add rockchip platform support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/sdhci-of-dwcmshc.c | 19 ++++++++++++++++---
+ 1 file changed, 16 insertions(+), 3 deletions(-)
+
+--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
++++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
+@@ -243,12 +243,15 @@ static void dwcmshc_rk3568_set_clock(str
+ extra &= ~BIT(0);
+ sdhci_writel(host, extra, reg);
+
++ /* Disable clock while config DLL */
++ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
++
+ if (clock <= 52000000) {
+ if (host->mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
+ host->mmc->ios.timing == MMC_TIMING_MMC_HS400) {
+ dev_err(mmc_dev(host->mmc),
+ "Can't reduce the clock below 52MHz in HS200/HS400 mode");
+- return;
++ goto enable_clk;
+ }
+
+ /*
+@@ -268,7 +271,7 @@ static void dwcmshc_rk3568_set_clock(str
+ DLL_STRBIN_DELAY_NUM_SEL |
+ DLL_STRBIN_DELAY_NUM_DEFAULT << DLL_STRBIN_DELAY_NUM_OFFSET;
+ sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_STRBIN);
+- return;
++ goto enable_clk;
+ }
+
+ /* Reset DLL */
+@@ -295,7 +298,7 @@ static void dwcmshc_rk3568_set_clock(str
+ 500 * USEC_PER_MSEC);
+ if (err) {
+ dev_err(mmc_dev(host->mmc), "DLL lock timeout!\n");
+- return;
++ goto enable_clk;
+ }
+
+ extra = 0x1 << 16 | /* tune clock stop en */
+@@ -328,6 +331,16 @@ static void dwcmshc_rk3568_set_clock(str
+ DLL_STRBIN_TAPNUM_DEFAULT |
+ DLL_STRBIN_TAPNUM_FROM_SW;
+ sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_STRBIN);
++
++enable_clk:
++ /*
++ * The sdclk frequency select bits in SDHCI_CLOCK_CONTROL are not functional
++ * on Rockchip's SDHCI implementation. Instead, the clock frequency is fully
++ * controlled via external clk provider by calling clk_set_rate(). Consequently,
++ * passing 0 to sdhci_enable_clk() only re-enables the already-configured clock,
++ * which matches the hardware's actual behavior.
++ */
++ sdhci_enable_clk(host, 0);
+ }
+
+ static void rk35xx_sdhci_reset(struct sdhci_host *host, u8 mask)
--- /dev/null
+From 37beb42560165869838e7d91724f3e629db64129 Mon Sep 17 00:00:00 2001
+From: Ryan Roberts <ryan.roberts@arm.com>
+Date: Tue, 3 Mar 2026 15:08:38 +0000
+Subject: randomize_kstack: Maintain kstack_offset per task
+
+From: Ryan Roberts <ryan.roberts@arm.com>
+
+commit 37beb42560165869838e7d91724f3e629db64129 upstream.
+
+kstack_offset was previously maintained per-cpu, but this caused a
+couple of issues. So let's instead make it per-task.
+
+Issue 1: add_random_kstack_offset() and choose_random_kstack_offset()
+were expected and required to be called with interrupts and preemption
+disabled so that they could manipulate per-cpu state. But arm64,
+loongarch and risc-v are calling them with interrupts and preemption
+enabled. I don't _think_ this causes any functional issues, but it's
+certainly unexpected and could lead to manipulating the wrong cpu's
+state, which could cause a minor performance degradation due to bouncing
+the cache lines. By maintaining the state per-task, those functions can
+safely be called in preemptible context.
+
+Issue 2: add_random_kstack_offset() is called before executing the
+syscall and expands the stack using a previously chosen random offset.
+choose_random_kstack_offset() is called after executing the syscall and
+chooses and stores a new random offset for the next syscall. With
+per-cpu storage for this offset, an attacker could force cpu migration
+during the execution of the syscall and prevent the offset from being
+updated for the original cpu such that it is predictable for the next
+syscall on that cpu. By maintaining the state per-task, this problem
+goes away because the per-task random offset is updated after the
+syscall regardless of which cpu it is executing on.
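+
+A condensed sketch of the per-task flow after this change (entry/exit
+glue simplified; "rand" stands for the entropy argument passed to
+choose_random_kstack_offset(), and KSTACK_OFFSET_MAX bounds the offset
+as before):
+
+	/* syscall entry: consume the offset chosen after this task's
+	 * previous syscall, wherever it ran.
+	 */
+	u32 offset = current->kstack_offset;
+	u8 *ptr = __kstack_alloca(KSTACK_OFFSET_MAX(offset));
+	asm volatile("" :: "r"(ptr) : "memory");	/* keep the allocation */
+
+	/* syscall exit: fold fresh entropy into this task's next offset */
+	current->kstack_offset = ror32(current->kstack_offset, 5) ^ rand;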
+
+Fixes: 39218ff4c625 ("stack: Optionally randomize kernel stack offset each syscall")
+Closes: https://lore.kernel.org/all/dd8c37bc-795f-4c7a-9086-69e584d8ab24@arm.com/
+Cc: stable@vger.kernel.org
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
+Link: https://patch.msgid.link/20260303150840.3789438-2-ryan.roberts@arm.com
+Signed-off-by: Kees Cook <kees@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/randomize_kstack.h | 26 +++++++++++++++-----------
+ include/linux/sched.h | 4 ++++
+ init/main.c | 1 -
+ kernel/fork.c | 2 ++
+ 4 files changed, 21 insertions(+), 12 deletions(-)
+
+--- a/include/linux/randomize_kstack.h
++++ b/include/linux/randomize_kstack.h
+@@ -9,7 +9,6 @@
+
+ DECLARE_STATIC_KEY_MAYBE(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
+ randomize_kstack_offset);
+-DECLARE_PER_CPU(u32, kstack_offset);
+
+ /*
+ * Do not use this anywhere else in the kernel. This is used here because
+@@ -44,15 +43,14 @@ DECLARE_PER_CPU(u32, kstack_offset);
+ * add_random_kstack_offset - Increase stack utilization by previously
+ * chosen random offset
+ *
+- * This should be used in the syscall entry path when interrupts and
+- * preempt are disabled, and after user registers have been stored to
+- * the stack. For testing the resulting entropy, please see:
+- * tools/testing/selftests/lkdtm/stack-entropy.sh
++ * This should be used in the syscall entry path after user registers have been
++ * stored to the stack. Preemption may be enabled. For testing the resulting
++ * entropy, please see: tools/testing/selftests/lkdtm/stack-entropy.sh
+ */
+ #define add_random_kstack_offset() do { \
+ if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
+ &randomize_kstack_offset)) { \
+- u32 offset = raw_cpu_read(kstack_offset); \
++ u32 offset = current->kstack_offset; \
+ u8 *ptr = __kstack_alloca(KSTACK_OFFSET_MAX(offset)); \
+ /* Keep allocation even after "ptr" loses scope. */ \
+ asm volatile("" :: "r"(ptr) : "memory"); \
+@@ -63,9 +61,9 @@ DECLARE_PER_CPU(u32, kstack_offset);
+ * choose_random_kstack_offset - Choose the random offset for the next
+ * add_random_kstack_offset()
+ *
+- * This should only be used during syscall exit when interrupts and
+- * preempt are disabled. This position in the syscall flow is done to
+- * frustrate attacks from userspace attempting to learn the next offset:
++ * This should only be used during syscall exit. Preemption may be enabled. This
++ * position in the syscall flow is done to frustrate attacks from userspace
++ * attempting to learn the next offset:
+ * - Maximize the timing uncertainty visible from userspace: if the
+ * offset is chosen at syscall entry, userspace has much more control
+ * over the timing between choosing offsets. "How long will we be in
+@@ -79,14 +77,20 @@ DECLARE_PER_CPU(u32, kstack_offset);
+ #define choose_random_kstack_offset(rand) do { \
+ if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
+ &randomize_kstack_offset)) { \
+- u32 offset = raw_cpu_read(kstack_offset); \
++ u32 offset = current->kstack_offset; \
+ offset = ror32(offset, 5) ^ (rand); \
+- raw_cpu_write(kstack_offset, offset); \
++ current->kstack_offset = offset; \
+ } \
+ } while (0)
++
++static inline void random_kstack_task_init(struct task_struct *tsk)
++{
++ tsk->kstack_offset = 0;
++}
+ #else /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
+ #define add_random_kstack_offset() do { } while (0)
+ #define choose_random_kstack_offset(rand) do { } while (0)
++#define random_kstack_task_init(tsk) do { } while (0)
+ #endif /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
+
+ #endif
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1498,6 +1498,10 @@ struct task_struct {
+ unsigned long prev_lowest_stack;
+ #endif
+
++#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
++ u32 kstack_offset;
++#endif
++
+ #ifdef CONFIG_X86_MCE
+ void __user *mce_vaddr;
+ __u64 mce_kflags;
+--- a/init/main.c
++++ b/init/main.c
+@@ -816,7 +816,6 @@ static inline void initcall_debug_enable
+ #ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
+ DEFINE_STATIC_KEY_MAYBE_RO(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
+ randomize_kstack_offset);
+-DEFINE_PER_CPU(u32, kstack_offset);
+
+ static int __init early_randomize_kstack_offset(char *buf)
+ {
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -93,6 +93,7 @@
+ #include <linux/thread_info.h>
+ #include <linux/stackleak.h>
+ #include <linux/kasan.h>
++#include <linux/randomize_kstack.h>
+ #include <linux/scs.h>
+ #include <linux/io_uring.h>
+ #include <linux/bpf.h>
+@@ -2517,6 +2518,7 @@ __latent_entropy struct task_struct *cop
+ if (retval)
+ goto bad_fork_cleanup_io;
+
++ random_kstack_task_init(p);
+ stackleak_task_init(p);
+
+ if (pid != &init_struct_pid) {
--- /dev/null
+From 30c4d2f26bb3538c328035cea2e6265c8320539e Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Tue, 7 Apr 2026 14:27:17 +0200
+Subject: rtc: ntxec: fix OF node reference imbalance
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 30c4d2f26bb3538c328035cea2e6265c8320539e upstream.
+
+The driver reuses the OF node of the parent multi-function device but
+fails to take another reference to balance the one dropped by the
+platform bus code when unbinding the MFD and deregistering the child
+devices.
+
+Fix this by using the intended helper for reusing OF nodes.
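+
+For comparison, simplified (the helper lives in drivers/base/core.c and
+takes the reference that a bare pointer assignment skips):
+
+	/* before: borrows the parent's node, so unbinding drops one
+	 * reference too many when the platform bus puts it again
+	 */
+	pdev->dev.of_node = pdev->dev.parent->of_node;
+
+	/* after: of_node_get() on the parent's node, marked as reused */
+	device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);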
+
+Fixes: 435af89786c6 ("rtc: New driver for RTC in Netronix embedded controller")
+Cc: stable@vger.kernel.org # 5.13
+Cc: Jonathan Neuschäfer <j.neuschaefer@gmx.net>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Link: https://patch.msgid.link/20260407122717.2676774-1-johan@kernel.org
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/rtc/rtc-ntxec.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/rtc/rtc-ntxec.c
++++ b/drivers/rtc/rtc-ntxec.c
+@@ -110,7 +110,7 @@ static int ntxec_rtc_probe(struct platfo
+ struct rtc_device *dev;
+ struct ntxec_rtc *rtc;
+
+- pdev->dev.of_node = pdev->dev.parent->of_node;
++ device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
+
+ rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
tpm-avoid-wunused-but-set-variable.patch
loongarch-show-cpu-vulnerabilites-correctly.patch
power-supply-axp288_charger-do-not-cancel-work-before-initializing-it.patch
+randomize_kstack-maintain-kstack_offset-per-task.patch
+mmc-block-use-single-block-write-in-retry.patch
+mmc-sdhci-of-dwcmshc-disable-clock-before-dll-configuration.patch
+arm64-dts-ti-am62-verdin-enable-pullup-for-emmc-data-pins.patch
+crypto-talitos-fix-sec1-32k-ahash-request-limitation.patch
+crypto-talitos-rename-first-last-to-first_desc-last_desc.patch
+tpm-tpm_tis-add-error-logging-for-data-transfer.patch
+tpm-tpm_tis-stop-transmit-if-retries-are-exhausted.patch
+rtc-ntxec-fix-of-node-reference-imbalance.patch
+mm-damon-core-use-time_in_range_open-for-damos-quota-window-start.patch
+userfaultfd-allow-registration-of-ranges-below-mmap_min_addr.patch
+kvm-x86-defer-non-architectural-deliver-of-exception-payload-to-userspace-read.patch
+kvm-nsvm-mark-all-of-vmcb02-dirty-when-restoring-nested-state.patch
+kvm-nsvm-sync-nextrip-to-cached-vmcb12-after-vmrun-of-l2.patch
+kvm-nsvm-sync-interrupt-shadow-to-cached-vmcb12-after-vmrun-of-l2.patch
+kvm-svm-inject-ud-for-invlpga-if-efer.svme-0.patch
+kvm-svm-explicitly-mark-vmcb01-dirty-after-modifying-vmcb-intercepts.patch
+kvm-nsvm-ensure-avic-is-inhibited-when-restoring-a-vcpu-to-guest-mode.patch
+kvm-nsvm-use-vcpu-arch.cr2-when-updating-vmcb12-on-nested-vmexit.patch
+kvm-nsvm-always-inject-a-gp-if-mapping-vmcb12-fails-on-nested-vmrun.patch
+kvm-nsvm-clear-gif-on-nested-vmexit-invalid.patch
+kvm-nsvm-clear-eventinj-fields-in-vmcb12-on-nested-vmexit.patch
+kvm-nsvm-clear-tracking-of-l1-l2-nmi-and-soft-irq-on-nested-vmexit.patch
+kvm-nsvm-add-missing-consistency-check-for-efer-cr0-cr4-and-cs.patch
+kvm-nsvm-add-missing-consistency-check-for-ncr3-validity.patch
--- /dev/null
+From 0471921e2d1043dcc6de5cffb49dd37709521abe Mon Sep 17 00:00:00 2001
+From: Jacqueline Wong <jacqwong@google.com>
+Date: Wed, 15 Apr 2026 16:00:05 +0000
+Subject: tpm: tpm_tis: add error logging for data transfer
+
+From: Jacqueline Wong <jacqwong@google.com>
+
+commit 0471921e2d1043dcc6de5cffb49dd37709521abe upstream.
+
+Add logging to make it easier to determine the reason for a transmit
+failure.
+
+Cc: stable@vger.kernel.org # v6.6+
+Fixes: 280db21e153d8 ("tpm_tis: Resend command to recover from data transfer errors")
+Signed-off-by: Jacqueline Wong <jacqwong@google.com>
+Signed-off-by: Jordan Hand <jhand@google.com>
+Link: https://lore.kernel.org/r/20260415160006.2275325-2-jacqwong@google.com
+Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/tpm/tpm_tis_core.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -472,6 +472,8 @@ static int tpm_tis_send_data(struct tpm_
+ status = tpm_tis_status(chip);
+ if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
+ rc = -EIO;
++ dev_err(&chip->dev, "TPM_STS_DATA_EXPECT should be set. sts = 0x%08x\n",
++ status);
+ goto out_err;
+ }
+ }
+@@ -492,6 +494,8 @@ static int tpm_tis_send_data(struct tpm_
+ status = tpm_tis_status(chip);
+ if (!itpm && (status & TPM_STS_DATA_EXPECT) != 0) {
+ rc = -EIO;
++ dev_err(&chip->dev, "TPM_STS_DATA_EXPECT should be unset. sts = 0x%08x\n",
++ status);
+ goto out_err;
+ }
+
--- /dev/null
+From 949692da7211572fac419b2986b6abc0cd1aeb76 Mon Sep 17 00:00:00 2001
+From: Jacqueline Wong <jacqwong@google.com>
+Date: Wed, 15 Apr 2026 16:00:06 +0000
+Subject: tpm: tpm_tis: stop transmit if retries are exhausted
+
+From: Jacqueline Wong <jacqwong@google.com>
+
+commit 949692da7211572fac419b2986b6abc0cd1aeb76 upstream.
+
+tpm_tis_send_main() will attempt to retry sending data TPM_RETRY times.
+Currently, if those retries are exhausted, the driver will still attempt
+to execute the command. The TPM will be in the wrong state, leading to
+the operation simply timing out.
+
+Instead, if there is still an error after retries are exhausted, return
+that error immediately.
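+
+The resulting send path, condensed (locality handling elided; see the
+hunk below):
+
+	for (try = 0; try < TPM_RETRY; try++) {
+		rc = tpm_tis_send_data(chip, buf, len);
+		if (rc >= 0)
+			break;			/* data accepted */
+		else if (rc != -EAGAIN && rc != -EIO)
+			goto out_err;		/* not recoverable */
+		usleep_range(priv->timeout_min, priv->timeout_max);
+	}
+
+	/* retries exhausted: bail out instead of issuing TPM_STS_GO */
+	if (rc == -EAGAIN || rc == -EIO)
+		goto out_err;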
+
+Cc: stable@vger.kernel.org # v6.6+
+Fixes: 280db21e153d8 ("tpm_tis: Resend command to recover from data transfer errors")
+Signed-off-by: Jacqueline Wong <jacqwong@google.com>
+Signed-off-by: Jordan Hand <jhand@google.com>
+Link: https://lore.kernel.org/r/20260415160006.2275325-3-jacqwong@google.com
+Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/tpm/tpm_tis_core.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -557,11 +557,16 @@ static int tpm_tis_send_main(struct tpm_
+ break;
+ else if (rc != -EAGAIN && rc != -EIO)
+ /* Data transfer failed, not recoverable */
+- return rc;
++ goto out_err;
+
+ usleep_range(priv->timeout_min, priv->timeout_max);
+ }
+
++ if (rc == -EAGAIN || rc == -EIO) {
++ dev_err(&chip->dev, "Exhausted %d tpm_tis_send_data retries\n", TPM_RETRY);
++ goto out_err;
++ }
++
+ /* go and do it */
+ rc = tpm_tis_write8(priv, TPM_STS(priv->locality), TPM_STS_GO);
+ if (rc < 0)
--- /dev/null
+From 161ce69c2c89781784b945d8e281ff2da9dede9c Mon Sep 17 00:00:00 2001
+From: "Denis M. Karpov" <komlomal@gmail.com>
+Date: Thu, 9 Apr 2026 13:33:45 +0300
+Subject: userfaultfd: allow registration of ranges below mmap_min_addr
+
+From: Denis M. Karpov <komlomal@gmail.com>
+
+commit 161ce69c2c89781784b945d8e281ff2da9dede9c upstream.
+
+The current implementation of validate_range() in fs/userfaultfd.c
+performs a hard check against mmap_min_addr. This is redundant because
+UFFDIO_REGISTER operates on memory ranges that must already be backed by a
+VMA.
+
+Enforcing mmap_min_addr or capability checks again in userfaultfd is
+unnecessary and prevents applications like binary translators from using
+UFFD for valid memory regions mapped by the application.
+
+Remove the redundant check for mmap_min_addr.
+
+We started using UFFD instead of the classic mprotect approach in the
+binary translator to track application writes. During development, we
+encountered this bug. The translator cannot control where the translated
+application chooses to map its memory and if the app requires a
+low-address area, UFFD fails, whereas mprotect would work just fine. I
+believe this is a genuine logic bug rather than an improvement, and I
+would appreciate including the fix in stable.
+
+Link: https://lore.kernel.org/20260409103345.15044-1-komlomal@gmail.com
+Fixes: 86039bd3b4e6 ("userfaultfd: add new syscall to provide memory externalization")
+Signed-off-by: Denis M. Karpov <komlomal@gmail.com>
+Reviewed-by: Lorenzo Stoakes <ljs@kernel.org>
+Acked-by: Harry Yoo (Oracle) <harry@kernel.org>
+Reviewed-by: Pedro Falcato <pfalcato@suse.de>
+Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
+Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Cc: Alexander Viro <viro@zeniv.linux.org.uk>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Jann Horn <jannh@google.com>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/userfaultfd.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -1295,8 +1295,6 @@ static __always_inline int validate_unal
+ return -EINVAL;
+ if (!len)
+ return -EINVAL;
+- if (start < mmap_min_addr)
+- return -EINVAL;
+ if (start >= task_size)
+ return -EINVAL;
+ if (len > task_size - start)