--- /dev/null
+From 6f64f866aa1ae6975c95d805ed51d7e9433a0016 Mon Sep 17 00:00:00 2001
+From: Min Li <min15.li@samsung.com>
+Date: Thu, 29 Jun 2023 14:25:17 +0000
+Subject: block: add check that partition length needs to be aligned with block size
+
+From: Min Li <min15.li@samsung.com>
+
+commit 6f64f866aa1ae6975c95d805ed51d7e9433a0016 upstream.
+
+Before calling add partition or resize partition, there is no check on
+whether the length is aligned with the logical block size. If the
+logical block size of the disk is larger than 512 bytes, the partition
+size may not be a multiple of the logical block size. When the last
+sector is then read, bio_truncate() adjusts the bio size, resulting in
+an IO error if the size of the read command is smaller than the
+logical block size. If integrity data is supported, this also results
+in a NULL pointer dereference when calling bio_integrity_free().
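+
+As a hedged illustration (not part of the patch; the device path and
+sizes are hypothetical), a userspace caller that trips the new check
+on a disk with 4096-byte logical blocks might look like this:
+
+  #include <fcntl.h>
+  #include <string.h>
+  #include <unistd.h>
+  #include <sys/ioctl.h>
+  #include <linux/blkpg.h>
+
+  int add_unaligned_partition(void)
+  {
+          struct blkpg_partition part;
+          struct blkpg_ioctl_arg arg;
+          int fd, ret;
+
+          fd = open("/dev/nvme0n1", O_RDWR);
+          if (fd < 0)
+                  return -1;
+
+          memset(&part, 0, sizeof(part));
+          part.pno = 1;
+          part.start = 1 << 20;            /* 1 MiB: aligned to 4096 */
+          part.length = (1 << 20) + 512;   /* not a multiple of 4096 */
+
+          memset(&arg, 0, sizeof(arg));
+          arg.op = BLKPG_ADD_PARTITION;
+          arg.datalen = sizeof(part);
+          arg.data = &part;
+
+          /* with this patch the ioctl fails with EINVAL instead of
+           * creating a partition whose last read can error out */
+          ret = ioctl(fd, BLKPG, &arg);
+          close(fd);
+          return ret;
+  }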
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Min Li <min15.li@samsung.com>
+Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
+Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/r/20230629142517.121241-1-min15.li@samsung.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Ashwin Dayanand Kamat <ashwin.kamat@broadcom.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/ioctl.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -17,7 +17,7 @@ static int blkpg_do_ioctl(struct block_d
+ struct blkpg_partition __user *upart, int op)
+ {
+ struct blkpg_partition p;
+- long long start, length;
++ sector_t start, length;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+@@ -32,6 +32,12 @@ static int blkpg_do_ioctl(struct block_d
+ if (op == BLKPG_DEL_PARTITION)
+ return bdev_del_partition(bdev, p.pno);
+
++ if (p.start < 0 || p.length <= 0 || p.start + p.length < 0)
++ return -EINVAL;
++ /* Check that the partition is aligned to the block size */
++ if (!IS_ALIGNED(p.start | p.length, bdev_logical_block_size(bdev)))
++ return -EINVAL;
++
+ start = p.start >> SECTOR_SHIFT;
+ length = p.length >> SECTOR_SHIFT;
+
+@@ -46,9 +52,6 @@ static int blkpg_do_ioctl(struct block_d
+
+ switch (op) {
+ case BLKPG_ADD_PARTITION:
+- /* check if partition is aligned to blocksize */
+- if (p.start & (bdev_logical_block_size(bdev) - 1))
+- return -EINVAL;
+ return bdev_add_partition(bdev, p.pno, start, length);
+ case BLKPG_RESIZE_PARTITION:
+ return bdev_resize_partition(bdev, p.pno, start, length);
--- /dev/null
+From 803de9000f334b771afacb6ff3e78622916668b0 Mon Sep 17 00:00:00 2001
+From: Vlastimil Babka <vbabka@suse.cz>
+Date: Wed, 21 Feb 2024 12:43:58 +0100
+Subject: mm, vmscan: prevent infinite loop for costly GFP_NOIO | __GFP_RETRY_MAYFAIL allocations
+
+From: Vlastimil Babka <vbabka@suse.cz>
+
+commit 803de9000f334b771afacb6ff3e78622916668b0 upstream.
+
+Sven reports an infinite loop in __alloc_pages_slowpath() for costly order
+__GFP_RETRY_MAYFAIL allocations that are also GFP_NOIO. Such a combination
+can happen in a suspend/resume context where a GFP_KERNEL allocation can
+have __GFP_IO masked out via gfp_allowed_mask.
+
+Quoting Sven:
+
+1. try to do a "costly" allocation (order > PAGE_ALLOC_COSTLY_ORDER)
+ with __GFP_RETRY_MAYFAIL set.
+
+2. page alloc's __alloc_pages_slowpath tries to get a page from the
+ freelist. This fails because there is nothing free of that costly
+ order.
+
+3. page alloc tries to reclaim by calling __alloc_pages_direct_reclaim,
+ which bails out because a zone is ready to be compacted; it pretends
+ to have made a single page of progress.
+
+4. page alloc tries to compact, but this always bails out early because
+ __GFP_IO is not set (it's not passed by the snd allocator, and even
+ if it were, we are suspending so the __GFP_IO flag would be cleared
+ anyway).
+
+5. page alloc believes reclaim progress was made (because of the
+ pretense in item 3) and so it checks whether it should retry
+ compaction. The compaction retry logic thinks it should try again,
+ because:
+ a) reclaim is needed because of the early bail-out in item 4
+ b) a zonelist is suitable for compaction
+
+6. goto 2. indefinite stall.
+
+(end quote)
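+
+As a hedged, userspace-only model (illustrative, not kernel code) of
+why the pre-fix loop never terminates for a GFP_NOIO-like mask:
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  #define FAKE_GFP_IO 0x1u    /* stands in for __GFP_IO */
+
+  /* step 3: reclaim bails because a zone looks compactable, yet
+   * still pretends to have made one page of progress */
+  static unsigned long direct_reclaim(void)
+  {
+          return 1;
+  }
+
+  /* step 4: compaction bails out early whenever __GFP_IO is missing */
+  static bool direct_compact(unsigned int gfp_mask)
+  {
+          return gfp_mask & FAKE_GFP_IO;
+  }
+
+  int main(void)
+  {
+          unsigned int gfp_mask = 0;  /* GFP_NOIO-like: no __GFP_IO */
+          long iterations = 0;
+
+          for (;;) {                  /* step 6: goto 2 */
+                  unsigned long progress = direct_reclaim();
+
+                  if (direct_compact(gfp_mask))
+                          break;      /* never taken without __GFP_IO */
+                  /* step 5, pre-fix: any "progress" justifies another
+                   * retry; the artificial cap below stands in for the
+                   * fix's can_compact check */
+                  if (progress > 0 && iterations++ < 1000000)
+                          continue;
+                  break;
+          }
+          printf("stopped after %ld iterations (unbounded pre-fix)\n",
+                 iterations);
+          return 0;
+  }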
+
+The immediate root cause is that the COMPACT_SKIPPED returned from
+__alloc_pages_direct_compact() (step 4) due to the lack of __GFP_IO is
+confused with an indication of a lack of order-0 pages, and step 5
+then evaluates it in should_compact_retry() as a reason to retry,
+before incrementing and limiting the number of retries. There are,
+however, other places that wrongly assume that compaction can happen
+while we lack __GFP_IO.
+
+To fix this, introduce gfp_compaction_allowed() to abstract the __GFP_IO
+evaluation and switch the open-coded test in try_to_compact_pages() to use
+it.
+
+Also use the new helper in:
+- compaction_ready(), which will make reclaim not bail out in step 3, so
+ there's at least one attempt to actually reclaim, even if chances are
+ small for a costly order
+- in_reclaim_compaction(), which will make should_continue_reclaim()
+  return false so that we don't over-reclaim unnecessarily
+- in __alloc_pages_slowpath() to set a local variable can_compact,
+ which is then used to avoid retrying reclaim/compaction for costly
+ allocations (step 5) if we can't compact and also to skip the early
+  compaction attempt that we do in some cases (see the sketch after
+  this list)
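+
+Condensed from the mm/page_alloc.c hunk below, the third point boils
+down to:
+
+  bool can_compact = gfp_compaction_allowed(gfp_mask);
+
+  /* do not retry costly high order allocations unless they are
+   * __GFP_RETRY_MAYFAIL and we can compact */
+  if (costly_order && (!can_compact ||
+                       !(gfp_mask & __GFP_RETRY_MAYFAIL)))
+          goto nopage;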
+
+Link: https://lkml.kernel.org/r/20240221114357.13655-2-vbabka@suse.cz
+Fixes: 3250845d0526 ("Revert "mm, oom: prevent premature OOM killer invocation for high order request"")
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Reported-by: Sven van Ashbrook <svenva@chromium.org>
+Closes: https://lore.kernel.org/all/CAG-rBihs_xMKb3wrMO1%2B-%2Bp4fowP9oy1pa_OTkfxBzPUVOZF%2Bg@mail.gmail.com/
+Tested-by: Karthikeyan Ramasubramanian <kramasub@chromium.org>
+Cc: Brian Geffon <bgeffon@google.com>
+Cc: Curtis Malainey <cujomalainey@chromium.org>
+Cc: Jaroslav Kysela <perex@perex.cz>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Michal Hocko <mhocko@kernel.org>
+Cc: Takashi Iwai <tiwai@suse.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/gfp.h | 9 +++++++++
+ mm/compaction.c | 7 +------
+ mm/page_alloc.c | 10 ++++++----
+ mm/vmscan.c | 5 ++++-
+ 4 files changed, 20 insertions(+), 11 deletions(-)
+
+--- a/include/linux/gfp.h
++++ b/include/linux/gfp.h
+@@ -623,6 +623,15 @@ static inline bool pm_suspended_storage(
+ }
+ #endif /* CONFIG_PM_SLEEP */
+
++/*
++ * Check if the gfp flags allow compaction - GFP_NOIO is a really
++ * tricky context because the migration might require IO.
++ */
++static inline bool gfp_compaction_allowed(gfp_t gfp_mask)
++{
++ return IS_ENABLED(CONFIG_COMPACTION) && (gfp_mask & __GFP_IO);
++}
++
+ #ifdef CONFIG_CONTIG_ALLOC
+ /* The below functions must be run on a range from a single zone. */
+ extern int alloc_contig_range(unsigned long start, unsigned long end,
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -2466,16 +2466,11 @@ enum compact_result try_to_compact_pages
+ unsigned int alloc_flags, const struct alloc_context *ac,
+ enum compact_priority prio, struct page **capture)
+ {
+- int may_perform_io = gfp_mask & __GFP_IO;
+ struct zoneref *z;
+ struct zone *zone;
+ enum compact_result rc = COMPACT_SKIPPED;
+
+- /*
+- * Check if the GFP flags allow compaction - GFP_NOIO is really
+- * tricky context because the migration might require IO
+- */
+- if (!may_perform_io)
++ if (!gfp_compaction_allowed(gfp_mask))
+ return COMPACT_SKIPPED;
+
+ trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -4644,6 +4644,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, u
+ struct alloc_context *ac)
+ {
+ bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
++ bool can_compact = gfp_compaction_allowed(gfp_mask);
+ const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
+ struct page *page = NULL;
+ unsigned int alloc_flags;
+@@ -4709,7 +4710,7 @@ restart:
+ * Don't try this for allocations that are allowed to ignore
+ * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
+ */
+- if (can_direct_reclaim &&
++ if (can_direct_reclaim && can_compact &&
+ (costly_order ||
+ (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
+ && !gfp_pfmemalloc_allowed(gfp_mask)) {
+@@ -4806,9 +4807,10 @@ retry:
+
+ /*
+ * Do not retry costly high order allocations unless they are
+- * __GFP_RETRY_MAYFAIL
++ * __GFP_RETRY_MAYFAIL and we can compact
+ */
+- if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
++ if (costly_order && (!can_compact ||
++ !(gfp_mask & __GFP_RETRY_MAYFAIL)))
+ goto nopage;
+
+ if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
+@@ -4821,7 +4823,7 @@ retry:
+ * implementation of the compaction depends on the sufficient amount
+ * of free memory (see __compaction_suitable)
+ */
+- if (did_some_progress > 0 &&
++ if (did_some_progress > 0 && can_compact &&
+ should_compact_retry(ac, order, alloc_flags,
+ compact_result, &compact_priority,
+ &compaction_retries))
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2546,7 +2546,7 @@ static void shrink_lruvec(struct lruvec
+ /* Use reclaim/compaction for costly allocs or under memory pressure */
+ static bool in_reclaim_compaction(struct scan_control *sc)
+ {
+- if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
++ if (gfp_compaction_allowed(sc->gfp_mask) && sc->order &&
+ (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
+ sc->priority < DEF_PRIORITY - 2))
+ return true;
+@@ -2873,6 +2873,9 @@ static inline bool compaction_ready(stru
+ unsigned long watermark;
+ enum compact_result suitable;
+
++ if (!gfp_compaction_allowed(sc->gfp_mask))
++ return false;
++
+ suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx);
+ if (suitable == COMPACT_SUCCESS)
+ /* Allocation should succeed already. Don't reclaim. */
--- /dev/null
+From a5ef7d68cea1344cf524f04981c2b3f80bedbb0d Mon Sep 17 00:00:00 2001
+From: Pu Wen <puwen@hygon.cn>
+Date: Thu, 28 Sep 2023 14:59:16 +0800
+Subject: x86/srso: Add SRSO mitigation for Hygon processors
+
+From: Pu Wen <puwen@hygon.cn>
+
+commit a5ef7d68cea1344cf524f04981c2b3f80bedbb0d upstream.
+
+Add mitigation for the Speculative Return Stack Overflow (SRSO)
+vulnerability, which exists on Hygon processors too.
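+
+A hedged sketch (not part of this patch, and simplified from how
+cpu_set_bug_bits() consumes the blacklist upstream; the exact form
+varies by kernel version) of the effect of the new table entry:
+
+  /* a Hygon family 0x18 CPU now matches the SRSO blacklist bit,
+   * so the bug is forced on unless the CPU advertises SRSO_NO */
+  if (cpu_matches(cpu_vuln_blacklist, SRSO) &&
+      !cpu_has(c, X86_FEATURE_SRSO_NO))
+          setup_force_cpu_bug(X86_BUG_SRSO);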
+
+Signed-off-by: Pu Wen <puwen@hygon.cn>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/tencent_4A14812842F104E93AA722EC939483CEFF05@qq.com
+Signed-off-by: Ashwin Dayanand Kamat <ashwin.kamat@broadcom.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/common.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1177,7 +1177,7 @@ static const struct x86_cpu_id cpu_vuln_
+ VULNBL_AMD(0x15, RETBLEED),
+ VULNBL_AMD(0x16, RETBLEED),
+ VULNBL_AMD(0x17, RETBLEED | SRSO),
+- VULNBL_HYGON(0x18, RETBLEED),
++ VULNBL_HYGON(0x18, RETBLEED | SRSO),
+ VULNBL_AMD(0x19, SRSO),
+ {}
+ };