6.11-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 2 Oct 2024 10:32:17 +0000 (12:32 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 2 Oct 2024 10:32:17 +0000 (12:32 +0200)
added patches:
bpf-lsm-set-bpf_lsm_blob_sizes.lbs_task-to-0.patch
compiler.h-specify-correct-attribute-for-.rodata..c_jump_table.patch
dm-verity-restart-or-panic-on-an-i-o-error.patch
exfat-resolve-memory-leak-from-exfat_create_upcase_table.patch
fbdev-xen-fbfront-assign-fb_info-device.patch
i2c-aspeed-update-the-stop-sw-state-when-the-bus-recovery-occurs.patch
i2c-isch-add-missed-else.patch
i2c-xiic-try-re-initialization-on-bus-busy-timeout.patch
lockdep-fix-deadlock-issue-between-lockdep-and-rcu.patch
mm-change-vmf_anon_prepare-to-__vmf_anon_prepare.patch
mm-damon-vaddr-protect-vma-traversal-in-__damon_va_thre_regions-with-rcu-read-lock.patch
mm-huge_memory-ensure-huge_zero_folio-won-t-have-large_rmappable-flag-set.patch
mm-hugetlb.c-fix-uaf-of-vma-in-hugetlb-fault-pathway.patch
mm-hugetlb_vmemmap-batch-hvo-work-when-demoting.patch
mm-only-enforce-minimum-stack-gap-size-if-it-s-sensible.patch
module-fix-kcov-ignored-file-name.patch
s390-ftrace-avoid-calling-unwinder-in-ftrace_return_address.patch
selftest-mm-mseal-fix-test_seal_mremap_move_dontunmap_anyaddr.patch
spi-fspi-add-support-for-imx8ulp.patch
tpm-export-tpm2_sessions_init-to-fix-ibmvtpm-building.patch

22 files changed:
queue-6.11/bpf-lsm-set-bpf_lsm_blob_sizes.lbs_task-to-0.patch [new file with mode: 0644]
queue-6.11/compiler.h-specify-correct-attribute-for-.rodata..c_jump_table.patch [new file with mode: 0644]
queue-6.11/dm-verity-restart-or-panic-on-an-i-o-error.patch [new file with mode: 0644]
queue-6.11/exfat-resolve-memory-leak-from-exfat_create_upcase_table.patch [new file with mode: 0644]
queue-6.11/fbdev-xen-fbfront-assign-fb_info-device.patch [new file with mode: 0644]
queue-6.11/i2c-aspeed-update-the-stop-sw-state-when-the-bus-recovery-occurs.patch [new file with mode: 0644]
queue-6.11/i2c-isch-add-missed-else.patch [new file with mode: 0644]
queue-6.11/i2c-xiic-try-re-initialization-on-bus-busy-timeout.patch [new file with mode: 0644]
queue-6.11/lockdep-fix-deadlock-issue-between-lockdep-and-rcu.patch [new file with mode: 0644]
queue-6.11/lsm-infrastructure-management-of-the-sock-security.patch
queue-6.11/mm-change-vmf_anon_prepare-to-__vmf_anon_prepare.patch [new file with mode: 0644]
queue-6.11/mm-damon-vaddr-protect-vma-traversal-in-__damon_va_thre_regions-with-rcu-read-lock.patch [new file with mode: 0644]
queue-6.11/mm-huge_memory-ensure-huge_zero_folio-won-t-have-large_rmappable-flag-set.patch [new file with mode: 0644]
queue-6.11/mm-hugetlb.c-fix-uaf-of-vma-in-hugetlb-fault-pathway.patch [new file with mode: 0644]
queue-6.11/mm-hugetlb_vmemmap-batch-hvo-work-when-demoting.patch [new file with mode: 0644]
queue-6.11/mm-only-enforce-minimum-stack-gap-size-if-it-s-sensible.patch [new file with mode: 0644]
queue-6.11/module-fix-kcov-ignored-file-name.patch [new file with mode: 0644]
queue-6.11/s390-ftrace-avoid-calling-unwinder-in-ftrace_return_address.patch [new file with mode: 0644]
queue-6.11/selftest-mm-mseal-fix-test_seal_mremap_move_dontunmap_anyaddr.patch [new file with mode: 0644]
queue-6.11/series
queue-6.11/spi-fspi-add-support-for-imx8ulp.patch [new file with mode: 0644]
queue-6.11/tpm-export-tpm2_sessions_init-to-fix-ibmvtpm-building.patch [new file with mode: 0644]

diff --git a/queue-6.11/bpf-lsm-set-bpf_lsm_blob_sizes.lbs_task-to-0.patch b/queue-6.11/bpf-lsm-set-bpf_lsm_blob_sizes.lbs_task-to-0.patch
new file mode 100644 (file)
index 0000000..bdaadda
--- /dev/null
@@ -0,0 +1,36 @@
+From 300a90b2cb5d442879e6398920c49aebbd5c8e40 Mon Sep 17 00:00:00 2001
+From: Song Liu <song@kernel.org>
+Date: Tue, 10 Sep 2024 22:55:08 -0700
+Subject: bpf: lsm: Set bpf_lsm_blob_sizes.lbs_task to 0
+
+From: Song Liu <song@kernel.org>
+
+commit 300a90b2cb5d442879e6398920c49aebbd5c8e40 upstream.
+
+bpf task local storage is now using task_struct->bpf_storage, so
+bpf_lsm_blob_sizes.lbs_task is no longer needed. Remove it to save some
+memory.
+
+Fixes: a10787e6d58c ("bpf: Enable task local storage for tracing programs")
+Cc: stable@vger.kernel.org
+Cc: KP Singh <kpsingh@kernel.org>
+Cc: Matt Bobrowski <mattbobrowski@google.com>
+Signed-off-by: Song Liu <song@kernel.org>
+Acked-by: Matt Bobrowski <mattbobrowski@google.com>
+Link: https://lore.kernel.org/r/20240911055508.9588-1-song@kernel.org
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ security/bpf/hooks.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/security/bpf/hooks.c
++++ b/security/bpf/hooks.c
+@@ -31,7 +31,6 @@ static int __init bpf_lsm_init(void)
+ struct lsm_blob_sizes bpf_lsm_blob_sizes __ro_after_init = {
+       .lbs_inode = sizeof(struct bpf_storage_blob),
+-      .lbs_task = sizeof(struct bpf_storage_blob),
+ };
+ DEFINE_LSM(bpf) = {
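For background, each LSM declares the per-object scratch space it needs in a
struct lsm_blob_sizes; the infrastructure sums the requests across all LSMs,
and every LSM then finds its own slice at a fixed offset inside the shared
blob. A minimal sketch of that pattern, mirroring the kernel's bpf_inode()
helper for the inode blob this LSM still declares (the sketch is
illustrative, not part of the patch):

    /* each LSM asks for its per-object space; .lbs_task is gone because
     * task storage now lives directly in task_struct->bpf_storage */
    struct lsm_blob_sizes bpf_lsm_blob_sizes __ro_after_init = {
            .lbs_inode = sizeof(struct bpf_storage_blob),
    };

    /* at init time lbs_inode is rewritten to this LSM's offset, and the
     * LSM reaches its slice of the shared blob by simple pointer math */
    static inline struct bpf_storage_blob *bpf_inode(const struct inode *inode)
    {
            if (unlikely(!inode->i_security))
                    return NULL;
            return inode->i_security + bpf_lsm_blob_sizes.lbs_inode;
    }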
diff --git a/queue-6.11/compiler.h-specify-correct-attribute-for-.rodata..c_jump_table.patch b/queue-6.11/compiler.h-specify-correct-attribute-for-.rodata..c_jump_table.patch
new file mode 100644 (file)
index 0000000..1b399a3
--- /dev/null
@@ -0,0 +1,66 @@
+From c5b1184decc819756ae549ba54c63b6790c4ddfd Mon Sep 17 00:00:00 2001
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+Date: Tue, 24 Sep 2024 14:27:10 +0800
+Subject: compiler.h: specify correct attribute for .rodata..c_jump_table
+
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+
+commit c5b1184decc819756ae549ba54c63b6790c4ddfd upstream.
+
+Currently, there is an assembler warning when generating kernel/bpf/core.o
+under CONFIG_OBJTOOL with the LoongArch compiler toolchain:
+
+  Warning: setting incorrect section attributes for .rodata..c_jump_table
+
+This is because the section ".rodata..c_jump_table" should be read-only,
+but there is a "W" (writable) part in its flags:
+
+  $ readelf -S kernel/bpf/core.o | grep -A 1 "rodata..c"
+  [34] .rodata..c_j[...] PROGBITS         0000000000000000  0000d2e0
+       0000000000000800  0000000000000000  WA       0     0     8
+
+There is no such issue on x86 because the generated section flag is only
+"A" (allocatable). In order to silence the warning on LoongArch, specify
+the attribute like ".rodata..c_jump_table,\"a\",@progbits #" explicitly;
+the section attribute of ".rodata..c_jump_table" is then read-only
+in the kernel/bpf/core.o file.
+
+Before:
+
+  $ objdump -h kernel/bpf/core.o | grep -A 1 "rodata..c"
+   21 .rodata..c_jump_table 00000800  0000000000000000  0000000000000000  0000d2e0  2**3
+                  CONTENTS, ALLOC, LOAD, RELOC, DATA
+
+After:
+
+  $ objdump -h kernel/bpf/core.o | grep -A 1 "rodata..c"
+   21 .rodata..c_jump_table 00000800  0000000000000000  0000000000000000  0000d2e0  2**3
+                  CONTENTS, ALLOC, LOAD, RELOC, READONLY, DATA
+
+By the way, AFAICT, the root cause may be related to the different
+compiler behavior of various arches, so to some extent this change is a
+workaround for LoongArch; it also has no effect on x86, which was the
+only port supported by objtool before LoongArch.
+
+Link: https://lkml.kernel.org/r/20240924062710.1243-1-yangtiezhu@loongson.cn
+Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
+Cc: Josh Poimboeuf <jpoimboe@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: <stable@vger.kernel.org>   [6.9+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/compiler.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -133,7 +133,7 @@ void ftrace_likely_update(struct ftrace_
+ #define annotate_unreachable() __annotate_unreachable(__COUNTER__)
+ /* Annotate a C jump table to allow objtool to follow the code flow */
+-#define __annotate_jump_table __section(".rodata..c_jump_table")
++#define __annotate_jump_table __section(".rodata..c_jump_table,\"a\",@progbits #")
+ #else /* !CONFIG_OBJTOOL */
+ #define annotate_reachable()
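The quoted attribute string works by an escape trick. When emitting the
.section directive the compiler appends its own flag string, which on the
affected LoongArch toolchain marks the section writable; on gas-based
assemblers for x86 and LoongArch, '#' starts a comment. A standalone sketch
(assuming GCC; the table contents are illustrative):

    /* sketch.c -- gcc -c sketch.c && objdump -h sketch.o */
    #define __section(s) __attribute__((__section__(s)))

    /* The emitted directive becomes
     *   .section .rodata..c_jump_table,"a",@progbits #<compiler-chosen flags>
     * so everything after '#' is an assembler comment and the section is
     * created with only the read-only "a" flag. */
    static const int jump_table[4]
            __section(".rodata..c_jump_table,\"a\",@progbits #")
            __attribute__((used)) = { 1, 2, 3, 4 };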
diff --git a/queue-6.11/dm-verity-restart-or-panic-on-an-i-o-error.patch b/queue-6.11/dm-verity-restart-or-panic-on-an-i-o-error.patch
new file mode 100644 (file)
index 0000000..ad5eff0
--- /dev/null
@@ -0,0 +1,69 @@
+From e6a3531dd542cb127c8de32ab1e54a48ae19962b Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Tue, 24 Sep 2024 15:18:29 +0200
+Subject: dm-verity: restart or panic on an I/O error
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit e6a3531dd542cb127c8de32ab1e54a48ae19962b upstream.
+
+Maxim Suhanov reported that dm-verity doesn't crash if an I/O error
+happens. In theory, this could be used to subvert security, because an
+attacker can create sectors that return an error via the Write
+Uncorrectable command. Some programs may misbehave if they have to deal
+with EIO.
+
+This commit fixes dm-verity, so that if "panic_on_corruption" or
+"restart_on_corruption" was specified and an I/O error happens, the
+machine will panic or restart.
+
+This commit also changes kernel_restart to emergency_restart -
+kernel_restart calls reboot notifiers and these reboot notifiers may wait
+for the bio that failed. emergency_restart doesn't call the notifiers.
+
+Reported-by: Maxim Suhanov <dfirblog@gmail.com>
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/dm-verity-target.c |   23 +++++++++++++++++++++--
+ 1 file changed, 21 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -272,8 +272,10 @@ out:
+       if (v->mode == DM_VERITY_MODE_LOGGING)
+               return 0;
+-      if (v->mode == DM_VERITY_MODE_RESTART)
+-              kernel_restart("dm-verity device corrupted");
++      if (v->mode == DM_VERITY_MODE_RESTART) {
++              pr_emerg("dm-verity device corrupted\n");
++              emergency_restart();
++      }
+       if (v->mode == DM_VERITY_MODE_PANIC)
+               panic("dm-verity device corrupted");
+@@ -596,6 +598,23 @@ static void verity_finish_io(struct dm_v
+       if (!static_branch_unlikely(&use_bh_wq_enabled) || !io->in_bh)
+               verity_fec_finish_io(io);
++      if (unlikely(status != BLK_STS_OK) &&
++          unlikely(!(bio->bi_opf & REQ_RAHEAD)) &&
++          !verity_is_system_shutting_down()) {
++              if (v->mode == DM_VERITY_MODE_RESTART ||
++                  v->mode == DM_VERITY_MODE_PANIC)
++                      DMERR_LIMIT("%s has error: %s", v->data_dev->name,
++                                      blk_status_to_str(status));
++
++              if (v->mode == DM_VERITY_MODE_RESTART) {
++                      pr_emerg("dm-verity device corrupted\n");
++                      emergency_restart();
++              }
++
++              if (v->mode == DM_VERITY_MODE_PANIC)
++                      panic("dm-verity device corrupted");
++      }
++
+       bio_endio(bio);
+ }
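The same restart-or-panic policy now runs on both the corruption path and
the I/O-error path above; condensed into one illustrative helper (the name
is made up, the patch open-codes the logic twice), the reasoning behind the
emergency_restart() switch reads:

    static void verity_restart_or_panic(struct dm_verity *v)
    {
            if (v->mode == DM_VERITY_MODE_RESTART) {
                    pr_emerg("dm-verity device corrupted\n");
                    /* emergency_restart() bypasses the reboot notifier
                     * chain; kernel_restart() would run notifiers that may
                     * block waiting on the very bio that just failed */
                    emergency_restart();
            }
            if (v->mode == DM_VERITY_MODE_PANIC)
                    panic("dm-verity device corrupted");
    }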
diff --git a/queue-6.11/exfat-resolve-memory-leak-from-exfat_create_upcase_table.patch b/queue-6.11/exfat-resolve-memory-leak-from-exfat_create_upcase_table.patch
new file mode 100644 (file)
index 0000000..3a75484
--- /dev/null
@@ -0,0 +1,42 @@
+From c290fe508eee36df1640c3cb35dc8f89e073c8a8 Mon Sep 17 00:00:00 2001
+From: Daniel Yang <danielyangkang@gmail.com>
+Date: Mon, 16 Sep 2024 16:05:06 -0700
+Subject: exfat: resolve memory leak from exfat_create_upcase_table()
+
+From: Daniel Yang <danielyangkang@gmail.com>
+
+commit c290fe508eee36df1640c3cb35dc8f89e073c8a8 upstream.
+
+If exfat_load_upcase_table() reaches the end and returns -EINVAL,
+the memory it allocated doesn't get freed, and
+exfat_load_default_upcase_table() then allocates more memory,
+leading to a memory leak.
+
+Here's link to syzkaller crash report illustrating this issue:
+https://syzkaller.appspot.com/text?tag=CrashReport&x=1406c201980000
+
+Reported-by: syzbot+e1c69cadec0f1a078e3d@syzkaller.appspotmail.com
+Fixes: a13d1a4de3b0 ("exfat: move freeing sbi, upcase table and dropping nls into rcu-delayed helper")
+Cc: stable@vger.kernel.org
+Signed-off-by: Daniel Yang <danielyangkang@gmail.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/exfat/nls.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/fs/exfat/nls.c
++++ b/fs/exfat/nls.c
+@@ -779,8 +779,11 @@ int exfat_create_upcase_table(struct sup
+                               le32_to_cpu(ep->dentry.upcase.checksum));
+                       brelse(bh);
+-                      if (ret && ret != -EIO)
++                      if (ret && ret != -EIO) {
++                              /* free memory from exfat_load_upcase_table call */
++                              exfat_free_upcase_table(sbi);
+                               goto load_default;
++                      }
+                       /* load successfully */
+                       return ret;
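The shape of this leak is easy to see outside the kernel: a helper
allocates, then fails, and the caller falls back to a second allocator
without releasing the first attempt. A self-contained userspace analog
(all names illustrative, not the exfat API):

    #include <stdlib.h>

    struct table { void *buf; };

    static int load_custom(struct table *t)
    {
            t->buf = malloc(4096);  /* helper allocates ... */
            return -22;             /* ... then fails validation (-EINVAL) */
    }

    static int load_default(struct table *t)
    {
            t->buf = malloc(4096);  /* fresh allocation for the default table */
            return t->buf ? 0 : -12;
    }

    static int load(struct table *t)
    {
            if (load_custom(t) != 0) {
                    free(t->buf);   /* the fix: drop the partial allocation, */
                    t->buf = NULL;  /* otherwise load_default() leaks it     */
                    return load_default(t);
            }
            return 0;
    }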
diff --git a/queue-6.11/fbdev-xen-fbfront-assign-fb_info-device.patch b/queue-6.11/fbdev-xen-fbfront-assign-fb_info-device.patch
new file mode 100644 (file)
index 0000000..d528b5e
--- /dev/null
@@ -0,0 +1,43 @@
+From c2af2a45560bd4046c2e109152acde029ed0acc2 Mon Sep 17 00:00:00 2001
+From: Jason Andryuk <jason.andryuk@amd.com>
+Date: Mon, 9 Sep 2024 22:09:16 -0400
+Subject: fbdev: xen-fbfront: Assign fb_info->device
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jason Andryuk <jason.andryuk@amd.com>
+
+commit c2af2a45560bd4046c2e109152acde029ed0acc2 upstream.
+
+Probing xen-fbfront faults in video_is_primary_device().  The passed-in
+struct device is NULL since xen-fbfront doesn't assign it and the
+memory is kzalloc()-ed.  Assign fb_info->device to avoid this.
+
+This was exposed by the conversion of fb_is_primary_device() to
+video_is_primary_device() which dropped a NULL check for struct device.
+
+Fixes: f178e96de7f0 ("arch: Remove struct fb_info from video helpers")
+Reported-by: Arthur Borsboom <arthurborsboom@gmail.com>
+Closes: https://lore.kernel.org/xen-devel/CALUcmUncX=LkXWeiSiTKsDY-cOe8QksWhFvcCneOKfrKd0ZajA@mail.gmail.com/
+Tested-by: Arthur Borsboom <arthurborsboom@gmail.com>
+CC: stable@vger.kernel.org
+Signed-off-by: Jason Andryuk <jason.andryuk@amd.com>
+Reviewed-by: Roger Pau MonnĂ© <roger.pau@citrix.com>
+Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/video/fbdev/xen-fbfront.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/video/fbdev/xen-fbfront.c
++++ b/drivers/video/fbdev/xen-fbfront.c
+@@ -407,6 +407,7 @@ static int xenfb_probe(struct xenbus_dev
+       /* complete the abuse: */
+       fb_info->pseudo_palette = fb_info->par;
+       fb_info->par = info;
++      fb_info->device = &dev->dev;
+       fb_info->screen_buffer = info->fb;
diff --git a/queue-6.11/i2c-aspeed-update-the-stop-sw-state-when-the-bus-recovery-occurs.patch b/queue-6.11/i2c-aspeed-update-the-stop-sw-state-when-the-bus-recovery-occurs.patch
new file mode 100644 (file)
index 0000000..ac5021c
--- /dev/null
@@ -0,0 +1,63 @@
+From 93701d3b84ac5f3ea07259d4ced405c53d757985 Mon Sep 17 00:00:00 2001
+From: Tommy Huang <tommy_huang@aspeedtech.com>
+Date: Wed, 11 Sep 2024 17:39:51 +0800
+Subject: i2c: aspeed: Update the stop sw state when the bus recovery occurs
+
+From: Tommy Huang <tommy_huang@aspeedtech.com>
+
+commit 93701d3b84ac5f3ea07259d4ced405c53d757985 upstream.
+
+When i2c bus recovery occurs, the driver sends an i2c stop command
+while SCL is low. In this case the SW state still keeps its original
+value. Under multi-master usage, i2c bus recovery is called when an
+i2c transfer timeout occurs. Issue the stop command through the
+aspeed_i2c_do_stop() function so that master_state is updated too.
+
+Fixes: f327c686d3ba ("i2c: aspeed: added driver for Aspeed I2C")
+Cc: stable@vger.kernel.org # v4.13+
+Signed-off-by: Tommy Huang <tommy_huang@aspeedtech.com>
+Signed-off-by: Andi Shyti <andi.shyti@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i2c/busses/i2c-aspeed.c |   16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/drivers/i2c/busses/i2c-aspeed.c
++++ b/drivers/i2c/busses/i2c-aspeed.c
+@@ -170,6 +170,13 @@ struct aspeed_i2c_bus {
+ static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus);
++/* precondition: bus.lock has been acquired. */
++static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus)
++{
++      bus->master_state = ASPEED_I2C_MASTER_STOP;
++      writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
++}
++
+ static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus)
+ {
+       unsigned long time_left, flags;
+@@ -187,7 +194,7 @@ static int aspeed_i2c_recover_bus(struct
+                       command);
+               reinit_completion(&bus->cmd_complete);
+-              writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
++              aspeed_i2c_do_stop(bus);
+               spin_unlock_irqrestore(&bus->lock, flags);
+               time_left = wait_for_completion_timeout(
+@@ -391,13 +398,6 @@ static void aspeed_i2c_do_start(struct a
+ }
+ /* precondition: bus.lock has been acquired. */
+-static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus)
+-{
+-      bus->master_state = ASPEED_I2C_MASTER_STOP;
+-      writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
+-}
+-
+-/* precondition: bus.lock has been acquired. */
+ static void aspeed_i2c_next_msg_or_stop(struct aspeed_i2c_bus *bus)
+ {
+       if (bus->msgs_index + 1 < bus->msgs_count) {
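Taken together, the two hunks move the helper ahead of
aspeed_i2c_recover_bus() and route the recovery STOP through it, so the
software state machine follows the STOP that was actually put on the wire.
Condensed (surrounding context abbreviated):

    static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus)
    {
            bus->master_state = ASPEED_I2C_MASTER_STOP;
            writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
    }

    /* in aspeed_i2c_recover_bus(): */
    reinit_completion(&bus->cmd_complete);
    aspeed_i2c_do_stop(bus);        /* was a bare writel() of the STOP command,
                                     * which left master_state stale */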
diff --git a/queue-6.11/i2c-isch-add-missed-else.patch b/queue-6.11/i2c-isch-add-missed-else.patch
new file mode 100644 (file)
index 0000000..dfbf4de
--- /dev/null
@@ -0,0 +1,34 @@
+From 1db4da55070d6a2754efeb3743f5312fc32f5961 Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Wed, 11 Sep 2024 18:39:14 +0300
+Subject: i2c: isch: Add missed 'else'
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit 1db4da55070d6a2754efeb3743f5312fc32f5961 upstream.
+
+In accordance with the existing comment and code analysis,
+it is quite likely that there is a missed 'else' for when the
+adapter times out. Add it.
+
+Fixes: 5bc1200852c3 ("i2c: Add Intel SCH SMBus support")
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Cc: <stable@vger.kernel.org> # v2.6.27+
+Signed-off-by: Andi Shyti <andi.shyti@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i2c/busses/i2c-isch.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/i2c/busses/i2c-isch.c
++++ b/drivers/i2c/busses/i2c-isch.c
+@@ -99,8 +99,7 @@ static int sch_transaction(void)
+       if (retries > MAX_RETRIES) {
+               dev_err(&sch_adapter.dev, "SMBus Timeout!\n");
+               result = -ETIMEDOUT;
+-      }
+-      if (temp & 0x04) {
++      } else if (temp & 0x04) {
+               result = -EIO;
+               dev_dbg(&sch_adapter.dev, "Bus collision! SMBus may be "
+                       "locked until next hard reset. (sorry!)\n");
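Without the 'else' the two checks ran back to back, so a transaction that
timed out while the collision bit happened to be set had its -ETIMEDOUT
overwritten by -EIO. The corrected control flow treats the conditions as
alternatives:

    /* condensed view of the fixed branch in sch_transaction() */
    if (retries > MAX_RETRIES) {
            result = -ETIMEDOUT;    /* adapter timed out */
    } else if (temp & 0x04) {
            result = -EIO;          /* bus collision, checked only if no timeout */
    }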
diff --git a/queue-6.11/i2c-xiic-try-re-initialization-on-bus-busy-timeout.patch b/queue-6.11/i2c-xiic-try-re-initialization-on-bus-busy-timeout.patch
new file mode 100644 (file)
index 0000000..c3ad03d
--- /dev/null
@@ -0,0 +1,100 @@
+From 1d4a1adbed2582444aaf97671858b7d12915bd05 Mon Sep 17 00:00:00 2001
+From: Robert Hancock <robert.hancock@calian.com>
+Date: Wed, 11 Sep 2024 22:16:53 +0200
+Subject: i2c: xiic: Try re-initialization on bus busy timeout
+
+From: Robert Hancock <robert.hancock@calian.com>
+
+commit 1d4a1adbed2582444aaf97671858b7d12915bd05 upstream.
+
+In the event that the I2C bus was powered down when the I2C controller
+driver loads, or some spurious pulses occur on the I2C bus, it is
+possible that the controller detects a spurious I2C "start" condition.
+In this situation it may continue to report that the bus is busy
+indefinitely and block the controller from working.
+
+The "single-master" DT flag can be specified to disable bus busy checks
+entirely, but this may not be safe to use in situations where other I2C
+masters may potentially exist.
+
+In the event that the controller reports "bus busy" for too long when
+starting a transaction, we can try reinitializing the controller to see
+if the busy condition clears. This allows recovering from this scenario.
+
+Fixes: e1d5b6598cdc ("i2c: Add support for Xilinx XPS IIC Bus Interface")
+Signed-off-by: Robert Hancock <robert.hancock@calian.com>
+Cc: <stable@vger.kernel.org> # v2.6.34+
+Reviewed-by: Manikanta Guntupalli <manikanta.guntupalli@amd.com>
+Acked-by: Michal Simek <michal.simek@amd.com>
+Signed-off-by: Andi Shyti <andi.shyti@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/i2c/busses/i2c-xiic.c |   41 ++++++++++++++++++++++++++---------------
+ 1 file changed, 26 insertions(+), 15 deletions(-)
+
+--- a/drivers/i2c/busses/i2c-xiic.c
++++ b/drivers/i2c/busses/i2c-xiic.c
+@@ -844,23 +844,11 @@ static int xiic_bus_busy(struct xiic_i2c
+       return (sr & XIIC_SR_BUS_BUSY_MASK) ? -EBUSY : 0;
+ }
+-static int xiic_busy(struct xiic_i2c *i2c)
++static int xiic_wait_not_busy(struct xiic_i2c *i2c)
+ {
+       int tries = 3;
+       int err;
+-      if (i2c->tx_msg || i2c->rx_msg)
+-              return -EBUSY;
+-
+-      /* In single master mode bus can only be busy, when in use by this
+-       * driver. If the register indicates bus being busy for some reason we
+-       * should ignore it, since bus will never be released and i2c will be
+-       * stuck forever.
+-       */
+-      if (i2c->singlemaster) {
+-              return 0;
+-      }
+-
+       /* for instance if previous transfer was terminated due to TX error
+        * it might be that the bus is on it's way to become available
+        * give it at most 3 ms to wake
+@@ -1104,13 +1092,36 @@ static int xiic_start_xfer(struct xiic_i
+       mutex_lock(&i2c->lock);
+-      ret = xiic_busy(i2c);
+-      if (ret) {
++      if (i2c->tx_msg || i2c->rx_msg) {
+               dev_err(i2c->adap.dev.parent,
+                       "cannot start a transfer while busy\n");
++              ret = -EBUSY;
+               goto out;
+       }
++      /* In single master mode bus can only be busy, when in use by this
++       * driver. If the register indicates bus being busy for some reason we
++       * should ignore it, since bus will never be released and i2c will be
++       * stuck forever.
++       */
++      if (!i2c->singlemaster) {
++              ret = xiic_wait_not_busy(i2c);
++              if (ret) {
++                      /* If the bus is stuck in a busy state, such as due to spurious low
++                       * pulses on the bus causing a false start condition to be detected,
++                       * then try to recover by re-initializing the controller and check
++                       * again if the bus is still busy.
++                       */
++                      dev_warn(i2c->adap.dev.parent, "I2C bus busy timeout, reinitializing\n");
++                      ret = xiic_reinit(i2c);
++                      if (ret)
++                              goto out;
++                      ret = xiic_wait_not_busy(i2c);
++                      if (ret)
++                              goto out;
++              }
++      }
++
+       i2c->tx_msg = msgs;
+       i2c->rx_msg = NULL;
+       i2c->nmsgs = num;
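Condensed, the reworked start-of-transfer flow is: check the driver's own
bookkeeping first, then (multi-master only) wait for the bus, and on
timeout reinitialize the controller once and re-check before giving up
(names as in the patch, locking and error paths abbreviated):

    if (i2c->tx_msg || i2c->rx_msg)
            return -EBUSY;                          /* driver-side busy */

    if (!i2c->singlemaster) {
            ret = xiic_wait_not_busy(i2c);          /* up to ~3 ms grace */
            if (ret) {
                    ret = xiic_reinit(i2c);         /* clear a stuck start */
                    if (!ret)
                            ret = xiic_wait_not_busy(i2c);
                    if (ret)
                            return ret;             /* still busy: give up */
            }
    }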
diff --git a/queue-6.11/lockdep-fix-deadlock-issue-between-lockdep-and-rcu.patch b/queue-6.11/lockdep-fix-deadlock-issue-between-lockdep-and-rcu.patch
new file mode 100644 (file)
index 0000000..5d3fe62
--- /dev/null
@@ -0,0 +1,215 @@
+From a6f88ac32c6e63e69c595bfae220d8641704c9b7 Mon Sep 17 00:00:00 2001
+From: Zhiguo Niu <zhiguo.niu@unisoc.com>
+Date: Thu, 20 Jun 2024 22:54:34 +0000
+Subject: lockdep: fix deadlock issue between lockdep and rcu
+
+From: Zhiguo Niu <zhiguo.niu@unisoc.com>
+
+commit a6f88ac32c6e63e69c595bfae220d8641704c9b7 upstream.
+
+There is a deadlock scenario between lockdep and rcu when the
+rcu nocb feature is enabled, as shown in the following call stacks:
+
+     rcuop/x
+-000|queued_spin_lock_slowpath(lock = 0xFFFFFF817F2A8A80, val = ?)
+-001|queued_spin_lock(inline) // try to hold nocb_gp_lock
+-001|do_raw_spin_lock(lock = 0xFFFFFF817F2A8A80)
+-002|__raw_spin_lock_irqsave(inline)
+-002|_raw_spin_lock_irqsave(lock = 0xFFFFFF817F2A8A80)
+-003|wake_nocb_gp_defer(inline)
+-003|__call_rcu_nocb_wake(rdp = 0xFFFFFF817F30B680)
+-004|__call_rcu_common(inline)
+-004|call_rcu(head = 0xFFFFFFC082EECC28, func = ?)
+-005|call_rcu_zapped(inline)
+-005|free_zapped_rcu(ch = ?)// hold graph lock
+-006|rcu_do_batch(rdp = 0xFFFFFF817F245680)
+-007|nocb_cb_wait(inline)
+-007|rcu_nocb_cb_kthread(arg = 0xFFFFFF817F245680)
+-008|kthread(_create = 0xFFFFFF80803122C0)
+-009|ret_from_fork(asm)
+
+     rcuop/y
+-000|queued_spin_lock_slowpath(lock = 0xFFFFFFC08291BBC8, val = 0)
+-001|queued_spin_lock()
+-001|lockdep_lock()
+-001|graph_lock() // try to hold graph lock
+-002|lookup_chain_cache_add()
+-002|validate_chain()
+-003|lock_acquire
+-004|_raw_spin_lock_irqsave(lock = 0xFFFFFF817F211D80)
+-005|lock_timer_base(inline)
+-006|mod_timer(inline)
+-006|wake_nocb_gp_defer(inline)// hold nocb_gp_lock
+-006|__call_rcu_nocb_wake(rdp = 0xFFFFFF817F2A8680)
+-007|__call_rcu_common(inline)
+-007|call_rcu(head = 0xFFFFFFC0822E0B58, func = ?)
+-008|call_rcu_hurry(inline)
+-008|rcu_sync_call(inline)
+-008|rcu_sync_func(rhp = 0xFFFFFFC0822E0B58)
+-009|rcu_do_batch(rdp = 0xFFFFFF817F266680)
+-010|nocb_cb_wait(inline)
+-010|rcu_nocb_cb_kthread(arg = 0xFFFFFF817F266680)
+-011|kthread(_create = 0xFFFFFF8080363740)
+-012|ret_from_fork(asm)
+
+rcuop/x and rcuop/y are rcu nocb threads with the same nocb gp thread.
+This patch releases the graph lock before lockdep calls call_rcu().
+
+Fixes: a0b0fd53e1e6 ("locking/lockdep: Free lock classes that are no longer in use")
+Cc: stable@vger.kernel.org
+Cc: Boqun Feng <boqun.feng@gmail.com>
+Cc: Waiman Long <longman@redhat.com>
+Cc: Carlos Llamas <cmllamas@google.com>
+Cc: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Zhiguo Niu <zhiguo.niu@unisoc.com>
+Signed-off-by: Xuewen Yan <xuewen.yan@unisoc.com>
+Reviewed-by: Waiman Long <longman@redhat.com>
+Reviewed-by: Carlos Llamas <cmllamas@google.com>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Carlos Llamas <cmllamas@google.com>
+Acked-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
+Link: https://lore.kernel.org/r/20240620225436.3127927-1-cmllamas@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/locking/lockdep.c |   48 +++++++++++++++++++++++++++++++----------------
+ 1 file changed, 32 insertions(+), 16 deletions(-)
+
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -6196,25 +6196,27 @@ static struct pending_free *get_pending_
+ static void free_zapped_rcu(struct rcu_head *cb);
+ /*
+- * Schedule an RCU callback if no RCU callback is pending. Must be called with
+- * the graph lock held.
+- */
+-static void call_rcu_zapped(struct pending_free *pf)
++* See if we need to queue an RCU callback, must called with
++* the lockdep lock held, returns false if either we don't have
++* any pending free or the callback is already scheduled.
++* Otherwise, a call_rcu() must follow this function call.
++*/
++static bool prepare_call_rcu_zapped(struct pending_free *pf)
+ {
+       WARN_ON_ONCE(inside_selftest());
+       if (list_empty(&pf->zapped))
+-              return;
++              return false;
+       if (delayed_free.scheduled)
+-              return;
++              return false;
+       delayed_free.scheduled = true;
+       WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
+       delayed_free.index ^= 1;
+-      call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
++      return true;
+ }
+ /* The caller must hold the graph lock. May be called from RCU context. */
+@@ -6240,6 +6242,7 @@ static void free_zapped_rcu(struct rcu_h
+ {
+       struct pending_free *pf;
+       unsigned long flags;
++      bool need_callback;
+       if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
+               return;
+@@ -6251,14 +6254,18 @@ static void free_zapped_rcu(struct rcu_h
+       pf = delayed_free.pf + (delayed_free.index ^ 1);
+       __free_zapped_classes(pf);
+       delayed_free.scheduled = false;
++      need_callback =
++              prepare_call_rcu_zapped(delayed_free.pf + delayed_free.index);
++      lockdep_unlock();
++      raw_local_irq_restore(flags);
+       /*
+-       * If there's anything on the open list, close and start a new callback.
+-       */
+-      call_rcu_zapped(delayed_free.pf + delayed_free.index);
++      * If there's pending free and its callback has not been scheduled,
++      * queue an RCU callback.
++      */
++      if (need_callback)
++              call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+-      lockdep_unlock();
+-      raw_local_irq_restore(flags);
+ }
+ /*
+@@ -6298,6 +6305,7 @@ static void lockdep_free_key_range_reg(v
+ {
+       struct pending_free *pf;
+       unsigned long flags;
++      bool need_callback;
+       init_data_structures_once();
+@@ -6305,10 +6313,11 @@ static void lockdep_free_key_range_reg(v
+       lockdep_lock();
+       pf = get_pending_free();
+       __lockdep_free_key_range(pf, start, size);
+-      call_rcu_zapped(pf);
++      need_callback = prepare_call_rcu_zapped(pf);
+       lockdep_unlock();
+       raw_local_irq_restore(flags);
+-
++      if (need_callback)
++              call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+       /*
+        * Wait for any possible iterators from look_up_lock_class() to pass
+        * before continuing to free the memory they refer to.
+@@ -6402,6 +6411,7 @@ static void lockdep_reset_lock_reg(struc
+       struct pending_free *pf;
+       unsigned long flags;
+       int locked;
++      bool need_callback = false;
+       raw_local_irq_save(flags);
+       locked = graph_lock();
+@@ -6410,11 +6420,13 @@ static void lockdep_reset_lock_reg(struc
+       pf = get_pending_free();
+       __lockdep_reset_lock(pf, lock);
+-      call_rcu_zapped(pf);
++      need_callback = prepare_call_rcu_zapped(pf);
+       graph_unlock();
+ out_irq:
+       raw_local_irq_restore(flags);
++      if (need_callback)
++              call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+ }
+ /*
+@@ -6458,6 +6470,7 @@ void lockdep_unregister_key(struct lock_
+       struct pending_free *pf;
+       unsigned long flags;
+       bool found = false;
++      bool need_callback = false;
+       might_sleep();
+@@ -6478,11 +6491,14 @@ void lockdep_unregister_key(struct lock_
+       if (found) {
+               pf = get_pending_free();
+               __lockdep_free_key_range(pf, key, 1);
+-              call_rcu_zapped(pf);
++              need_callback = prepare_call_rcu_zapped(pf);
+       }
+       lockdep_unlock();
+       raw_local_irq_restore(flags);
++      if (need_callback)
++              call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
++
+       /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
+       synchronize_rcu();
+ }
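The two stacks form a classic ABBA cycle: rcuop/x holds the graph lock and
wants nocb_gp_lock (inside call_rcu()), while rcuop/y, already holding
nocb_gp_lock, takes a timer-base lock whose lock_acquire() needs the graph
lock. The fix breaks the cycle by never invoking call_rcu() under the graph
lock; every zapping site now follows the same shape (sketch after
lockdep_free_key_range_reg() above):

    bool need_callback;

    raw_local_irq_save(flags);
    lockdep_lock();                                 /* graph lock */
    pf = get_pending_free();
    __lockdep_free_key_range(pf, start, size);
    need_callback = prepare_call_rcu_zapped(pf);    /* records the decision only */
    lockdep_unlock();
    raw_local_irq_restore(flags);

    if (need_callback)
            call_rcu(&delayed_free.rcu_head,        /* may take nocb_gp_lock, so */
                     free_zapped_rcu);              /* done outside the graph lock */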
diff --git a/queue-6.11/lsm-infrastructure-management-of-the-sock-security.patch b/queue-6.11/lsm-infrastructure-management-of-the-sock-security.patch
index 12efc1cdb8be3d2f2d5051946a29236f530b4654..2421d042f08363b153bf9ea98a78288b7143e4bb 100644 (file)
@@ -23,21 +23,19 @@ Signed-off-by: Paul Moore <paul@paul-moore.com>
 Stable-dep-of: 63dff3e48871 ("lsm: add the inode_free_security_rcu() LSM implementation hook")
 Signed-off-by: Sasha Levin <sashal@kernel.org>
 ---
- include/linux/lsm_hooks.h         |  1 +
- security/apparmor/include/net.h   |  3 +-
- security/apparmor/lsm.c           | 17 +------
- security/apparmor/net.c           |  2 +-
- security/security.c               | 36 +++++++++++++-
- security/selinux/hooks.c          | 80 ++++++++++++++-----------------
- security/selinux/include/objsec.h |  5 ++
- security/selinux/netlabel.c       | 23 ++++-----
- security/smack/smack.h            |  5 ++
- security/smack/smack_lsm.c        | 70 +++++++++++++--------------
- security/smack/smack_netfilter.c  |  4 +-
+ include/linux/lsm_hooks.h         |    1 
+ security/apparmor/include/net.h   |    3 -
+ security/apparmor/lsm.c           |   17 --------
+ security/apparmor/net.c           |    2 
+ security/security.c               |   36 ++++++++++++++++-
+ security/selinux/hooks.c          |   80 +++++++++++++++++---------------------
+ security/selinux/include/objsec.h |    5 ++
+ security/selinux/netlabel.c       |   23 +++++-----
+ security/smack/smack.h            |    5 ++
+ security/smack/smack_lsm.c        |   70 +++++++++++++++------------------
+ security/smack/smack_netfilter.c  |    4 -
  11 files changed, 133 insertions(+), 113 deletions(-)
 
-diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
-index a2ade0ffe9e7d..efd4a0655159c 100644
 --- a/include/linux/lsm_hooks.h
 +++ b/include/linux/lsm_hooks.h
 @@ -73,6 +73,7 @@ struct lsm_blob_sizes {
@@ -48,8 +46,6 @@ index a2ade0ffe9e7d..efd4a0655159c 100644
        int     lbs_superblock;
        int     lbs_ipc;
        int     lbs_msg_msg;
-diff --git a/security/apparmor/include/net.h b/security/apparmor/include/net.h
-index 67bf888c3bd6b..c42ed8a73f1ce 100644
 --- a/security/apparmor/include/net.h
 +++ b/security/apparmor/include/net.h
 @@ -51,10 +51,9 @@ struct aa_sk_ctx {
@@ -64,11 +60,9 @@ index 67bf888c3bd6b..c42ed8a73f1ce 100644
  }
  
  #define DEFINE_AUDIT_NET(NAME, OP, SK, F, T, P)                                 \
-diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
-index 808060f9effb7..f5d05297d59ee 100644
 --- a/security/apparmor/lsm.c
 +++ b/security/apparmor/lsm.c
-@@ -1058,27 +1058,12 @@ static int apparmor_userns_create(const struct cred *cred)
+@@ -1058,27 +1058,12 @@ static int apparmor_userns_create(const
        return error;
  }
  
@@ -96,7 +90,7 @@ index 808060f9effb7..f5d05297d59ee 100644
  }
  
  /**
-@@ -1433,6 +1418,7 @@ struct lsm_blob_sizes apparmor_blob_sizes __ro_after_init = {
+@@ -1433,6 +1418,7 @@ struct lsm_blob_sizes apparmor_blob_size
        .lbs_cred = sizeof(struct aa_label *),
        .lbs_file = sizeof(struct aa_file_ctx),
        .lbs_task = sizeof(struct aa_task_ctx),
@@ -104,7 +98,7 @@ index 808060f9effb7..f5d05297d59ee 100644
  };
  
  static const struct lsm_id apparmor_lsmid = {
-@@ -1478,7 +1464,6 @@ static struct security_hook_list apparmor_hooks[] __ro_after_init = {
+@@ -1478,7 +1464,6 @@ static struct security_hook_list apparmo
        LSM_HOOK_INIT(getprocattr, apparmor_getprocattr),
        LSM_HOOK_INIT(setprocattr, apparmor_setprocattr),
  
@@ -112,11 +106,9 @@ index 808060f9effb7..f5d05297d59ee 100644
        LSM_HOOK_INIT(sk_free_security, apparmor_sk_free_security),
        LSM_HOOK_INIT(sk_clone_security, apparmor_sk_clone_security),
  
-diff --git a/security/apparmor/net.c b/security/apparmor/net.c
-index 87e934b2b5488..77413a5191179 100644
 --- a/security/apparmor/net.c
 +++ b/security/apparmor/net.c
-@@ -151,7 +151,7 @@ static int aa_label_sk_perm(const struct cred *subj_cred,
+@@ -151,7 +151,7 @@ static int aa_label_sk_perm(const struct
                            const char *op, u32 request,
                            struct sock *sk)
  {
@@ -125,8 +117,6 @@ index 87e934b2b5488..77413a5191179 100644
        int error = 0;
  
        AA_BUG(!label);
-diff --git a/security/security.c b/security/security.c
-index 41ab07eafc7fa..43166e341526c 100644
 --- a/security/security.c
 +++ b/security/security.c
 @@ -29,6 +29,7 @@
@@ -137,7 +127,7 @@ index 41ab07eafc7fa..43166e341526c 100644
  
  /* How many LSMs were built into the kernel? */
  #define LSM_COUNT (__end_lsm_info - __start_lsm_info)
-@@ -227,6 +228,7 @@ static void __init lsm_set_blob_sizes(struct lsm_blob_sizes *needed)
+@@ -227,6 +228,7 @@ static void __init lsm_set_blob_sizes(st
        lsm_set_blob_size(&needed->lbs_inode, &blob_sizes.lbs_inode);
        lsm_set_blob_size(&needed->lbs_ipc, &blob_sizes.lbs_ipc);
        lsm_set_blob_size(&needed->lbs_msg_msg, &blob_sizes.lbs_msg_msg);
@@ -145,7 +135,7 @@ index 41ab07eafc7fa..43166e341526c 100644
        lsm_set_blob_size(&needed->lbs_superblock, &blob_sizes.lbs_superblock);
        lsm_set_blob_size(&needed->lbs_task, &blob_sizes.lbs_task);
        lsm_set_blob_size(&needed->lbs_xattr_count,
-@@ -401,6 +403,7 @@ static void __init ordered_lsm_init(void)
+@@ -401,6 +403,7 @@ static void __init ordered_lsm_init(void
        init_debug("inode blob size      = %d\n", blob_sizes.lbs_inode);
        init_debug("ipc blob size        = %d\n", blob_sizes.lbs_ipc);
        init_debug("msg_msg blob size    = %d\n", blob_sizes.lbs_msg_msg);
@@ -153,11 +143,10 @@ index 41ab07eafc7fa..43166e341526c 100644
        init_debug("superblock blob size = %d\n", blob_sizes.lbs_superblock);
        init_debug("task blob size       = %d\n", blob_sizes.lbs_task);
        init_debug("xattr slots          = %d\n", blob_sizes.lbs_xattr_count);
-@@ -4673,6 +4676,28 @@ int security_socket_getpeersec_dgram(struct socket *sock,
- }
+@@ -4674,6 +4677,28 @@ int security_socket_getpeersec_dgram(str
  EXPORT_SYMBOL(security_socket_getpeersec_dgram);
  
-+/**
+ /**
 + * lsm_sock_alloc - allocate a composite sock blob
 + * @sock: the sock that needs a blob
 + * @priority: allocation mode
@@ -179,10 +168,11 @@ index 41ab07eafc7fa..43166e341526c 100644
 +      return 0;
 +}
 +
- /**
++/**
   * security_sk_alloc() - Allocate and initialize a sock's LSM blob
   * @sk: sock
-@@ -4686,7 +4711,14 @@ EXPORT_SYMBOL(security_socket_getpeersec_dgram);
+  * @family: protocol family
+@@ -4686,7 +4711,14 @@ EXPORT_SYMBOL(security_socket_getpeersec
   */
  int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
  {
@@ -198,7 +188,7 @@ index 41ab07eafc7fa..43166e341526c 100644
  }
  
  /**
-@@ -4698,6 +4730,8 @@ int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
+@@ -4698,6 +4730,8 @@ int security_sk_alloc(struct sock *sk, i
  void security_sk_free(struct sock *sk)
  {
        call_void_hook(sk_free_security, sk);
@@ -207,11 +197,9 @@ index 41ab07eafc7fa..43166e341526c 100644
  }
  
  /**
-diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
-index 400eca4ad0fb6..c11303d662d80 100644
 --- a/security/selinux/hooks.c
 +++ b/security/selinux/hooks.c
-@@ -4594,7 +4594,7 @@ static int socket_sockcreate_sid(const struct task_security_struct *tsec,
+@@ -4594,7 +4594,7 @@ static int socket_sockcreate_sid(const s
  
  static int sock_has_perm(struct sock *sk, u32 perms)
  {
@@ -220,7 +208,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
        struct common_audit_data ad;
        struct lsm_network_audit net;
  
-@@ -4662,7 +4662,7 @@ static int selinux_socket_post_create(struct socket *sock, int family,
+@@ -4662,7 +4662,7 @@ static int selinux_socket_post_create(st
        isec->initialized = LABEL_INITIALIZED;
  
        if (sock->sk) {
@@ -229,7 +217,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
                sksec->sclass = sclass;
                sksec->sid = sid;
                /* Allows detection of the first association on this socket */
-@@ -4678,8 +4678,8 @@ static int selinux_socket_post_create(struct socket *sock, int family,
+@@ -4678,8 +4678,8 @@ static int selinux_socket_post_create(st
  static int selinux_socket_socketpair(struct socket *socka,
                                     struct socket *sockb)
  {
@@ -240,7 +228,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
  
        sksec_a->peer_sid = sksec_b->sid;
        sksec_b->peer_sid = sksec_a->sid;
-@@ -4694,7 +4694,7 @@ static int selinux_socket_socketpair(struct socket *socka,
+@@ -4694,7 +4694,7 @@ static int selinux_socket_socketpair(str
  static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen)
  {
        struct sock *sk = sock->sk;
@@ -249,7 +237,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
        u16 family;
        int err;
  
-@@ -4834,7 +4834,7 @@ static int selinux_socket_connect_helper(struct socket *sock,
+@@ -4834,7 +4834,7 @@ static int selinux_socket_connect_helper
                                         struct sockaddr *address, int addrlen)
  {
        struct sock *sk = sock->sk;
@@ -258,7 +246,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
        int err;
  
        err = sock_has_perm(sk, SOCKET__CONNECT);
-@@ -5012,9 +5012,9 @@ static int selinux_socket_unix_stream_connect(struct sock *sock,
+@@ -5012,9 +5012,9 @@ static int selinux_socket_unix_stream_co
                                              struct sock *other,
                                              struct sock *newsk)
  {
@@ -271,7 +259,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
        struct common_audit_data ad;
        struct lsm_network_audit net;
        int err;
-@@ -5043,8 +5043,8 @@ static int selinux_socket_unix_stream_connect(struct sock *sock,
+@@ -5043,8 +5043,8 @@ static int selinux_socket_unix_stream_co
  static int selinux_socket_unix_may_send(struct socket *sock,
                                        struct socket *other)
  {
@@ -282,7 +270,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
        struct common_audit_data ad;
        struct lsm_network_audit net;
  
-@@ -5081,7 +5081,7 @@ static int selinux_sock_rcv_skb_compat(struct sock *sk, struct sk_buff *skb,
+@@ -5081,7 +5081,7 @@ static int selinux_sock_rcv_skb_compat(s
                                       u16 family)
  {
        int err = 0;
@@ -291,7 +279,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
        u32 sk_sid = sksec->sid;
        struct common_audit_data ad;
        struct lsm_network_audit net;
-@@ -5110,7 +5110,7 @@ static int selinux_sock_rcv_skb_compat(struct sock *sk, struct sk_buff *skb,
+@@ -5110,7 +5110,7 @@ static int selinux_sock_rcv_skb_compat(s
  static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
  {
        int err, peerlbl_active, secmark_active;
@@ -300,7 +288,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
        u16 family = sk->sk_family;
        u32 sk_sid = sksec->sid;
        struct common_audit_data ad;
-@@ -5178,7 +5178,7 @@ static int selinux_socket_getpeersec_stream(struct socket *sock,
+@@ -5178,7 +5178,7 @@ static int selinux_socket_getpeersec_str
        int err = 0;
        char *scontext = NULL;
        u32 scontext_len;
@@ -309,7 +297,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
        u32 peer_sid = SECSID_NULL;
  
        if (sksec->sclass == SECCLASS_UNIX_STREAM_SOCKET ||
-@@ -5238,34 +5238,27 @@ static int selinux_socket_getpeersec_dgram(struct socket *sock,
+@@ -5238,34 +5238,27 @@ static int selinux_socket_getpeersec_dgr
  
  static int selinux_sk_alloc_security(struct sock *sk, int family, gfp_t priority)
  {
@@ -348,7 +336,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
  
        newsksec->sid = sksec->sid;
        newsksec->peer_sid = sksec->peer_sid;
-@@ -5279,7 +5272,7 @@ static void selinux_sk_getsecid(const struct sock *sk, u32 *secid)
+@@ -5279,7 +5272,7 @@ static void selinux_sk_getsecid(const st
        if (!sk)
                *secid = SECINITSID_ANY_SOCKET;
        else {
@@ -357,7 +345,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
  
                *secid = sksec->sid;
        }
-@@ -5289,7 +5282,7 @@ static void selinux_sock_graft(struct sock *sk, struct socket *parent)
+@@ -5289,7 +5282,7 @@ static void selinux_sock_graft(struct so
  {
        struct inode_security_struct *isec =
                inode_security_novalidate(SOCK_INODE(parent));
@@ -366,7 +354,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
  
        if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6 ||
            sk->sk_family == PF_UNIX)
-@@ -5306,7 +5299,7 @@ static int selinux_sctp_process_new_assoc(struct sctp_association *asoc,
+@@ -5306,7 +5299,7 @@ static int selinux_sctp_process_new_asso
  {
        struct sock *sk = asoc->base.sk;
        u16 family = sk->sk_family;
@@ -375,7 +363,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
        struct common_audit_data ad;
        struct lsm_network_audit net;
        int err;
-@@ -5361,7 +5354,7 @@ static int selinux_sctp_process_new_assoc(struct sctp_association *asoc,
+@@ -5361,7 +5354,7 @@ static int selinux_sctp_process_new_asso
  static int selinux_sctp_assoc_request(struct sctp_association *asoc,
                                      struct sk_buff *skb)
  {
@@ -384,7 +372,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
        u32 conn_sid;
        int err;
  
-@@ -5394,7 +5387,7 @@ static int selinux_sctp_assoc_request(struct sctp_association *asoc,
+@@ -5394,7 +5387,7 @@ static int selinux_sctp_assoc_request(st
  static int selinux_sctp_assoc_established(struct sctp_association *asoc,
                                          struct sk_buff *skb)
  {
@@ -393,7 +381,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
  
        if (!selinux_policycap_extsockclass())
                return 0;
-@@ -5493,8 +5486,8 @@ static int selinux_sctp_bind_connect(struct sock *sk, int optname,
+@@ -5493,8 +5486,8 @@ static int selinux_sctp_bind_connect(str
  static void selinux_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk,
                                  struct sock *newsk)
  {
@@ -404,7 +392,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
  
        /* If policy does not support SECCLASS_SCTP_SOCKET then call
         * the non-sctp clone version.
-@@ -5510,8 +5503,8 @@ static void selinux_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk
+@@ -5510,8 +5503,8 @@ static void selinux_sctp_sk_clone(struct
  
  static int selinux_mptcp_add_subflow(struct sock *sk, struct sock *ssk)
  {
@@ -415,7 +403,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
  
        ssksec->sclass = sksec->sclass;
        ssksec->sid = sksec->sid;
-@@ -5526,7 +5519,7 @@ static int selinux_mptcp_add_subflow(struct sock *sk, struct sock *ssk)
+@@ -5526,7 +5519,7 @@ static int selinux_mptcp_add_subflow(str
  static int selinux_inet_conn_request(const struct sock *sk, struct sk_buff *skb,
                                     struct request_sock *req)
  {
@@ -424,7 +412,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
        int err;
        u16 family = req->rsk_ops->family;
        u32 connsid;
-@@ -5547,7 +5540,7 @@ static int selinux_inet_conn_request(const struct sock *sk, struct sk_buff *skb,
+@@ -5547,7 +5540,7 @@ static int selinux_inet_conn_request(con
  static void selinux_inet_csk_clone(struct sock *newsk,
                                   const struct request_sock *req)
  {
@@ -433,7 +421,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
  
        newsksec->sid = req->secid;
        newsksec->peer_sid = req->peer_secid;
-@@ -5564,7 +5557,7 @@ static void selinux_inet_csk_clone(struct sock *newsk,
+@@ -5564,7 +5557,7 @@ static void selinux_inet_csk_clone(struc
  static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb)
  {
        u16 family = sk->sk_family;
@@ -442,7 +430,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
  
        /* handle mapped IPv4 packets arriving via IPv6 sockets */
        if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
-@@ -5639,7 +5632,7 @@ static int selinux_tun_dev_attach_queue(void *security)
+@@ -5639,7 +5632,7 @@ static int selinux_tun_dev_attach_queue(
  static int selinux_tun_dev_attach(struct sock *sk, void *security)
  {
        struct tun_security_struct *tunsec = security;
@@ -451,7 +439,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
  
        /* we don't currently perform any NetLabel based labeling here and it
         * isn't clear that we would want to do so anyway; while we could apply
-@@ -5762,7 +5755,7 @@ static unsigned int selinux_ip_output(void *priv, struct sk_buff *skb,
+@@ -5762,7 +5755,7 @@ static unsigned int selinux_ip_output(vo
                        return NF_ACCEPT;
  
                /* standard practice, label using the parent socket */
@@ -460,7 +448,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
                sid = sksec->sid;
        } else
                sid = SECINITSID_KERNEL;
-@@ -5785,7 +5778,7 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
+@@ -5785,7 +5778,7 @@ static unsigned int selinux_ip_postroute
        sk = skb_to_full_sk(skb);
        if (sk == NULL)
                return NF_ACCEPT;
@@ -469,7 +457,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
  
        ad_net_init_from_iif(&ad, &net, state->out->ifindex, state->pf);
        if (selinux_parse_skb(skb, &ad, NULL, 0, &proto))
-@@ -5874,7 +5867,7 @@ static unsigned int selinux_ip_postroute(void *priv,
+@@ -5874,7 +5867,7 @@ static unsigned int selinux_ip_postroute
                u32 skb_sid;
                struct sk_security_struct *sksec;
  
@@ -478,7 +466,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
                if (selinux_skb_peerlbl_sid(skb, family, &skb_sid))
                        return NF_DROP;
                /* At this point, if the returned skb peerlbl is SECSID_NULL
-@@ -5903,7 +5896,7 @@ static unsigned int selinux_ip_postroute(void *priv,
+@@ -5903,7 +5896,7 @@ static unsigned int selinux_ip_postroute
        } else {
                /* Locally generated packet, fetch the security label from the
                 * associated socket. */
@@ -487,7 +475,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
                peer_sid = sksec->sid;
                secmark_perm = PACKET__SEND;
        }
-@@ -5946,7 +5939,7 @@ static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb)
+@@ -5946,7 +5939,7 @@ static int selinux_netlink_send(struct s
        unsigned int data_len = skb->len;
        unsigned char *data = skb->data;
        struct nlmsghdr *nlh;
@@ -496,7 +484,7 @@ index 400eca4ad0fb6..c11303d662d80 100644
        u16 sclass = sksec->sclass;
        u32 perm;
  
-@@ -7004,6 +6997,7 @@ struct lsm_blob_sizes selinux_blob_sizes __ro_after_init = {
+@@ -7004,6 +6997,7 @@ struct lsm_blob_sizes selinux_blob_sizes
        .lbs_inode = sizeof(struct inode_security_struct),
        .lbs_ipc = sizeof(struct ipc_security_struct),
        .lbs_msg_msg = sizeof(struct msg_security_struct),
@@ -504,11 +492,9 @@ index 400eca4ad0fb6..c11303d662d80 100644
        .lbs_superblock = sizeof(struct superblock_security_struct),
        .lbs_xattr_count = SELINUX_INODE_INIT_XATTRS,
  };
-diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
-index dea1d6f3ed2d3..b074099acbaf7 100644
 --- a/security/selinux/include/objsec.h
 +++ b/security/selinux/include/objsec.h
-@@ -195,4 +195,9 @@ selinux_superblock(const struct super_block *superblock)
+@@ -195,4 +195,9 @@ selinux_superblock(const struct super_bl
        return superblock->s_security + selinux_blob_sizes.lbs_superblock;
  }
  
@@ -518,8 +504,6 @@ index dea1d6f3ed2d3..b074099acbaf7 100644
 +}
 +
  #endif /* _SELINUX_OBJSEC_H_ */
-diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
-index 55885634e8804..fbe5f8c29f813 100644
 --- a/security/selinux/netlabel.c
 +++ b/security/selinux/netlabel.c
 @@ -17,6 +17,7 @@
@@ -530,7 +514,7 @@ index 55885634e8804..fbe5f8c29f813 100644
  #include <net/sock.h>
  #include <net/netlabel.h>
  #include <net/ip.h>
-@@ -68,7 +69,7 @@ static int selinux_netlbl_sidlookup_cached(struct sk_buff *skb,
+@@ -68,7 +69,7 @@ static int selinux_netlbl_sidlookup_cach
  static struct netlbl_lsm_secattr *selinux_netlbl_sock_genattr(struct sock *sk)
  {
        int rc;
@@ -539,7 +523,7 @@ index 55885634e8804..fbe5f8c29f813 100644
        struct netlbl_lsm_secattr *secattr;
  
        if (sksec->nlbl_secattr != NULL)
-@@ -100,7 +101,7 @@ static struct netlbl_lsm_secattr *selinux_netlbl_sock_getattr(
+@@ -100,7 +101,7 @@ static struct netlbl_lsm_secattr *selinu
                                                        const struct sock *sk,
                                                        u32 sid)
  {
@@ -548,7 +532,7 @@ index 55885634e8804..fbe5f8c29f813 100644
        struct netlbl_lsm_secattr *secattr = sksec->nlbl_secattr;
  
        if (secattr == NULL)
-@@ -240,7 +241,7 @@ int selinux_netlbl_skbuff_setsid(struct sk_buff *skb,
+@@ -240,7 +241,7 @@ int selinux_netlbl_skbuff_setsid(struct
         * being labeled by it's parent socket, if it is just exit */
        sk = skb_to_full_sk(skb);
        if (sk != NULL) {
@@ -557,7 +541,7 @@ index 55885634e8804..fbe5f8c29f813 100644
  
                if (sksec->nlbl_state != NLBL_REQSKB)
                        return 0;
-@@ -277,7 +278,7 @@ int selinux_netlbl_sctp_assoc_request(struct sctp_association *asoc,
+@@ -277,7 +278,7 @@ int selinux_netlbl_sctp_assoc_request(st
  {
        int rc;
        struct netlbl_lsm_secattr secattr;
@@ -566,7 +550,7 @@ index 55885634e8804..fbe5f8c29f813 100644
        struct sockaddr_in addr4;
        struct sockaddr_in6 addr6;
  
-@@ -356,7 +357,7 @@ int selinux_netlbl_inet_conn_request(struct request_sock *req, u16 family)
+@@ -356,7 +357,7 @@ inet_conn_request_return:
   */
  void selinux_netlbl_inet_csk_clone(struct sock *sk, u16 family)
  {
@@ -575,7 +559,7 @@ index 55885634e8804..fbe5f8c29f813 100644
  
        if (family == PF_INET)
                sksec->nlbl_state = NLBL_LABELED;
-@@ -374,8 +375,8 @@ void selinux_netlbl_inet_csk_clone(struct sock *sk, u16 family)
+@@ -374,8 +375,8 @@ void selinux_netlbl_inet_csk_clone(struc
   */
  void selinux_netlbl_sctp_sk_clone(struct sock *sk, struct sock *newsk)
  {
@@ -586,7 +570,7 @@ index 55885634e8804..fbe5f8c29f813 100644
  
        newsksec->nlbl_state = sksec->nlbl_state;
  }
-@@ -393,7 +394,7 @@ void selinux_netlbl_sctp_sk_clone(struct sock *sk, struct sock *newsk)
+@@ -393,7 +394,7 @@ void selinux_netlbl_sctp_sk_clone(struct
  int selinux_netlbl_socket_post_create(struct sock *sk, u16 family)
  {
        int rc;
@@ -595,7 +579,7 @@ index 55885634e8804..fbe5f8c29f813 100644
        struct netlbl_lsm_secattr *secattr;
  
        if (family != PF_INET && family != PF_INET6)
-@@ -510,7 +511,7 @@ int selinux_netlbl_socket_setsockopt(struct socket *sock,
+@@ -510,7 +511,7 @@ int selinux_netlbl_socket_setsockopt(str
  {
        int rc = 0;
        struct sock *sk = sock->sk;
@@ -604,7 +588,7 @@ index 55885634e8804..fbe5f8c29f813 100644
        struct netlbl_lsm_secattr secattr;
  
        if (selinux_netlbl_option(level, optname) &&
-@@ -548,7 +549,7 @@ static int selinux_netlbl_socket_connect_helper(struct sock *sk,
+@@ -548,7 +549,7 @@ static int selinux_netlbl_socket_connect
                                                struct sockaddr *addr)
  {
        int rc;
@@ -613,7 +597,7 @@ index 55885634e8804..fbe5f8c29f813 100644
        struct netlbl_lsm_secattr *secattr;
  
        /* connected sockets are allowed to disconnect when the address family
-@@ -587,7 +588,7 @@ static int selinux_netlbl_socket_connect_helper(struct sock *sk,
+@@ -587,7 +588,7 @@ static int selinux_netlbl_socket_connect
  int selinux_netlbl_socket_connect_locked(struct sock *sk,
                                         struct sockaddr *addr)
  {
@@ -622,11 +606,9 @@ index 55885634e8804..fbe5f8c29f813 100644
  
        if (sksec->nlbl_state != NLBL_REQSKB &&
            sksec->nlbl_state != NLBL_CONNLABELED)
-diff --git a/security/smack/smack.h b/security/smack/smack.h
-index 041688e5a77a3..297f21446f456 100644
 --- a/security/smack/smack.h
 +++ b/security/smack/smack.h
-@@ -355,6 +355,11 @@ static inline struct superblock_smack *smack_superblock(
+@@ -355,6 +355,11 @@ static inline struct superblock_smack *s
        return superblock->s_security + smack_blob_sizes.lbs_superblock;
  }
  
@@ -638,11 +620,9 @@ index 041688e5a77a3..297f21446f456 100644
  /*
   * Is the directory transmuting?
   */
-diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
-index 002a1b9ed83a5..6ec9a40f3ec59 100644
 --- a/security/smack/smack_lsm.c
 +++ b/security/smack/smack_lsm.c
-@@ -1606,7 +1606,7 @@ static int smack_inode_getsecurity(struct mnt_idmap *idmap,
+@@ -1606,7 +1606,7 @@ static int smack_inode_getsecurity(struc
                if (sock == NULL || sock->sk == NULL)
                        return -EOPNOTSUPP;
  
@@ -651,7 +631,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
  
                if (strcmp(name, XATTR_SMACK_IPIN) == 0)
                        isp = ssp->smk_in;
-@@ -1994,7 +1994,7 @@ static int smack_file_receive(struct file *file)
+@@ -1994,7 +1994,7 @@ static int smack_file_receive(struct fil
  
        if (inode->i_sb->s_magic == SOCKFS_MAGIC) {
                sock = SOCKET_I(inode);
@@ -660,7 +640,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
                tsp = smack_cred(current_cred());
                /*
                 * If the receiving process can't write to the
-@@ -2409,11 +2409,7 @@ static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
+@@ -2409,11 +2409,7 @@ static void smack_task_to_inode(struct t
  static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags)
  {
        struct smack_known *skp = smk_of_current();
@@ -673,7 +653,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
  
        /*
         * Sockets created by kernel threads receive web label.
-@@ -2427,11 +2423,10 @@ static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags)
+@@ -2427,11 +2423,10 @@ static int smack_sk_alloc_security(struc
        }
        ssp->smk_packet = NULL;
  
@@ -686,7 +666,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
  /**
   * smack_sk_free_security - Free a socket blob
   * @sk: the socket
-@@ -2440,7 +2435,6 @@ static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags)
+@@ -2440,7 +2435,6 @@ static int smack_sk_alloc_security(struc
   */
  static void smack_sk_free_security(struct sock *sk)
  {
@@ -694,7 +674,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
        struct smk_port_label *spp;
  
        if (sk->sk_family == PF_INET6) {
-@@ -2453,9 +2447,8 @@ static void smack_sk_free_security(struct sock *sk)
+@@ -2453,9 +2447,8 @@ static void smack_sk_free_security(struc
                }
                rcu_read_unlock();
        }
@@ -705,7 +685,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
  
  /**
   * smack_sk_clone_security - Copy security context
-@@ -2466,8 +2459,8 @@ static void smack_sk_free_security(struct sock *sk)
+@@ -2466,8 +2459,8 @@ static void smack_sk_free_security(struc
   */
  static void smack_sk_clone_security(const struct sock *sk, struct sock *newsk)
  {
@@ -716,7 +696,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
  
        *ssp_new = *ssp_old;
  }
-@@ -2583,7 +2576,7 @@ static struct smack_known *smack_ipv6host_label(struct sockaddr_in6 *sip)
+@@ -2583,7 +2576,7 @@ static struct smack_known *smack_ipv6hos
   */
  static int smack_netlbl_add(struct sock *sk)
  {
@@ -725,7 +705,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
        struct smack_known *skp = ssp->smk_out;
        int rc;
  
-@@ -2616,7 +2609,7 @@ static int smack_netlbl_add(struct sock *sk)
+@@ -2616,7 +2609,7 @@ static int smack_netlbl_add(struct sock
   */
  static void smack_netlbl_delete(struct sock *sk)
  {
@@ -734,7 +714,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
  
        /*
         * Take the label off the socket if one is set.
-@@ -2648,7 +2641,7 @@ static int smk_ipv4_check(struct sock *sk, struct sockaddr_in *sap)
+@@ -2648,7 +2641,7 @@ static int smk_ipv4_check(struct sock *s
        struct smack_known *skp;
        int rc = 0;
        struct smack_known *hkp;
@@ -743,7 +723,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
        struct smk_audit_info ad;
  
        rcu_read_lock();
-@@ -2721,7 +2714,7 @@ static void smk_ipv6_port_label(struct socket *sock, struct sockaddr *address)
+@@ -2721,7 +2714,7 @@ static void smk_ipv6_port_label(struct s
  {
        struct sock *sk = sock->sk;
        struct sockaddr_in6 *addr6;
@@ -752,7 +732,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
        struct smk_port_label *spp;
        unsigned short port = 0;
  
-@@ -2809,7 +2802,7 @@ static int smk_ipv6_port_check(struct sock *sk, struct sockaddr_in6 *address,
+@@ -2809,7 +2802,7 @@ static int smk_ipv6_port_check(struct so
                                int act)
  {
        struct smk_port_label *spp;
@@ -761,7 +741,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
        struct smack_known *skp = NULL;
        unsigned short port;
        struct smack_known *object;
-@@ -2912,7 +2905,7 @@ static int smack_inode_setsecurity(struct inode *inode, const char *name,
+@@ -2912,7 +2905,7 @@ static int smack_inode_setsecurity(struc
        if (sock == NULL || sock->sk == NULL)
                return -EOPNOTSUPP;
  
@@ -770,7 +750,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
  
        if (strcmp(name, XATTR_SMACK_IPIN) == 0)
                ssp->smk_in = skp;
-@@ -2960,7 +2953,7 @@ static int smack_socket_post_create(struct socket *sock, int family,
+@@ -2960,7 +2953,7 @@ static int smack_socket_post_create(stru
         * Sockets created by kernel threads receive web label.
         */
        if (unlikely(current->flags & PF_KTHREAD)) {
@@ -779,7 +759,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
                ssp->smk_in = &smack_known_web;
                ssp->smk_out = &smack_known_web;
        }
-@@ -2985,8 +2978,8 @@ static int smack_socket_post_create(struct socket *sock, int family,
+@@ -2985,8 +2978,8 @@ static int smack_socket_post_create(stru
  static int smack_socket_socketpair(struct socket *socka,
                                   struct socket *sockb)
  {
@@ -790,7 +770,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
  
        asp->smk_packet = bsp->smk_out;
        bsp->smk_packet = asp->smk_out;
-@@ -3049,7 +3042,7 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap,
+@@ -3049,7 +3042,7 @@ static int smack_socket_connect(struct s
                if (__is_defined(SMACK_IPV6_SECMARK_LABELING))
                        rsp = smack_ipv6host_label(sip);
                if (rsp != NULL) {
@@ -799,7 +779,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
  
                        rc = smk_ipv6_check(ssp->smk_out, rsp, sip,
                                            SMK_CONNECTING);
-@@ -3844,9 +3837,9 @@ static int smack_unix_stream_connect(struct sock *sock,
+@@ -3844,9 +3837,9 @@ static int smack_unix_stream_connect(str
  {
        struct smack_known *skp;
        struct smack_known *okp;
@@ -812,7 +792,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
        struct smk_audit_info ad;
        int rc = 0;
  #ifdef CONFIG_AUDIT
-@@ -3898,8 +3891,8 @@ static int smack_unix_stream_connect(struct sock *sock,
+@@ -3898,8 +3891,8 @@ static int smack_unix_stream_connect(str
   */
  static int smack_unix_may_send(struct socket *sock, struct socket *other)
  {
@@ -823,7 +803,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
        struct smk_audit_info ad;
        int rc;
  
-@@ -3936,7 +3929,7 @@ static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg,
+@@ -3936,7 +3929,7 @@ static int smack_socket_sendmsg(struct s
        struct sockaddr_in6 *sap = (struct sockaddr_in6 *) msg->msg_name;
  #endif
  #ifdef SMACK_IPV6_SECMARK_LABELING
@@ -832,7 +812,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
        struct smack_known *rsp;
  #endif
        int rc = 0;
-@@ -4148,7 +4141,7 @@ static struct smack_known *smack_from_netlbl(const struct sock *sk, u16 family,
+@@ -4148,7 +4141,7 @@ static struct smack_known *smack_from_ne
        netlbl_secattr_init(&secattr);
  
        if (sk)
@@ -841,7 +821,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
  
        if (netlbl_skbuff_getattr(skb, family, &secattr) == 0) {
                skp = smack_from_secattr(&secattr, ssp);
-@@ -4170,7 +4163,7 @@ static struct smack_known *smack_from_netlbl(const struct sock *sk, u16 family,
+@@ -4170,7 +4163,7 @@ static struct smack_known *smack_from_ne
   */
  static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
  {
@@ -850,7 +830,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
        struct smack_known *skp = NULL;
        int rc = 0;
        struct smk_audit_info ad;
-@@ -4274,7 +4267,7 @@ static int smack_socket_getpeersec_stream(struct socket *sock,
+@@ -4274,7 +4267,7 @@ static int smack_socket_getpeersec_strea
        u32 slen = 1;
        int rc = 0;
  
@@ -859,7 +839,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
        if (ssp->smk_packet != NULL) {
                rcp = ssp->smk_packet->smk_known;
                slen = strlen(rcp) + 1;
-@@ -4324,7 +4317,7 @@ static int smack_socket_getpeersec_dgram(struct socket *sock,
+@@ -4324,7 +4317,7 @@ static int smack_socket_getpeersec_dgram
  
        switch (family) {
        case PF_UNIX:
@@ -868,7 +848,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
                s = ssp->smk_out->smk_secid;
                break;
        case PF_INET:
-@@ -4373,7 +4366,7 @@ static void smack_sock_graft(struct sock *sk, struct socket *parent)
+@@ -4373,7 +4366,7 @@ static void smack_sock_graft(struct sock
            (sk->sk_family != PF_INET && sk->sk_family != PF_INET6))
                return;
  
@@ -877,7 +857,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
        ssp->smk_in = skp;
        ssp->smk_out = skp;
        /* cssp->smk_packet is already set in smack_inet_csk_clone() */
-@@ -4393,7 +4386,7 @@ static int smack_inet_conn_request(const struct sock *sk, struct sk_buff *skb,
+@@ -4393,7 +4386,7 @@ static int smack_inet_conn_request(const
  {
        u16 family = sk->sk_family;
        struct smack_known *skp;
@@ -886,7 +866,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
        struct sockaddr_in addr;
        struct iphdr *hdr;
        struct smack_known *hskp;
-@@ -4479,7 +4472,7 @@ static int smack_inet_conn_request(const struct sock *sk, struct sk_buff *skb,
+@@ -4479,7 +4472,7 @@ static int smack_inet_conn_request(const
  static void smack_inet_csk_clone(struct sock *sk,
                                 const struct request_sock *req)
  {
@@ -895,7 +875,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
        struct smack_known *skp;
  
        if (req->peer_secid != 0) {
-@@ -5049,6 +5042,7 @@ struct lsm_blob_sizes smack_blob_sizes __ro_after_init = {
+@@ -5049,6 +5042,7 @@ struct lsm_blob_sizes smack_blob_sizes _
        .lbs_inode = sizeof(struct inode_smack),
        .lbs_ipc = sizeof(struct smack_known *),
        .lbs_msg_msg = sizeof(struct smack_known *),
@@ -903,7 +883,7 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
        .lbs_superblock = sizeof(struct superblock_smack),
        .lbs_xattr_count = SMACK_INODE_INIT_XATTRS,
  };
-@@ -5173,7 +5167,9 @@ static struct security_hook_list smack_hooks[] __ro_after_init = {
+@@ -5173,7 +5167,9 @@ static struct security_hook_list smack_h
        LSM_HOOK_INIT(socket_getpeersec_stream, smack_socket_getpeersec_stream),
        LSM_HOOK_INIT(socket_getpeersec_dgram, smack_socket_getpeersec_dgram),
        LSM_HOOK_INIT(sk_alloc_security, smack_sk_alloc_security),
@@ -913,11 +893,9 @@ index 002a1b9ed83a5..6ec9a40f3ec59 100644
        LSM_HOOK_INIT(sk_clone_security, smack_sk_clone_security),
        LSM_HOOK_INIT(sock_graft, smack_sock_graft),
        LSM_HOOK_INIT(inet_conn_request, smack_inet_conn_request),
-diff --git a/security/smack/smack_netfilter.c b/security/smack/smack_netfilter.c
-index b945c1d3a7431..bad71b7e648da 100644
 --- a/security/smack/smack_netfilter.c
 +++ b/security/smack/smack_netfilter.c
-@@ -26,8 +26,8 @@ static unsigned int smack_ip_output(void *priv,
+@@ -26,8 +26,8 @@ static unsigned int smack_ip_output(void
        struct socket_smack *ssp;
        struct smack_known *skp;
  
@@ -928,6 +906,3 @@ index b945c1d3a7431..bad71b7e648da 100644
                skp = ssp->smk_out;
                skb->secmark = skp->smk_secid;
        }
--- 
-2.43.0
-
diff --git a/queue-6.11/mm-change-vmf_anon_prepare-to-__vmf_anon_prepare.patch b/queue-6.11/mm-change-vmf_anon_prepare-to-__vmf_anon_prepare.patch
new file mode 100644 (file)
index 0000000..3055a21
--- /dev/null
@@ -0,0 +1,85 @@
+From 2a058ab3286d6475b2082b90c2d2182d2fea4b39 Mon Sep 17 00:00:00 2001
+From: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
+Date: Sat, 14 Sep 2024 12:41:18 -0700
+Subject: mm: change vmf_anon_prepare() to __vmf_anon_prepare()
+
+From: Vishal Moola (Oracle) <vishal.moola@gmail.com>
+
+commit 2a058ab3286d6475b2082b90c2d2182d2fea4b39 upstream.
+
+Some callers of vmf_anon_prepare() may not want us to release the per-VMA
+lock ourselves.  Rename vmf_anon_prepare() to __vmf_anon_prepare() and let
+the callers drop the lock when desired.
+
+Also, make vmf_anon_prepare() a wrapper that releases the per-VMA lock
+itself for any callers that don't care.
+
+This is in preparation to fix this bug reported by syzbot:
+https://lore.kernel.org/linux-mm/00000000000067c20b06219fbc26@google.com/
+
+Link: https://lkml.kernel.org/r/20240914194243.245-1-vishal.moola@gmail.com
+Fixes: 9acad7ba3e25 ("hugetlb: use vmf_anon_prepare() instead of anon_vma_prepare()")
+Reported-by: syzbot+2dab93857ee95f2eeb08@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/linux-mm/00000000000067c20b06219fbc26@google.com/
+Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
+Cc: Muchun Song <muchun.song@linux.dev>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/internal.h |   11 ++++++++++-
+ mm/memory.c   |    8 +++-----
+ 2 files changed, 13 insertions(+), 6 deletions(-)
+
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -310,7 +310,16 @@ static inline void wake_throttle_isolate
+               wake_up(wqh);
+ }
+-vm_fault_t vmf_anon_prepare(struct vm_fault *vmf);
++vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf);
++static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
++{
++      vm_fault_t ret = __vmf_anon_prepare(vmf);
++
++      if (unlikely(ret & VM_FAULT_RETRY))
++              vma_end_read(vmf->vma);
++      return ret;
++}
++
+ vm_fault_t do_swap_page(struct vm_fault *vmf);
+ void folio_rotate_reclaimable(struct folio *folio);
+ bool __folio_end_writeback(struct folio *folio);
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3276,7 +3276,7 @@ static inline vm_fault_t vmf_can_call_fa
+ }
+ /**
+- * vmf_anon_prepare - Prepare to handle an anonymous fault.
++ * __vmf_anon_prepare - Prepare to handle an anonymous fault.
+  * @vmf: The vm_fault descriptor passed from the fault handler.
+  *
+  * When preparing to insert an anonymous page into a VMA from a
+@@ -3290,7 +3290,7 @@ static inline vm_fault_t vmf_can_call_fa
+  * Return: 0 if fault handling can proceed.  Any other value should be
+  * returned to the caller.
+  */
+-vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
++vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf)
+ {
+       struct vm_area_struct *vma = vmf->vma;
+       vm_fault_t ret = 0;
+@@ -3298,10 +3298,8 @@ vm_fault_t vmf_anon_prepare(struct vm_fa
+       if (likely(vma->anon_vma))
+               return 0;
+       if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
+-              if (!mmap_read_trylock(vma->vm_mm)) {
+-                      vma_end_read(vma);
++              if (!mmap_read_trylock(vma->vm_mm))
+                       return VM_FAULT_RETRY;
+-              }
+       }
+       if (__anon_vma_prepare(vma))
+               ret = VM_FAULT_OOM;
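
The split above is a small but easy-to-miss locking refactor: the bare
__vmf_anon_prepare() reports VM_FAULT_RETRY without touching the per-VMA
lock, while the inline wrapper keeps the old drop-the-lock-on-retry
contract for existing callers. A minimal userspace sketch of the same
pattern, with hypothetical names and a pthread mutex standing in for the
per-VMA lock:

#include <pthread.h>
#include <stdio.h>

#define RETRY 1

/* hypothetical stand-in for the per-VMA lock */
static pthread_mutex_t vma_lock = PTHREAD_MUTEX_INITIALIZER;

/* bare variant: may ask for a retry, but never releases vma_lock itself */
static int __prepare(int ready)
{
	return ready ? 0 : RETRY;
}

/* wrapper: preserves the old contract of dropping the lock on retry */
static int prepare(int ready)
{
	int ret = __prepare(ready);

	if (ret == RETRY)
		pthread_mutex_unlock(&vma_lock);
	return ret;
}

int main(void)
{
	pthread_mutex_lock(&vma_lock);
	if (prepare(0) == RETRY) {
		/* the wrapper already dropped vma_lock for us */
		printf("retry: lock released by wrapper\n");
		return 0;
	}
	pthread_mutex_unlock(&vma_lock);
	return 0;
}

Callers that need to unlock in a different order, as the hugetlb fix in
this series does, call the bare __prepare() variant directly.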
diff --git a/queue-6.11/mm-damon-vaddr-protect-vma-traversal-in-__damon_va_thre_regions-with-rcu-read-lock.patch b/queue-6.11/mm-damon-vaddr-protect-vma-traversal-in-__damon_va_thre_regions-with-rcu-read-lock.patch
new file mode 100644 (file)
index 0000000..2458344
--- /dev/null
@@ -0,0 +1,47 @@
+From fb497d6db7c19c797cbd694b52d1af87c4eebcc6 Mon Sep 17 00:00:00 2001
+From: "Liam R. Howlett" <Liam.Howlett@oracle.com>
+Date: Wed, 4 Sep 2024 17:12:04 -0700
+Subject: mm/damon/vaddr: protect vma traversal in __damon_va_thre_regions() with rcu read lock
+
+From: Liam R. Howlett <Liam.Howlett@oracle.com>
+
+commit fb497d6db7c19c797cbd694b52d1af87c4eebcc6 upstream.
+
+Traversing the VMAs of a given maple tree should be protected by the
+RCU read lock.  However, __damon_va_three_regions() does not take it.
+Hold the lock.
+
+Link: https://lkml.kernel.org/r/20240905001204.1481-1-sj@kernel.org
+Fixes: d0cf3dd47f0d ("damon: convert __damon_va_three_regions to use the VMA iterator")
+Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Reported-by: Guenter Roeck <linux@roeck-us.net>
+Closes: https://lore.kernel.org/b83651a0-5b24-4206-b860-cb54ffdf209b@roeck-us.net
+Tested-by: Guenter Roeck <linux@roeck-us.net>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/vaddr.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/mm/damon/vaddr.c
++++ b/mm/damon/vaddr.c
+@@ -126,6 +126,7 @@ static int __damon_va_three_regions(stru
+        * If this is too slow, it can be optimised to examine the maple
+        * tree gaps.
+        */
++      rcu_read_lock();
+       for_each_vma(vmi, vma) {
+               unsigned long gap;
+@@ -146,6 +147,7 @@ static int __damon_va_three_regions(stru
+ next:
+               prev = vma;
+       }
++      rcu_read_unlock();
+       if (!sz_range(&second_gap) || !sz_range(&first_gap))
+               return -EINVAL;
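
The fix is the canonical RCU read-side bracket: the whole maple-tree walk
must sit between rcu_read_lock() and rcu_read_unlock(). The same
discipline can be exercised in userspace with the liburcu library
(assumed installed; build with -lurcu); this is an illustration of the
pattern, not the kernel code:

#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>

struct region { long start, end; };

static struct region *shared;	/* published with rcu_assign_pointer() */

int main(void)
{
	struct region *r = malloc(sizeof(*r));

	r->start = 0;
	r->end = 4096;
	rcu_register_thread();
	rcu_assign_pointer(shared, r);

	rcu_read_lock();	/* the entire traversal stays inside */
	struct region *cur = rcu_dereference(shared);
	if (cur)
		printf("region: %ld-%ld\n", cur->start, cur->end);
	rcu_read_unlock();

	synchronize_rcu();	/* updaters wait out readers before freeing */
	free(r);
	rcu_unregister_thread();
	return 0;
}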
diff --git a/queue-6.11/mm-huge_memory-ensure-huge_zero_folio-won-t-have-large_rmappable-flag-set.patch b/queue-6.11/mm-huge_memory-ensure-huge_zero_folio-won-t-have-large_rmappable-flag-set.patch
new file mode 100644 (file)
index 0000000..75e1780
--- /dev/null
@@ -0,0 +1,35 @@
+From 2a1b8648d9be9f37f808a36c0f74adb8c53d06e6 Mon Sep 17 00:00:00 2001
+From: Miaohe Lin <linmiaohe@huawei.com>
+Date: Sat, 14 Sep 2024 09:53:06 +0800
+Subject: mm/huge_memory: ensure huge_zero_folio won't have large_rmappable flag set
+
+From: Miaohe Lin <linmiaohe@huawei.com>
+
+commit 2a1b8648d9be9f37f808a36c0f74adb8c53d06e6 upstream.
+
+Ensure huge_zero_folio won't have the large_rmappable flag set, so that it
+can be reported correctly as thp,zero through stable_page_flags().
+
+Link: https://lkml.kernel.org/r/20240914015306.3656791-1-linmiaohe@huawei.com
+Fixes: 5691753d73a2 ("mm: convert huge_zero_page to huge_zero_folio")
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/huge_memory.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -220,6 +220,8 @@ retry:
+               count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
+               return false;
+       }
++      /* Ensure zero folio won't have large_rmappable flag set. */
++      folio_clear_large_rmappable(zero_folio);
+       preempt_disable();
+       if (cmpxchg(&huge_zero_folio, NULL, zero_folio)) {
+               preempt_enable();
diff --git a/queue-6.11/mm-hugetlb.c-fix-uaf-of-vma-in-hugetlb-fault-pathway.patch b/queue-6.11/mm-hugetlb.c-fix-uaf-of-vma-in-hugetlb-fault-pathway.patch
new file mode 100644 (file)
index 0000000..0b74d1a
--- /dev/null
@@ -0,0 +1,80 @@
+From 98b74bb4d7e96b4da5ef3126511febe55b76b807 Mon Sep 17 00:00:00 2001
+From: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
+Date: Sat, 14 Sep 2024 12:41:19 -0700
+Subject: mm/hugetlb.c: fix UAF of vma in hugetlb fault pathway
+
+From: Vishal Moola (Oracle) <vishal.moola@gmail.com>
+
+commit 98b74bb4d7e96b4da5ef3126511febe55b76b807 upstream.
+
+Syzbot reports a UAF in hugetlb_fault().  This happens because
+vmf_anon_prepare() could drop the per-VMA lock and allow the current VMA
+to be freed before hugetlb_vma_unlock_read() is called.
+
+We can fix this by using a modified version of vmf_anon_prepare() that
+doesn't release the VMA lock on failure, and then releasing it ourselves
+after hugetlb_vma_unlock_read().
+
+Link: https://lkml.kernel.org/r/20240914194243.245-2-vishal.moola@gmail.com
+Fixes: 9acad7ba3e25 ("hugetlb: use vmf_anon_prepare() instead of anon_vma_prepare()")
+Reported-by: syzbot+2dab93857ee95f2eeb08@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/linux-mm/00000000000067c20b06219fbc26@google.com/
+Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
+Cc: Muchun Song <muchun.song@linux.dev>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/hugetlb.c |   20 ++++++++++++++++++--
+ 1 file changed, 18 insertions(+), 2 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -6076,7 +6076,7 @@ retry_avoidcopy:
+        * When the original hugepage is shared one, it does not have
+        * anon_vma prepared.
+        */
+-      ret = vmf_anon_prepare(vmf);
++      ret = __vmf_anon_prepare(vmf);
+       if (unlikely(ret))
+               goto out_release_all;
+@@ -6275,7 +6275,7 @@ static vm_fault_t hugetlb_no_page(struct
+               }
+               if (!(vma->vm_flags & VM_MAYSHARE)) {
+-                      ret = vmf_anon_prepare(vmf);
++                      ret = __vmf_anon_prepare(vmf);
+                       if (unlikely(ret))
+                               goto out;
+               }
+@@ -6406,6 +6406,14 @@ static vm_fault_t hugetlb_no_page(struct
+       folio_unlock(folio);
+ out:
+       hugetlb_vma_unlock_read(vma);
++
++      /*
++       * We must check to release the per-VMA lock. __vmf_anon_prepare() is
++       * the only way ret can be set to VM_FAULT_RETRY.
++       */
++      if (unlikely(ret & VM_FAULT_RETRY))
++              vma_end_read(vma);
++
+       mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+       return ret;
+@@ -6627,6 +6635,14 @@ out_ptl:
+       }
+ out_mutex:
+       hugetlb_vma_unlock_read(vma);
++
++      /*
++       * We must check to release the per-VMA lock. __vmf_anon_prepare() in
++       * hugetlb_wp() is the only way ret can be set to VM_FAULT_RETRY.
++       */
++      if (unlikely(ret & VM_FAULT_RETRY))
++              vma_end_read(vma);
++
+       mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+       /*
+        * Generally it's safe to hold refcount during waiting page lock. But
diff --git a/queue-6.11/mm-hugetlb_vmemmap-batch-hvo-work-when-demoting.patch b/queue-6.11/mm-hugetlb_vmemmap-batch-hvo-work-when-demoting.patch
new file mode 100644 (file)
index 0000000..31a59d5
--- /dev/null
@@ -0,0 +1,264 @@
+From c0f398c3b2cf67976bca216f80668b9c93368385 Mon Sep 17 00:00:00 2001
+From: Yu Zhao <yuzhao@google.com>
+Date: Mon, 12 Aug 2024 16:48:23 -0600
+Subject: mm/hugetlb_vmemmap: batch HVO work when demoting
+
+From: Yu Zhao <yuzhao@google.com>
+
+commit c0f398c3b2cf67976bca216f80668b9c93368385 upstream.
+
+Batch the HVO work, including de-HVO of the source and HVO of the
+destination hugeTLB folios, to speed up demotion.
+
+After commit bd225530a4c7 ("mm/hugetlb_vmemmap: fix race with speculative
+PFN walkers"), each request of HVO or de-HVO, batched or not, invokes
+synchronize_rcu() once.  For example, when not batched, demoting one 1GB
+hugeTLB folio to 512 2MB hugeTLB folios invokes synchronize_rcu() 513
+times (1 de-HVO plus 512 HVO requests), whereas when batched, only twice
+(1 de-HVO plus 1 HVO request).  The performance difference between the
+two cases is significant, e.g.,
+
+  echo 2048kB >/sys/kernel/mm/hugepages/hugepages-1048576kB/demote_size
+  time echo 100 >/sys/kernel/mm/hugepages/hugepages-1048576kB/demote
+
+Before this patch:
+  real     8m58.158s
+  user     0m0.009s
+  sys      0m5.900s
+
+After this patch:
+  real     0m0.900s
+  user     0m0.000s
+  sys      0m0.851s
+
+Note that this patch changes the behavior of the `demote` interface when
+de-HVO fails.  Before, the interface aborted immediately upon failure; now
+it tries to finish the entire batch, meaning it can make extra progress if
+the rest of the batch contains folios that do not need de-HVO.
+
+Link: https://lkml.kernel.org/r/20240812224823.3914837-1-yuzhao@google.com
+Fixes: bd225530a4c7 ("mm/hugetlb_vmemmap: fix race with speculative PFN walkers")
+Signed-off-by: Yu Zhao <yuzhao@google.com>
+Reviewed-by: Muchun Song <muchun.song@linux.dev>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/hugetlb.c |  156 ++++++++++++++++++++++++++++++++++-------------------------
+ 1 file changed, 92 insertions(+), 64 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3921,101 +3921,125 @@ out:
+       return 0;
+ }
+-static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
++static long demote_free_hugetlb_folios(struct hstate *src, struct hstate *dst,
++                                     struct list_head *src_list)
+ {
+-      int i, nid = folio_nid(folio);
+-      struct hstate *target_hstate;
+-      struct page *subpage;
+-      struct folio *inner_folio;
+-      int rc = 0;
++      long rc;
++      struct folio *folio, *next;
++      LIST_HEAD(dst_list);
++      LIST_HEAD(ret_list);
+-      target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
+-
+-      remove_hugetlb_folio(h, folio, false);
+-      spin_unlock_irq(&hugetlb_lock);
+-
+-      /*
+-       * If vmemmap already existed for folio, the remove routine above would
+-       * have cleared the hugetlb folio flag.  Hence the folio is technically
+-       * no longer a hugetlb folio.  hugetlb_vmemmap_restore_folio can only be
+-       * passed hugetlb folios and will BUG otherwise.
+-       */
+-      if (folio_test_hugetlb(folio)) {
+-              rc = hugetlb_vmemmap_restore_folio(h, folio);
+-              if (rc) {
+-                      /* Allocation of vmemmmap failed, we can not demote folio */
+-                      spin_lock_irq(&hugetlb_lock);
+-                      add_hugetlb_folio(h, folio, false);
+-                      return rc;
+-              }
+-      }
+-
+-      /*
+-       * Use destroy_compound_hugetlb_folio_for_demote for all huge page
+-       * sizes as it will not ref count folios.
+-       */
+-      destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h));
++      rc = hugetlb_vmemmap_restore_folios(src, src_list, &ret_list);
++      list_splice_init(&ret_list, src_list);
+       /*
+        * Taking target hstate mutex synchronizes with set_max_huge_pages.
+        * Without the mutex, pages added to target hstate could be marked
+        * as surplus.
+        *
+-       * Note that we already hold h->resize_lock.  To prevent deadlock,
++       * Note that we already hold src->resize_lock.  To prevent deadlock,
+        * use the convention of always taking larger size hstate mutex first.
+        */
+-      mutex_lock(&target_hstate->resize_lock);
+-      for (i = 0; i < pages_per_huge_page(h);
+-                              i += pages_per_huge_page(target_hstate)) {
+-              subpage = folio_page(folio, i);
+-              inner_folio = page_folio(subpage);
+-              if (hstate_is_gigantic(target_hstate))
+-                      prep_compound_gigantic_folio_for_demote(inner_folio,
+-                                                      target_hstate->order);
+-              else
+-                      prep_compound_page(subpage, target_hstate->order);
+-              folio_change_private(inner_folio, NULL);
+-              prep_new_hugetlb_folio(target_hstate, inner_folio, nid);
+-              free_huge_folio(inner_folio);
++      mutex_lock(&dst->resize_lock);
++
++      list_for_each_entry_safe(folio, next, src_list, lru) {
++              int i;
++
++              if (folio_test_hugetlb_vmemmap_optimized(folio))
++                      continue;
++
++              list_del(&folio->lru);
++              /*
++               * Use destroy_compound_hugetlb_folio_for_demote for all huge page
++               * sizes as it will not ref count folios.
++               */
++              destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(src));
++
++              for (i = 0; i < pages_per_huge_page(src); i += pages_per_huge_page(dst)) {
++                      struct page *page = folio_page(folio, i);
++
++                      if (hstate_is_gigantic(dst))
++                              prep_compound_gigantic_folio_for_demote(page_folio(page),
++                                                                      dst->order);
++                      else
++                              prep_compound_page(page, dst->order);
++                      set_page_private(page, 0);
++
++                      init_new_hugetlb_folio(dst, page_folio(page));
++                      list_add(&page->lru, &dst_list);
++              }
+       }
+-      mutex_unlock(&target_hstate->resize_lock);
+-      spin_lock_irq(&hugetlb_lock);
++      prep_and_add_allocated_folios(dst, &dst_list);
+-      /*
+-       * Not absolutely necessary, but for consistency update max_huge_pages
+-       * based on pool changes for the demoted page.
+-       */
+-      h->max_huge_pages--;
+-      target_hstate->max_huge_pages +=
+-              pages_per_huge_page(h) / pages_per_huge_page(target_hstate);
++      mutex_unlock(&dst->resize_lock);
+       return rc;
+ }
+-static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
++static long demote_pool_huge_page(struct hstate *src, nodemask_t *nodes_allowed,
++                                unsigned long nr_to_demote)
+       __must_hold(&hugetlb_lock)
+ {
+       int nr_nodes, node;
+-      struct folio *folio;
++      struct hstate *dst;
++      long rc = 0;
++      long nr_demoted = 0;
+       lockdep_assert_held(&hugetlb_lock);
+       /* We should never get here if no demote order */
+-      if (!h->demote_order) {
++      if (!src->demote_order) {
+               pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n");
+               return -EINVAL;         /* internal error */
+       }
++      dst = size_to_hstate(PAGE_SIZE << src->demote_order);
+-      for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
+-              list_for_each_entry(folio, &h->hugepage_freelists[node], lru) {
++      for_each_node_mask_to_free(src, nr_nodes, node, nodes_allowed) {
++              LIST_HEAD(list);
++              struct folio *folio, *next;
++
++              list_for_each_entry_safe(folio, next, &src->hugepage_freelists[node], lru) {
+                       if (folio_test_hwpoison(folio))
+                               continue;
+-                      return demote_free_hugetlb_folio(h, folio);
++
++                      remove_hugetlb_folio(src, folio, false);
++                      list_add(&folio->lru, &list);
++
++                      if (++nr_demoted == nr_to_demote)
++                              break;
++              }
++
++              spin_unlock_irq(&hugetlb_lock);
++
++              rc = demote_free_hugetlb_folios(src, dst, &list);
++
++              spin_lock_irq(&hugetlb_lock);
++
++              list_for_each_entry_safe(folio, next, &list, lru) {
++                      list_del(&folio->lru);
++                      add_hugetlb_folio(src, folio, false);
++
++                      nr_demoted--;
+               }
++
++              if (rc < 0 || nr_demoted == nr_to_demote)
++                      break;
+       }
+       /*
++       * Not absolutely necessary, but for consistency update max_huge_pages
++       * based on pool changes for the demoted page.
++       */
++      src->max_huge_pages -= nr_demoted;
++      dst->max_huge_pages += nr_demoted << (huge_page_order(src) - huge_page_order(dst));
++
++      if (rc < 0)
++              return rc;
++
++      if (nr_demoted)
++              return nr_demoted;
++      /*
+        * Only way to get here is if all pages on free lists are poisoned.
+        * Return -EBUSY so that caller will not retry.
+        */
+@@ -4249,6 +4273,8 @@ static ssize_t demote_store(struct kobje
+       spin_lock_irq(&hugetlb_lock);
+       while (nr_demote) {
++              long rc;
++
+               /*
+                * Check for available pages to demote each time thorough the
+                * loop as demote_pool_huge_page will drop hugetlb_lock.
+@@ -4261,11 +4287,13 @@ static ssize_t demote_store(struct kobje
+               if (!nr_available)
+                       break;
+-              err = demote_pool_huge_page(h, n_mask);
+-              if (err)
++              rc = demote_pool_huge_page(h, n_mask, nr_demote);
++              if (rc < 0) {
++                      err = rc;
+                       break;
++              }
+-              nr_demote--;
++              nr_demote -= rc;
+       }
+       spin_unlock_irq(&hugetlb_lock);
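
The win comes almost entirely from amortizing synchronize_rcu(): one
grace period for the whole batch instead of one per folio. The effect is
easy to approximate in userspace with liburcu's synchronize_rcu()
(assumed installed; build with -lurcu); a rough timing sketch:

#include <stdio.h>
#include <time.h>
#include <urcu.h>

static double now(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec + ts.tv_nsec / 1e9;
}

int main(void)
{
	enum { N = 512 };	/* 2MB folios per 1GB folio, as above */
	double t;

	rcu_register_thread();

	t = now();
	for (int i = 0; i < N; i++)
		synchronize_rcu();	/* unbatched: N grace periods */
	printf("unbatched: %.3fs\n", now() - t);

	t = now();
	synchronize_rcu();		/* batched: a single grace period */
	printf("batched:   %.3fs\n", now() - t);

	rcu_unregister_thread();
	return 0;
}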
diff --git a/queue-6.11/mm-only-enforce-minimum-stack-gap-size-if-it-s-sensible.patch b/queue-6.11/mm-only-enforce-minimum-stack-gap-size-if-it-s-sensible.patch
new file mode 100644 (file)
index 0000000..0d45f1a
--- /dev/null
@@ -0,0 +1,51 @@
+From 69b50d4351ed924f29e3d46b159e28f70dfc707f Mon Sep 17 00:00:00 2001
+From: David Gow <davidgow@google.com>
+Date: Sat, 3 Aug 2024 15:46:41 +0800
+Subject: mm: only enforce minimum stack gap size if it's sensible
+
+From: David Gow <davidgow@google.com>
+
+commit 69b50d4351ed924f29e3d46b159e28f70dfc707f upstream.
+
+The generic mmap_base code tries to leave a gap between the top of the
+stack and the mmap base address, but enforces a minimum gap size (MIN_GAP)
+of 128MB, which is too large on some setups.  In particular, on arm tasks
+without ADDR_LIMIT_32BIT, the STACK_TOP value is less than 128MB, so it's
+impossible to fit such a gap in.
+
+Only enforce this minimum if MIN_GAP < MAX_GAP, as we'd prefer to honour
+MAX_GAP, which is defined proportionally, so scales better and always
+leaves us with both _some_ stack space and some room for mmap.
+
+This fixes the usercopy KUnit test suite on 32-bit arm, as it doesn't set
+any personality flags so gets the default (in this case 26-bit) task size.
+This test can be run with: ./tools/testing/kunit/kunit.py run --arch arm
+usercopy --make_options LLVM=1
+
+Link: https://lkml.kernel.org/r/20240803074642.1849623-2-davidgow@google.com
+Fixes: dba79c3df4a2 ("arm: use generic mmap top-down layout and brk randomization")
+Signed-off-by: David Gow <davidgow@google.com>
+Reviewed-by: Kees Cook <kees@kernel.org>
+Cc: Alexandre Ghiti <alex@ghiti.fr>
+Cc: Linus Walleij <linus.walleij@linaro.org>
+Cc: Luis Chamberlain <mcgrof@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Russell King <linux@armlinux.org.uk>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/util.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -463,7 +463,7 @@ static unsigned long mmap_base(unsigned
+       if (gap + pad > gap)
+               gap += pad;
+-      if (gap < MIN_GAP)
++      if (gap < MIN_GAP && MIN_GAP < MAX_GAP)
+               gap = MIN_GAP;
+       else if (gap > MAX_GAP)
+               gap = MAX_GAP;
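
The one-line condition is easier to see with numbers. A standalone sketch
of the clamp, using simplified stand-ins for the kernel macros (a fixed
128MB floor and a cap of 5/6 of the task size; the real definitions
differ in detail):

#include <stdio.h>

#define MB (1024UL * 1024UL)

static unsigned long clamp_gap(unsigned long gap, unsigned long task_size,
			       int apply_fix)
{
	unsigned long min_gap = 128 * MB;		/* fixed floor */
	unsigned long max_gap = task_size / 6 * 5;	/* proportional cap */

	/* the old behavior raised to min_gap even when min_gap > max_gap */
	if (gap < min_gap && (!apply_fix || min_gap < max_gap))
		gap = min_gap;
	else if (gap > max_gap)
		gap = max_gap;
	return gap;
}

int main(void)
{
	unsigned long task26 = 1UL << 26;	/* 64MB, a 26-bit arm task */

	printf("old: gap = %lu MB (does not fit the 64MB task)\n",
	       clamp_gap(8 * MB, task26, 0) / MB);
	printf("new: gap = %lu MB\n",
	       clamp_gap(8 * MB, task26, 1) / MB);
	return 0;
}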
diff --git a/queue-6.11/module-fix-kcov-ignored-file-name.patch b/queue-6.11/module-fix-kcov-ignored-file-name.patch
new file mode 100644 (file)
index 0000000..4f0db74
--- /dev/null
@@ -0,0 +1,36 @@
+From f34d086fb7102fec895fd58b9e816b981b284c17 Mon Sep 17 00:00:00 2001
+From: Dmitry Vyukov <dvyukov@google.com>
+Date: Tue, 11 Jun 2024 09:50:32 +0200
+Subject: module: Fix KCOV-ignored file name
+
+From: Dmitry Vyukov <dvyukov@google.com>
+
+commit f34d086fb7102fec895fd58b9e816b981b284c17 upstream.
+
+module.c was renamed to main.c, but the Makefile directive was copy-pasted
+verbatim with the old file name.  Fix up the file name.
+
+Fixes: cfc1d277891e ("module: Move all into module/")
+Signed-off-by: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Alexander Potapenko <glider@google.com>
+Reviewed-by: Marco Elver <elver@google.com>
+Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/all/bc0cf790b4839c5e38e2fafc64271f620568a39e.1718092070.git.dvyukov@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/module/Makefile |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/module/Makefile
++++ b/kernel/module/Makefile
+@@ -5,7 +5,7 @@
+ # These are called from save_stack_trace() on slub debug path,
+ # and produce insane amounts of uninteresting coverage.
+-KCOV_INSTRUMENT_module.o := n
++KCOV_INSTRUMENT_main.o := n
+ obj-y += main.o
+ obj-y += strict_rwx.o
diff --git a/queue-6.11/s390-ftrace-avoid-calling-unwinder-in-ftrace_return_address.patch b/queue-6.11/s390-ftrace-avoid-calling-unwinder-in-ftrace_return_address.patch
new file mode 100644 (file)
index 0000000..d08c2df
--- /dev/null
@@ -0,0 +1,103 @@
+From a84dd0d8ae24bdc6da341187fc4c1a0adfce2ccc Mon Sep 17 00:00:00 2001
+From: Vasily Gorbik <gor@linux.ibm.com>
+Date: Sat, 24 Aug 2024 02:14:04 +0200
+Subject: s390/ftrace: Avoid calling unwinder in ftrace_return_address()
+
+From: Vasily Gorbik <gor@linux.ibm.com>
+
+commit a84dd0d8ae24bdc6da341187fc4c1a0adfce2ccc upstream.
+
+ftrace_return_address() is called extremely often from
+performance-critical code paths when debugging features like
+CONFIG_TRACE_IRQFLAGS are enabled. For example, with debug_defconfig,
+ftrace selftests on my LPAR currently execute ftrace_return_address()
+as follows:
+
+ftrace_return_address(0) - 0 times (common code uses __builtin_return_address(0) instead)
+ftrace_return_address(1) - 2,986,805,401 times (with this patch applied)
+ftrace_return_address(2) - 140 times
+ftrace_return_address(>2) - 0 times
+
+The use of __builtin_return_address(n) was replaced by return_address()
+with an unwinder call by commit cae74ba8c295 ("s390/ftrace:
+Use unwinder instead of __builtin_return_address()") because
+__builtin_return_address(n) simply walks the stack backchain and doesn't
+check for reaching the stack top. For shallow stacks with fewer than
+"n" frames, this results in reads at low addresses and random
+memory accesses.
+
+While calling the fully functional unwinder "works", it is very slow
+for this purpose. Moreover, potentially following stack switches and
+walking past IRQ context is simply the wrong thing to do for
+ftrace_return_address().
+
+Reimplement return_address() to essentially be __builtin_return_address(n)
+with checks for reaching the stack top. Since the ftrace_return_address(n)
+argument is always a constant, keep the implementation in the header,
+allowing both GCC and Clang to unroll the loop and optimize it to the
+bare minimum.
+
+Fixes: cae74ba8c295 ("s390/ftrace: Use unwinder instead of __builtin_return_address()")
+Cc: stable@vger.kernel.org
+Reported-by: Sumanth Korikkar <sumanthk@linux.ibm.com>
+Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
+Acked-by: Sumanth Korikkar <sumanthk@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/include/asm/ftrace.h |   17 ++++++++++++++++-
+ arch/s390/kernel/stacktrace.c  |   19 -------------------
+ 2 files changed, 16 insertions(+), 20 deletions(-)
+
+--- a/arch/s390/include/asm/ftrace.h
++++ b/arch/s390/include/asm/ftrace.h
+@@ -6,8 +6,23 @@
+ #define MCOUNT_INSN_SIZE      6
+ #ifndef __ASSEMBLY__
++#include <asm/stacktrace.h>
+-unsigned long return_address(unsigned int n);
++static __always_inline unsigned long return_address(unsigned int n)
++{
++      struct stack_frame *sf;
++
++      if (!n)
++              return (unsigned long)__builtin_return_address(0);
++
++      sf = (struct stack_frame *)current_frame_address();
++      do {
++              sf = (struct stack_frame *)sf->back_chain;
++              if (!sf)
++                      return 0;
++      } while (--n);
++      return sf->gprs[8];
++}
+ #define ftrace_return_address(n) return_address(n)
+ void ftrace_caller(void);
+--- a/arch/s390/kernel/stacktrace.c
++++ b/arch/s390/kernel/stacktrace.c
+@@ -162,22 +162,3 @@ void arch_stack_walk_user(stack_trace_co
+ {
+       arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false);
+ }
+-
+-unsigned long return_address(unsigned int n)
+-{
+-      struct unwind_state state;
+-      unsigned long addr;
+-
+-      /* Increment to skip current stack entry */
+-      n++;
+-
+-      unwind_for_each_frame(&state, NULL, NULL, 0) {
+-              addr = unwind_get_return_address(&state);
+-              if (!addr)
+-                      break;
+-              if (!n--)
+-                      return addr;
+-      }
+-      return 0;
+-}
+-EXPORT_SYMBOL_GPL(return_address);
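
The replacement is essentially __builtin_return_address(n) plus a NULL
check on every back-chain hop. A portable sketch of the shape, using a
synthetic frame struct in place of the real s390 stack frames:

#include <stdio.h>
#include <stddef.h>

struct frame {
	struct frame *back_chain;	/* NULL marks the stack top */
	unsigned long ret_addr;
};

static inline unsigned long return_address(const struct frame *sf,
					   unsigned int n)
{
	if (!n)
		return sf->ret_addr;
	do {
		sf = sf->back_chain;
		if (!sf)		/* hit the stack top: no such frame */
			return 0;
	} while (--n);
	return sf->ret_addr;
}

int main(void)
{
	/* three-frame chain: leaf -> mid -> top (top has no parent) */
	struct frame top = { NULL, 0x1000 };
	struct frame mid = { &top, 0x2000 };
	struct frame leaf = { &mid, 0x3000 };

	printf("n=1 -> %#lx\n", return_address(&leaf, 1));	/* 0x2000 */
	printf("n=3 -> %#lx\n", return_address(&leaf, 3));	/* 0: off the top */
	return 0;
}

Because ftrace_return_address() is always called with a constant n,
keeping the loop in a header lets both GCC and Clang unroll it to a
handful of loads and compares.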
diff --git a/queue-6.11/selftest-mm-mseal-fix-test_seal_mremap_move_dontunmap_anyaddr.patch b/queue-6.11/selftest-mm-mseal-fix-test_seal_mremap_move_dontunmap_anyaddr.patch
new file mode 100644 (file)
index 0000000..7d9d5b5
--- /dev/null
@@ -0,0 +1,199 @@
+From 072cd213b75eb01fcf40eff898f8d5c008ce1457 Mon Sep 17 00:00:00 2001
+From: Jeff Xu <jeffxu@chromium.org>
+Date: Wed, 7 Aug 2024 21:23:20 +0000
+Subject: selftest mm/mseal: fix test_seal_mremap_move_dontunmap_anyaddr
+
+From: Jeff Xu <jeffxu@chromium.org>
+
+commit 072cd213b75eb01fcf40eff898f8d5c008ce1457 upstream.
+
+The mremap() syscall accepts the following:
+
+mremap(src, size, size, MREMAP_MAYMOVE | MREMAP_DONTUNMAP, dst)
+
+When src is sealed, the call fails with error code EPERM.
+
+Previously, the test used the hard-coded address 0xdeaddead as dst,
+which fails on systems with a newer glibc installed.
+
+This patch removes the test's dependency on glibc for mremap(),
+fixes the test, and removes the hard-coded address.
+
+Link: https://lkml.kernel.org/r/20240807212320.2831848-1-jeffxu@chromium.org
+Fixes: 4926c7a52de7 ("selftest mm/mseal memory sealing")
+Signed-off-by: Jeff Xu <jeffxu@chromium.org>
+Reported-by: Pedro Falcato <pedro.falcato@gmail.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
+Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/mm/mseal_test.c |   57 ++++++++++++++++++++------------
+ 1 file changed, 36 insertions(+), 21 deletions(-)
+
+--- a/tools/testing/selftests/mm/mseal_test.c
++++ b/tools/testing/selftests/mm/mseal_test.c
+@@ -99,6 +99,16 @@ static int sys_madvise(void *start, size
+       return sret;
+ }
++static void *sys_mremap(void *addr, size_t old_len, size_t new_len,
++      unsigned long flags, void *new_addr)
++{
++      void *sret;
++
++      errno = 0;
++      sret = (void *) syscall(__NR_mremap, addr, old_len, new_len, flags, new_addr);
++      return sret;
++}
++
+ static int sys_pkey_alloc(unsigned long flags, unsigned long init_val)
+ {
+       int ret = syscall(__NR_pkey_alloc, flags, init_val);
+@@ -1104,12 +1114,12 @@ static void test_seal_mremap_shrink(bool
+       }
+       /* shrink from 4 pages to 2 pages. */
+-      ret2 = mremap(ptr, size, 2 * page_size, 0, 0);
++      ret2 = sys_mremap(ptr, size, 2 * page_size, 0, 0);
+       if (seal) {
+-              FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
++              FAIL_TEST_IF_FALSE(ret2 == (void *) MAP_FAILED);
+               FAIL_TEST_IF_FALSE(errno == EPERM);
+       } else {
+-              FAIL_TEST_IF_FALSE(ret2 != MAP_FAILED);
++              FAIL_TEST_IF_FALSE(ret2 != (void *) MAP_FAILED);
+       }
+@@ -1136,7 +1146,7 @@ static void test_seal_mremap_expand(bool
+       }
+       /* expand from 2 page to 4 pages. */
+-      ret2 = mremap(ptr, 2 * page_size, 4 * page_size, 0, 0);
++      ret2 = sys_mremap(ptr, 2 * page_size, 4 * page_size, 0, 0);
+       if (seal) {
+               FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
+               FAIL_TEST_IF_FALSE(errno == EPERM);
+@@ -1169,7 +1179,7 @@ static void test_seal_mremap_move(bool s
+       }
+       /* move from ptr to fixed address. */
+-      ret2 = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, newPtr);
++      ret2 = sys_mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, newPtr);
+       if (seal) {
+               FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
+               FAIL_TEST_IF_FALSE(errno == EPERM);
+@@ -1288,7 +1298,7 @@ static void test_seal_mremap_shrink_fixe
+       }
+       /* mremap to move and shrink to fixed address */
+-      ret2 = mremap(ptr, size, 2 * page_size, MREMAP_MAYMOVE | MREMAP_FIXED,
++      ret2 = sys_mremap(ptr, size, 2 * page_size, MREMAP_MAYMOVE | MREMAP_FIXED,
+                       newAddr);
+       if (seal) {
+               FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
+@@ -1319,7 +1329,7 @@ static void test_seal_mremap_expand_fixe
+       }
+       /* mremap to move and expand to fixed address */
+-      ret2 = mremap(ptr, page_size, size, MREMAP_MAYMOVE | MREMAP_FIXED,
++      ret2 = sys_mremap(ptr, page_size, size, MREMAP_MAYMOVE | MREMAP_FIXED,
+                       newAddr);
+       if (seal) {
+               FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
+@@ -1350,7 +1360,7 @@ static void test_seal_mremap_move_fixed(
+       }
+       /* mremap to move to fixed address */
+-      ret2 = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, newAddr);
++      ret2 = sys_mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, newAddr);
+       if (seal) {
+               FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
+               FAIL_TEST_IF_FALSE(errno == EPERM);
+@@ -1379,14 +1389,13 @@ static void test_seal_mremap_move_fixed_
+       /*
+        * MREMAP_FIXED can move the mapping to zero address
+        */
+-      ret2 = mremap(ptr, size, 2 * page_size, MREMAP_MAYMOVE | MREMAP_FIXED,
++      ret2 = sys_mremap(ptr, size, 2 * page_size, MREMAP_MAYMOVE | MREMAP_FIXED,
+                       0);
+       if (seal) {
+               FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
+               FAIL_TEST_IF_FALSE(errno == EPERM);
+       } else {
+               FAIL_TEST_IF_FALSE(ret2 == 0);
+-
+       }
+       REPORT_TEST_PASS();
+@@ -1409,13 +1418,13 @@ static void test_seal_mremap_move_dontun
+       }
+       /* mremap to move, and don't unmap src addr. */
+-      ret2 = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_DONTUNMAP, 0);
++      ret2 = sys_mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_DONTUNMAP, 0);
+       if (seal) {
+               FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
+               FAIL_TEST_IF_FALSE(errno == EPERM);
+       } else {
++              /* kernel will allocate a new address */
+               FAIL_TEST_IF_FALSE(ret2 != MAP_FAILED);
+-
+       }
+       REPORT_TEST_PASS();
+@@ -1423,7 +1432,7 @@ static void test_seal_mremap_move_dontun
+ static void test_seal_mremap_move_dontunmap_anyaddr(bool seal)
+ {
+-      void *ptr;
++      void *ptr, *ptr2;
+       unsigned long page_size = getpagesize();
+       unsigned long size = 4 * page_size;
+       int ret;
+@@ -1438,24 +1447,30 @@ static void test_seal_mremap_move_dontun
+       }
+       /*
+-       * The 0xdeaddead should not have effect on dest addr
+-       * when MREMAP_DONTUNMAP is set.
++       * The new address is any address that not allocated.
++       * use allocate/free to similate that.
++       */
++      setup_single_address(size, &ptr2);
++      FAIL_TEST_IF_FALSE(ptr2 != (void *)-1);
++      ret = sys_munmap(ptr2, size);
++      FAIL_TEST_IF_FALSE(!ret);
++
++      /*
++       * remap to any address.
+        */
+-      ret2 = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_DONTUNMAP,
+-                      0xdeaddead);
++      ret2 = sys_mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_DONTUNMAP,
++                      (void *) ptr2);
+       if (seal) {
+               FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
+               FAIL_TEST_IF_FALSE(errno == EPERM);
+       } else {
+-              FAIL_TEST_IF_FALSE(ret2 != MAP_FAILED);
+-              FAIL_TEST_IF_FALSE((long)ret2 != 0xdeaddead);
+-
++              /* remap success and return ptr2 */
++              FAIL_TEST_IF_FALSE(ret2 ==  ptr2);
+       }
+       REPORT_TEST_PASS();
+ }
+-
+ static void test_seal_merge_and_split(void)
+ {
+       void *ptr;
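
The test's technique generalizes: call __NR_mremap directly so the glibc
wrapper cannot interfere, and obtain a known-unallocated target address
by reserving a range and immediately unmapping it. A minimal userspace
sketch (mseal() itself is omitted, so the move is expected to succeed;
MREMAP_DONTUNMAP needs kernel 5.7 or later):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef MREMAP_DONTUNMAP
#define MREMAP_DONTUNMAP 4	/* for older libc headers */
#endif

static void *sys_mremap(void *addr, size_t old_len, size_t new_len,
			unsigned long flags, void *new_addr)
{
	return (void *)syscall(__NR_mremap, addr, old_len, new_len,
			       flags, new_addr);
}

int main(void)
{
	size_t size = 4 * (size_t)getpagesize();
	void *src, *dst, *ret;

	src = mmap(NULL, size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	/* reserve an address range, then free it: a known-unused target */
	dst = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	munmap(dst, size);

	ret = sys_mremap(src, size, size,
			 MREMAP_MAYMOVE | MREMAP_DONTUNMAP, dst);
	printf("mremap returned %p (wanted %p)\n", ret, dst);
	return ret == MAP_FAILED;
}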
diff --git a/queue-6.11/series b/queue-6.11/series
index b926dfdd93d8c07e581c1abbd28824d2c6218070..974aa0fca33101411d827f1a77bf440601e8f1a8 100644 (file)
@@ -671,3 +671,23 @@ thermal-sysfs-get-to-trips-via-attribute-pointers.patch
 thermal-sysfs-refine-the-handling-of-trip-hysteresis.patch
 thermal-sysfs-add-sanity-checks-for-trip-temperature.patch
 lsm-infrastructure-management-of-the-sock-security.patch
+bpf-lsm-set-bpf_lsm_blob_sizes.lbs_task-to-0.patch
+dm-verity-restart-or-panic-on-an-i-o-error.patch
+compiler.h-specify-correct-attribute-for-.rodata..c_jump_table.patch
+lockdep-fix-deadlock-issue-between-lockdep-and-rcu.patch
+exfat-resolve-memory-leak-from-exfat_create_upcase_table.patch
+mm-hugetlb_vmemmap-batch-hvo-work-when-demoting.patch
+s390-ftrace-avoid-calling-unwinder-in-ftrace_return_address.patch
+selftest-mm-mseal-fix-test_seal_mremap_move_dontunmap_anyaddr.patch
+mm-only-enforce-minimum-stack-gap-size-if-it-s-sensible.patch
+spi-fspi-add-support-for-imx8ulp.patch
+module-fix-kcov-ignored-file-name.patch
+fbdev-xen-fbfront-assign-fb_info-device.patch
+tpm-export-tpm2_sessions_init-to-fix-ibmvtpm-building.patch
+mm-hugetlb.c-fix-uaf-of-vma-in-hugetlb-fault-pathway.patch
+mm-huge_memory-ensure-huge_zero_folio-won-t-have-large_rmappable-flag-set.patch
+mm-change-vmf_anon_prepare-to-__vmf_anon_prepare.patch
+mm-damon-vaddr-protect-vma-traversal-in-__damon_va_thre_regions-with-rcu-read-lock.patch
+i2c-aspeed-update-the-stop-sw-state-when-the-bus-recovery-occurs.patch
+i2c-isch-add-missed-else.patch
+i2c-xiic-try-re-initialization-on-bus-busy-timeout.patch
diff --git a/queue-6.11/spi-fspi-add-support-for-imx8ulp.patch b/queue-6.11/spi-fspi-add-support-for-imx8ulp.patch
new file mode 100644 (file)
index 0000000..1f252aa
--- /dev/null
@@ -0,0 +1,52 @@
+From 9228956a620553d7fd17f703a37a26c91e4d92ab Mon Sep 17 00:00:00 2001
+From: Haibo Chen <haibo.chen@nxp.com>
+Date: Thu, 5 Sep 2024 17:43:37 +0800
+Subject: spi: fspi: add support for imx8ulp
+
+From: Haibo Chen <haibo.chen@nxp.com>
+
+commit 9228956a620553d7fd17f703a37a26c91e4d92ab upstream.
+
+The flexspi on imx8ulp has only 16 LUTs, unlike the others, which
+have up to 32 LUTs.
+
+Add a separate compatible string and nxp_fspi_devtype_data to support
+flexspi on imx8ulp.
+
+Fixes: ef89fd56bdfc ("arm64: dts: imx8ulp: add flexspi node")
+Cc: stable@kernel.org
+Signed-off-by: Haibo Chen <haibo.chen@nxp.com>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Link: https://patch.msgid.link/20240905094338.1986871-4-haibo.chen@nxp.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/spi/spi-nxp-fspi.c |   10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/spi/spi-nxp-fspi.c
++++ b/drivers/spi/spi-nxp-fspi.c
+@@ -371,6 +371,15 @@ static struct nxp_fspi_devtype_data imx8
+       .little_endian = true,  /* little-endian    */
+ };
++static struct nxp_fspi_devtype_data imx8ulp_data = {
++      .rxfifo = SZ_512,       /* (64  * 64 bits)  */
++      .txfifo = SZ_1K,        /* (128 * 64 bits)  */
++      .ahb_buf_size = SZ_2K,  /* (256 * 64 bits)  */
++      .quirks = 0,
++      .lut_num = 16,
++      .little_endian = true,  /* little-endian    */
++};
++
+ struct nxp_fspi {
+       void __iomem *iobase;
+       void __iomem *ahb_addr;
+@@ -1297,6 +1306,7 @@ static const struct of_device_id nxp_fsp
+       { .compatible = "nxp,imx8mp-fspi", .data = (void *)&imx8mm_data, },
+       { .compatible = "nxp,imx8qxp-fspi", .data = (void *)&imx8qxp_data, },
+       { .compatible = "nxp,imx8dxl-fspi", .data = (void *)&imx8dxl_data, },
++      { .compatible = "nxp,imx8ulp-fspi", .data = (void *)&imx8ulp_data, },
+       { /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, nxp_fspi_dt_ids);
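
For reference, per-compatible devtype data like this is normally fetched
in probe via of_device_get_match_data(). A trimmed, illustrative sketch
with a stand-in struct, not the actual spi-nxp-fspi probe code:

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

/* trimmed stand-in for struct nxp_fspi_devtype_data */
struct fspi_devtype_data {
	unsigned int lut_num;
};

static int example_probe(struct platform_device *pdev)
{
	const struct fspi_devtype_data *data;

	/* returns imx8ulp_data when the node matched "nxp,imx8ulp-fspi" */
	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -ENODEV;

	/* LUT-dependent setup would key off data->lut_num (16 on imx8ulp) */
	return 0;
}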
diff --git a/queue-6.11/tpm-export-tpm2_sessions_init-to-fix-ibmvtpm-building.patch b/queue-6.11/tpm-export-tpm2_sessions_init-to-fix-ibmvtpm-building.patch
new file mode 100644 (file)
index 0000000..4bc2b31
--- /dev/null
@@ -0,0 +1,40 @@
+From f168c000d27f8134160d4a52dfc474a948a3d7e9 Mon Sep 17 00:00:00 2001
+From: Kexy Biscuit <kexybiscuit@aosc.io>
+Date: Mon, 9 Sep 2024 20:28:30 +0300
+Subject: tpm: export tpm2_sessions_init() to fix ibmvtpm building
+
+From: Kexy Biscuit <kexybiscuit@aosc.io>
+
+commit f168c000d27f8134160d4a52dfc474a948a3d7e9 upstream.
+
+Commit 08d08e2e9f0a ("tpm: ibmvtpm: Call tpm2_sessions_init() to
+initialize session support") adds a call to tpm2_sessions_init() in ibmvtpm,
+which can be built as a module. However, tpm2_sessions_init() wasn't
+exported, causing ibmvtpm to fail to build as a module:
+
+ERROR: modpost: "tpm2_sessions_init" [drivers/char/tpm/tpm_ibmvtpm.ko] undefined!
+
+Export tpm2_sessions_init() to resolve the issue.
+
+Cc: stable@vger.kernel.org # v6.10+
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202408051735.ZJkAPQ3b-lkp@intel.com/
+Fixes: 08d08e2e9f0a ("tpm: ibmvtpm: Call tpm2_sessions_init() to initialize session support")
+Signed-off-by: Kexy Biscuit <kexybiscuit@aosc.io>
+Signed-off-by: Mingcong Bai <jeffbai@aosc.io>
+Reviewed-by: Stefan Berger <stefanb@linux.ibm.com>
+Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/tpm/tpm2-sessions.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/char/tpm/tpm2-sessions.c
++++ b/drivers/char/tpm/tpm2-sessions.c
+@@ -1362,4 +1362,5 @@ int tpm2_sessions_init(struct tpm_chip *
+       return rc;
+ }
++EXPORT_SYMBOL(tpm2_sessions_init);
+ #endif /* CONFIG_TCG_TPM2_HMAC */
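
The failure mode is generic: modpost resolves a module's undefined
symbols against the kernel's exported-symbol table, so any built-in
function a module calls must carry an EXPORT_SYMBOL(). A minimal
illustration with hypothetical names (the two halves would live in
separate source files):

#include <linux/module.h>

/* built-in side: stands in for tpm2_sessions_init() */
int helper_init(void)
{
	return 0;
}
EXPORT_SYMBOL(helper_init);	/* drop this to reproduce the modpost error */

/* modular side: stands in for tpm_ibmvtpm.ko */
static int __init consumer_init(void)
{
	return helper_init();
}
module_init(consumer_init);
MODULE_LICENSE("GPL");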