git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 6.10
author     Sasha Levin <sashal@kernel.org>
           Tue, 20 Aug 2024 11:58:49 +0000 (07:58 -0400)
committer  Sasha Levin <sashal@kernel.org>
           Tue, 20 Aug 2024 11:58:49 +0000 (07:58 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
13 files changed:
queue-6.10/arm64-fix-kasan-random-tag-seed-initialization.patch [new file with mode: 0644]
queue-6.10/block-fix-lockdep-warning-in-blk_mq_mark_tag_wait.patch [new file with mode: 0644]
queue-6.10/cpu-smt-enable-smt-only-if-a-core-is-online.patch [new file with mode: 0644]
queue-6.10/io_uring-napi-check-napi_enabled-in-io_napi_add-befo.patch [new file with mode: 0644]
queue-6.10/io_uring-napi-remove-unnecessary-s64-cast.patch [new file with mode: 0644]
queue-6.10/io_uring-napi-use-ktime-in-busy-polling.patch [new file with mode: 0644]
queue-6.10/powerpc-topology-check-if-a-core-is-online.patch [new file with mode: 0644]
queue-6.10/printk-panic-allow-cpu-backtraces-to-be-written-into.patch [new file with mode: 0644]
queue-6.10/rust-fix-the-default-format-for-config_-rustc-bindge.patch [new file with mode: 0644]
queue-6.10/rust-suppress-error-messages-from-config_-rustc-bind.patch [new file with mode: 0644]
queue-6.10/rust-work-around-bindgen-0.69.0-issue.patch [new file with mode: 0644]
queue-6.10/s390-dasd-remove-dma-alignment.patch [new file with mode: 0644]
queue-6.10/series

diff --git a/queue-6.10/arm64-fix-kasan-random-tag-seed-initialization.patch b/queue-6.10/arm64-fix-kasan-random-tag-seed-initialization.patch
new file mode 100644 (file)
index 0000000..663b3f9
--- /dev/null
@@ -0,0 +1,57 @@
+From 1e00a214bb2436c6fcce7cac2b8b5c34854058ec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Aug 2024 02:09:53 -0700
+Subject: arm64: Fix KASAN random tag seed initialization
+
+From: Samuel Holland <samuel.holland@sifive.com>
+
+[ Upstream commit f75c235565f90c4a17b125e47f1c68ef6b8c2bce ]
+
+Currently, kasan_init_sw_tags() is called before setup_per_cpu_areas(),
+so per_cpu(prng_state, cpu) accesses the same address regardless of the
+value of "cpu", and the same seed value gets copied to the percpu area
+for every CPU. Fix this by moving the call to smp_prepare_boot_cpu(),
+which is the first architecture hook after setup_per_cpu_areas().
+
+Fixes: 3c9e3aa11094 ("kasan: add tag related helper functions")
+Fixes: 3f41b6093823 ("kasan: fix random seed generation for tag-based mode")
+Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
+Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
+Link: https://lore.kernel.org/r/20240814091005.969756-1-samuel.holland@sifive.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/kernel/setup.c | 3 ---
+ arch/arm64/kernel/smp.c   | 2 ++
+ 2 files changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
+index a096e2451044d..b22d28ec80284 100644
+--- a/arch/arm64/kernel/setup.c
++++ b/arch/arm64/kernel/setup.c
+@@ -355,9 +355,6 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
+       smp_init_cpus();
+       smp_build_mpidr_hash();
+-      /* Init percpu seeds for random tags after cpus are set up. */
+-      kasan_init_sw_tags();
+-
+ #ifdef CONFIG_ARM64_SW_TTBR0_PAN
+       /*
+        * Make sure init_thread_info.ttbr0 always generates translation
+diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
+index 5de85dccc09cd..05688f6a275f1 100644
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -469,6 +469,8 @@ void __init smp_prepare_boot_cpu(void)
+               init_gic_priority_masking();
+       kasan_init_hw_tags();
++      /* Init percpu seeds for random tags after cpus are set up. */
++      kasan_init_sw_tags();
+ }
+ /*
+-- 
+2.43.0
+
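
A self-contained userspace C sketch of the ordering problem the patch above fixes: if the per-CPU seed is written before each CPU has its own per-CPU slot, every write lands in one bootstrap slot and all CPUs end up sharing a single seed, while seeding after setup gives distinct values. The per-CPU model below is simulated; nothing here is kernel API.

  #include <stdio.h>

  #define NR_CPUS 4

  static unsigned int boot_slot;             /* shared slot before setup      */
  static unsigned int percpu_slot[NR_CPUS];  /* real per-CPU slots            */
  static unsigned int *slot_of[NR_CPUS];     /* stand-in for per_cpu() lookup */

  static void seed_all(unsigned int base)
  {
          for (int cpu = 0; cpu < NR_CPUS; cpu++)
                  *slot_of[cpu] = base + cpu; /* per_cpu(prng_state, cpu) = seed */
  }

  static void setup_per_cpu_areas(void)
  {
          for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                  percpu_slot[cpu] = boot_slot;     /* boot copy goes to all  */
                  slot_of[cpu] = &percpu_slot[cpu]; /* now truly per CPU      */
          }
  }

  static void dump(const char *when)
  {
          printf("%s:", when);
          for (int cpu = 0; cpu < NR_CPUS; cpu++)
                  printf(" %u", *slot_of[cpu]);
          printf("\n");
  }

  int main(void)
  {
          for (int cpu = 0; cpu < NR_CPUS; cpu++)
                  slot_of[cpu] = &boot_slot;  /* before setup: all aliased    */

          seed_all(100);                      /* old placement: too early     */
          setup_per_cpu_areas();
          dump("seeded before setup");        /* 103 103 103 103 - one seed   */

          seed_all(100);                      /* new placement: after setup   */
          dump("seeded after setup");         /* 100 101 102 103 - distinct   */
          return 0;
  }
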
diff --git a/queue-6.10/block-fix-lockdep-warning-in-blk_mq_mark_tag_wait.patch b/queue-6.10/block-fix-lockdep-warning-in-blk_mq_mark_tag_wait.patch
new file mode 100644 (file)
index 0000000..01f24bb
--- /dev/null
@@ -0,0 +1,211 @@
+From f5b987fab4f25bf92dd9ee160ee1e78cb5ed93dc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 Aug 2024 10:47:36 +0800
+Subject: block: Fix lockdep warning in blk_mq_mark_tag_wait
+
+From: Li Lingfeng <lilingfeng3@huawei.com>
+
+[ Upstream commit b313a8c835516bdda85025500be866ac8a74e022 ]
+
+Lockdep reported a warning in Linux version 6.6:
+
+[  414.344659] ================================
+[  414.345155] WARNING: inconsistent lock state
+[  414.345658] 6.6.0-07439-gba2303cacfda #6 Not tainted
+[  414.346221] --------------------------------
+[  414.346712] inconsistent {IN-SOFTIRQ-W} -> {SOFTIRQ-ON-W} usage.
+[  414.347545] kworker/u10:3/1152 [HC0[0]:SC0[0]:HE0:SE1] takes:
+[  414.349245] ffff88810edd1098 (&sbq->ws[i].wait){+.?.}-{2:2}, at: blk_mq_dispatch_rq_list+0x131c/0x1ee0
+[  414.351204] {IN-SOFTIRQ-W} state was registered at:
+[  414.351751]   lock_acquire+0x18d/0x460
+[  414.352218]   _raw_spin_lock_irqsave+0x39/0x60
+[  414.352769]   __wake_up_common_lock+0x22/0x60
+[  414.353289]   sbitmap_queue_wake_up+0x375/0x4f0
+[  414.353829]   sbitmap_queue_clear+0xdd/0x270
+[  414.354338]   blk_mq_put_tag+0xdf/0x170
+[  414.354807]   __blk_mq_free_request+0x381/0x4d0
+[  414.355335]   blk_mq_free_request+0x28b/0x3e0
+[  414.355847]   __blk_mq_end_request+0x242/0xc30
+[  414.356367]   scsi_end_request+0x2c1/0x830
+[  414.356863]   scsi_io_completion+0x177/0x1610
+[  414.357379]   scsi_complete+0x12f/0x260
+[  414.357856]   blk_complete_reqs+0xba/0xf0
+[  414.358338]   __do_softirq+0x1b0/0x7a2
+[  414.358796]   irq_exit_rcu+0x14b/0x1a0
+[  414.359262]   sysvec_call_function_single+0xaf/0xc0
+[  414.359828]   asm_sysvec_call_function_single+0x1a/0x20
+[  414.360426]   default_idle+0x1e/0x30
+[  414.360873]   default_idle_call+0x9b/0x1f0
+[  414.361390]   do_idle+0x2d2/0x3e0
+[  414.361819]   cpu_startup_entry+0x55/0x60
+[  414.362314]   start_secondary+0x235/0x2b0
+[  414.362809]   secondary_startup_64_no_verify+0x18f/0x19b
+[  414.363413] irq event stamp: 428794
+[  414.363825] hardirqs last  enabled at (428793): [<ffffffff816bfd1c>] ktime_get+0x1dc/0x200
+[  414.364694] hardirqs last disabled at (428794): [<ffffffff85470177>] _raw_spin_lock_irq+0x47/0x50
+[  414.365629] softirqs last  enabled at (428444): [<ffffffff85474780>] __do_softirq+0x540/0x7a2
+[  414.366522] softirqs last disabled at (428419): [<ffffffff813f65ab>] irq_exit_rcu+0x14b/0x1a0
+[  414.367425]
+               other info that might help us debug this:
+[  414.368194]  Possible unsafe locking scenario:
+[  414.368900]        CPU0
+[  414.369225]        ----
+[  414.369548]   lock(&sbq->ws[i].wait);
+[  414.370000]   <Interrupt>
+[  414.370342]     lock(&sbq->ws[i].wait);
+[  414.370802]
+                *** DEADLOCK ***
+[  414.371569] 5 locks held by kworker/u10:3/1152:
+[  414.372088]  #0: ffff88810130e938 ((wq_completion)writeback){+.+.}-{0:0}, at: process_scheduled_works+0x357/0x13f0
+[  414.373180]  #1: ffff88810201fdb8 ((work_completion)(&(&wb->dwork)->work)){+.+.}-{0:0}, at: process_scheduled_works+0x3a3/0x13f0
+[  414.374384]  #2: ffffffff86ffbdc0 (rcu_read_lock){....}-{1:2}, at: blk_mq_run_hw_queue+0x637/0xa00
+[  414.375342]  #3: ffff88810edd1098 (&sbq->ws[i].wait){+.?.}-{2:2}, at: blk_mq_dispatch_rq_list+0x131c/0x1ee0
+[  414.376377]  #4: ffff888106205a08 (&hctx->dispatch_wait_lock){+.-.}-{2:2}, at: blk_mq_dispatch_rq_list+0x1337/0x1ee0
+[  414.378607]
+               stack backtrace:
+[  414.379177] CPU: 0 PID: 1152 Comm: kworker/u10:3 Not tainted 6.6.0-07439-gba2303cacfda #6
+[  414.380032] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.14.0-0-g155821a1990b-prebuilt.qemu.org 04/01/2014
+[  414.381177] Workqueue: writeback wb_workfn (flush-253:0)
+[  414.381805] Call Trace:
+[  414.382136]  <TASK>
+[  414.382429]  dump_stack_lvl+0x91/0xf0
+[  414.382884]  mark_lock_irq+0xb3b/0x1260
+[  414.383367]  ? __pfx_mark_lock_irq+0x10/0x10
+[  414.383889]  ? stack_trace_save+0x8e/0xc0
+[  414.384373]  ? __pfx_stack_trace_save+0x10/0x10
+[  414.384903]  ? graph_lock+0xcf/0x410
+[  414.385350]  ? save_trace+0x3d/0xc70
+[  414.385808]  mark_lock.part.20+0x56d/0xa90
+[  414.386317]  mark_held_locks+0xb0/0x110
+[  414.386791]  ? __pfx_do_raw_spin_lock+0x10/0x10
+[  414.387320]  lockdep_hardirqs_on_prepare+0x297/0x3f0
+[  414.387901]  ? _raw_spin_unlock_irq+0x28/0x50
+[  414.388422]  trace_hardirqs_on+0x58/0x100
+[  414.388917]  _raw_spin_unlock_irq+0x28/0x50
+[  414.389422]  __blk_mq_tag_busy+0x1d6/0x2a0
+[  414.389920]  __blk_mq_get_driver_tag+0x761/0x9f0
+[  414.390899]  blk_mq_dispatch_rq_list+0x1780/0x1ee0
+[  414.391473]  ? __pfx_blk_mq_dispatch_rq_list+0x10/0x10
+[  414.392070]  ? sbitmap_get+0x2b8/0x450
+[  414.392533]  ? __blk_mq_get_driver_tag+0x210/0x9f0
+[  414.393095]  __blk_mq_sched_dispatch_requests+0xd99/0x1690
+[  414.393730]  ? elv_attempt_insert_merge+0x1b1/0x420
+[  414.394302]  ? __pfx___blk_mq_sched_dispatch_requests+0x10/0x10
+[  414.394970]  ? lock_acquire+0x18d/0x460
+[  414.395456]  ? blk_mq_run_hw_queue+0x637/0xa00
+[  414.395986]  ? __pfx_lock_acquire+0x10/0x10
+[  414.396499]  blk_mq_sched_dispatch_requests+0x109/0x190
+[  414.397100]  blk_mq_run_hw_queue+0x66e/0xa00
+[  414.397616]  blk_mq_flush_plug_list.part.17+0x614/0x2030
+[  414.398244]  ? __pfx_blk_mq_flush_plug_list.part.17+0x10/0x10
+[  414.398897]  ? writeback_sb_inodes+0x241/0xcc0
+[  414.399429]  blk_mq_flush_plug_list+0x65/0x80
+[  414.399957]  __blk_flush_plug+0x2f1/0x530
+[  414.400458]  ? __pfx___blk_flush_plug+0x10/0x10
+[  414.400999]  blk_finish_plug+0x59/0xa0
+[  414.401467]  wb_writeback+0x7cc/0x920
+[  414.401935]  ? __pfx_wb_writeback+0x10/0x10
+[  414.402442]  ? mark_held_locks+0xb0/0x110
+[  414.402931]  ? __pfx_do_raw_spin_lock+0x10/0x10
+[  414.403462]  ? lockdep_hardirqs_on_prepare+0x297/0x3f0
+[  414.404062]  wb_workfn+0x2b3/0xcf0
+[  414.404500]  ? __pfx_wb_workfn+0x10/0x10
+[  414.404989]  process_scheduled_works+0x432/0x13f0
+[  414.405546]  ? __pfx_process_scheduled_works+0x10/0x10
+[  414.406139]  ? do_raw_spin_lock+0x101/0x2a0
+[  414.406641]  ? assign_work+0x19b/0x240
+[  414.407106]  ? lock_is_held_type+0x9d/0x110
+[  414.407604]  worker_thread+0x6f2/0x1160
+[  414.408075]  ? __kthread_parkme+0x62/0x210
+[  414.408572]  ? lockdep_hardirqs_on_prepare+0x297/0x3f0
+[  414.409168]  ? __kthread_parkme+0x13c/0x210
+[  414.409678]  ? __pfx_worker_thread+0x10/0x10
+[  414.410191]  kthread+0x33c/0x440
+[  414.410602]  ? __pfx_kthread+0x10/0x10
+[  414.411068]  ret_from_fork+0x4d/0x80
+[  414.411526]  ? __pfx_kthread+0x10/0x10
+[  414.411993]  ret_from_fork_asm+0x1b/0x30
+[  414.412489]  </TASK>
+
+When interrupts are turned on while a lock taken by spin_lock_irq() is
+still held, lockdep throws this warning because of the potential deadlock.
+
+blk_mq_prep_dispatch_rq
+ blk_mq_get_driver_tag
+  __blk_mq_get_driver_tag
+   __blk_mq_alloc_driver_tag
+    blk_mq_tag_busy -> tag is already busy
+    // failed to get driver tag
+ blk_mq_mark_tag_wait
+  spin_lock_irq(&wq->lock) -> lock A (&sbq->ws[i].wait)
+  __add_wait_queue(wq, wait) -> wait queue active
+  blk_mq_get_driver_tag
+  __blk_mq_tag_busy
+-> 1) tag must be idle, which means there can't be inflight IO
+   spin_lock_irq(&tags->lock) -> lock B (hctx->tags)
+   spin_unlock_irq(&tags->lock) -> unlock B, turn on interrupt accidentally
+-> 2) context must be preempt by IO interrupt to trigger deadlock.
+
+As shown above, the deadlock is not possible in theory, but the warning
+still needs to be fixed.
+
+Fix it by using spin_lock_irqsave() to take lock B instead of spin_lock_irq().
+
+Fixes: 4f1731df60f9 ("blk-mq: fix potential io hang by wrong 'wake_batch'")
+Signed-off-by: Li Lingfeng <lilingfeng3@huawei.com>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Yu Kuai <yukuai3@huawei.com>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://lore.kernel.org/r/20240815024736.2040971-1-lilingfeng@huaweicloud.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-mq-tag.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
+index cc57e2dd9a0bb..2cafcf11ee8be 100644
+--- a/block/blk-mq-tag.c
++++ b/block/blk-mq-tag.c
+@@ -38,6 +38,7 @@ static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
+ void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
+ {
+       unsigned int users;
++      unsigned long flags;
+       struct blk_mq_tags *tags = hctx->tags;
+       /*
+@@ -56,11 +57,11 @@ void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
+                       return;
+       }
+-      spin_lock_irq(&tags->lock);
++      spin_lock_irqsave(&tags->lock, flags);
+       users = tags->active_queues + 1;
+       WRITE_ONCE(tags->active_queues, users);
+       blk_mq_update_wake_batch(tags, users);
+-      spin_unlock_irq(&tags->lock);
++      spin_unlock_irqrestore(&tags->lock, flags);
+ }
+ /*
+-- 
+2.43.0
+
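
A minimal userspace C analogy of why the fix above switches to the save/restore variant, with POSIX signal masks standing in for interrupt state (this is not kernel code): an inner section that unconditionally re-enables "interrupts" clobbers the state an outer section set up, while saving and restoring preserves it.

  #include <signal.h>
  #include <stdio.h>

  static void irq_disable(void)               /* ~ spin_lock_irq()            */
  {
          sigset_t s;
          sigemptyset(&s);
          sigaddset(&s, SIGALRM);
          sigprocmask(SIG_BLOCK, &s, NULL);
  }

  static void irq_enable(void)                /* ~ spin_unlock_irq()          */
  {
          sigset_t s;
          sigemptyset(&s);
          sigaddset(&s, SIGALRM);
          sigprocmask(SIG_UNBLOCK, &s, NULL);
  }

  static void irq_save(sigset_t *flags)       /* ~ spin_lock_irqsave()        */
  {
          sigset_t s;
          sigemptyset(&s);
          sigaddset(&s, SIGALRM);
          sigprocmask(SIG_BLOCK, &s, flags);
  }

  static void irq_restore(const sigset_t *flags) /* ~ spin_unlock_irqrestore() */
  {
          sigprocmask(SIG_SETMASK, flags, NULL);
  }

  static int irqs_disabled(void)
  {
          sigset_t cur;
          sigprocmask(SIG_BLOCK, NULL, &cur);
          return sigismember(&cur, SIGALRM);
  }

  int main(void)
  {
          sigset_t flags;

          irq_disable();                      /* outer section wants irqs off */

          irq_disable();                      /* inner plain lock/unlock pair */
          irq_enable();
          printf("plain unlock_irq:  disabled=%d (outer state lost)\n",
                 irqs_disabled());

          irq_disable();                      /* re-establish the outer state */
          irq_save(&flags);                   /* inner save/restore pair      */
          irq_restore(&flags);
          printf("unlock_irqrestore: disabled=%d (outer state kept)\n",
                 irqs_disabled());

          irq_enable();
          return 0;
  }
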
diff --git a/queue-6.10/cpu-smt-enable-smt-only-if-a-core-is-online.patch b/queue-6.10/cpu-smt-enable-smt-only-if-a-core-is-online.patch
new file mode 100644 (file)
index 0000000..baec4cd
--- /dev/null
@@ -0,0 +1,88 @@
+From fe4005a1634762b93e63e0ed5edca8211994b75c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Jul 2024 08:31:12 +0530
+Subject: cpu/SMT: Enable SMT only if a core is online
+
+From: Nysal Jan K.A <nysal@linux.ibm.com>
+
+[ Upstream commit 6c17ea1f3eaa330d445ac14a9428402ce4e3055e ]
+
+If a core is offline then enabling SMT should not online CPUs of
+this core. By enabling SMT, what is intended is either changing the SMT
+value from "off" to "on" or setting the SMT level (threads per core) from a
+lower to higher value.
+
+On PowerPC the ppc64_cpu utility can be used, among other things, to
+perform the following functions:
+
+ppc64_cpu --cores-on                # Get the number of online cores
+ppc64_cpu --cores-on=X              # Put exactly X cores online
+ppc64_cpu --offline-cores=X[,Y,...] # Put specified cores offline
+ppc64_cpu --smt={on|off|value}      # Enable, disable or change SMT level
+
+If the user has decided to offline certain cores, enabling SMT should
+not online CPUs in those cores. This patch fixes the issue and changes
+the behaviour as described, by introducing an arch specific function
+topology_is_core_online(). It is currently implemented only for PowerPC.
+
+Fixes: 73c58e7e1412 ("powerpc: Add HOTPLUG_SMT support")
+Reported-by: Tyrel Datwyler <tyreld@linux.ibm.com>
+Closes: https://groups.google.com/g/powerpc-utils-devel/c/wrwVzAAnRlI/m/5KJSoqP4BAAJ
+Signed-off-by: Nysal Jan K.A <nysal@linux.ibm.com>
+Reviewed-by: Shrikanth Hegde <sshegde@linux.ibm.com>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/20240731030126.956210-2-nysal@linux.ibm.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/ABI/testing/sysfs-devices-system-cpu |  3 ++-
+ kernel/cpu.c                                       | 12 +++++++++++-
+ 2 files changed, 13 insertions(+), 2 deletions(-)
+
+diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
+index e7e160954e798..0579860b55299 100644
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -562,7 +562,8 @@ Description:       Control Symmetric Multi Threading (SMT)
+                        ================ =========================================
+                        If control status is "forceoff" or "notsupported" writes
+-                       are rejected.
++                       are rejected. Note that enabling SMT on PowerPC skips
++                       offline cores.
+ What:         /sys/devices/system/cpu/cpuX/power/energy_perf_bias
+ Date:         March 2019
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 3d2bf1d50a0c4..6dee328bfe6fd 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -2679,6 +2679,16 @@ int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+       return ret;
+ }
++/**
++ * Check if the core a CPU belongs to is online
++ */
++#if !defined(topology_is_core_online)
++static inline bool topology_is_core_online(unsigned int cpu)
++{
++      return true;
++}
++#endif
++
+ int cpuhp_smt_enable(void)
+ {
+       int cpu, ret = 0;
+@@ -2689,7 +2699,7 @@ int cpuhp_smt_enable(void)
+               /* Skip online CPUs and CPUs on offline nodes */
+               if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
+                       continue;
+-              if (!cpu_smt_thread_allowed(cpu))
++              if (!cpu_smt_thread_allowed(cpu) || !topology_is_core_online(cpu))
+                       continue;
+               ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
+               if (ret)
+-- 
+2.43.0
+
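
A standalone C sketch of the override-or-fallback pattern the patch above introduces: generic code supplies a default topology_is_core_online() only when no architecture header has defined the hook. The function bodies and the main() driver are illustrative, not kernel code.

  #include <stdbool.h>
  #include <stdio.h>

  /* "Arch header": an architecture that implements the hook defines a macro
   * of the same name so the generic fallback below compiles out.  Delete
   * this block to see the default take over. */
  #define topology_is_core_online topology_is_core_online
  static inline bool topology_is_core_online(unsigned int cpu)
  {
          return cpu < 4;             /* pretend only CPUs 0-3 sit on online cores */
  }

  /* "Generic code": fall back to "always online" when there is no override. */
  #if !defined(topology_is_core_online)
  static inline bool topology_is_core_online(unsigned int cpu)
  {
          return true;
  }
  #endif

  int main(void)
  {
          for (unsigned int cpu = 0; cpu < 6; cpu++)
                  printf("cpu %u: core online = %d\n", cpu,
                         topology_is_core_online(cpu));
          return 0;
  }
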
diff --git a/queue-6.10/io_uring-napi-check-napi_enabled-in-io_napi_add-befo.patch b/queue-6.10/io_uring-napi-check-napi_enabled-in-io_napi_add-befo.patch
new file mode 100644 (file)
index 0000000..6223d14
--- /dev/null
@@ -0,0 +1,54 @@
+From 02a6f0f691fb348c711a1a96fa0ece6bccf27967 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 11 Aug 2024 14:07:11 -0400
+Subject: io_uring/napi: check napi_enabled in io_napi_add() before proceeding
+
+From: Olivier Langlois <olivier@trillion01.com>
+
+[ Upstream commit 84f2eecf95018386c145ada19bb45b03bdb80d9e ]
+
+Doing so avoids the overhead of adding napi ids to all the rings that do
+not enable napi.
+
+If no id is added to napi_list because napi is disabled,
+__io_napi_busy_loop() will not be called.
+
+Signed-off-by: Olivier Langlois <olivier@trillion01.com>
+Fixes: b4ccc4dd1330 ("io_uring/napi: enable even with a timeout of 0")
+Link: https://lore.kernel.org/r/bd989ccef5fda14f5fd9888faf4fefcf66bd0369.1723400131.git.olivier@trillion01.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ io_uring/napi.c | 2 +-
+ io_uring/napi.h | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/io_uring/napi.c b/io_uring/napi.c
+index 6bdb267e9c33c..ab5d68d4440c4 100644
+--- a/io_uring/napi.c
++++ b/io_uring/napi.c
+@@ -311,7 +311,7 @@ void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq)
+ {
+       iowq->napi_prefer_busy_poll = READ_ONCE(ctx->napi_prefer_busy_poll);
+-      if (!(ctx->flags & IORING_SETUP_SQPOLL) && ctx->napi_enabled)
++      if (!(ctx->flags & IORING_SETUP_SQPOLL))
+               io_napi_blocking_busy_loop(ctx, iowq);
+ }
+diff --git a/io_uring/napi.h b/io_uring/napi.h
+index babbee36cd3eb..341d010cf66bc 100644
+--- a/io_uring/napi.h
++++ b/io_uring/napi.h
+@@ -55,7 +55,7 @@ static inline void io_napi_add(struct io_kiocb *req)
+       struct io_ring_ctx *ctx = req->ctx;
+       struct socket *sock;
+-      if (!READ_ONCE(ctx->napi_busy_poll_dt))
++      if (!READ_ONCE(ctx->napi_enabled))
+               return;
+       sock = sock_from_file(req->file);
+-- 
+2.43.0
+
diff --git a/queue-6.10/io_uring-napi-remove-unnecessary-s64-cast.patch b/queue-6.10/io_uring-napi-remove-unnecessary-s64-cast.patch
new file mode 100644 (file)
index 0000000..6be985c
--- /dev/null
@@ -0,0 +1,40 @@
+From 9cb90798c9697b260a16853a0357db2494ff5b6d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Jul 2024 03:05:21 +0200
+Subject: io_uring/napi: Remove unnecessary s64 cast
+
+From: Thorsten Blum <thorsten.blum@toblux.com>
+
+[ Upstream commit f7c696a56cc7d70515774a24057b473757ec6089 ]
+
+Since the do_div() macro casts the divisor to u32 anyway, remove the
+unnecessary s64 cast and fix the following Coccinelle/coccicheck
+warning reported by do_div.cocci:
+
+  WARNING: do_div() does a 64-by-32 division, please consider using div64_s64 instead
+
+Signed-off-by: Thorsten Blum <thorsten.blum@toblux.com>
+Link: https://lore.kernel.org/r/20240710010520.384009-2-thorsten.blum@toblux.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Stable-dep-of: 84f2eecf9501 ("io_uring/napi: check napi_enabled in io_napi_add() before proceeding")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ io_uring/napi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/io_uring/napi.c b/io_uring/napi.c
+index 080d10e0e0afd..327e5f3a8abe0 100644
+--- a/io_uring/napi.c
++++ b/io_uring/napi.c
+@@ -285,7 +285,7 @@ void __io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iow
+                       s64 poll_to_ns = timespec64_to_ns(ts);
+                       if (poll_to_ns > 0) {
+                               u64 val = poll_to_ns + 999;
+-                              do_div(val, (s64) 1000);
++                              do_div(val, 1000);
+                               poll_to = val;
+                       }
+               }
+-- 
+2.43.0
+
diff --git a/queue-6.10/io_uring-napi-use-ktime-in-busy-polling.patch b/queue-6.10/io_uring-napi-use-ktime-in-busy-polling.patch
new file mode 100644 (file)
index 0000000..c0fe2fd
--- /dev/null
@@ -0,0 +1,212 @@
+From 4ee14031d88066739185950822fa796ce41c1024 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Jul 2024 15:24:30 +0100
+Subject: io_uring/napi: use ktime in busy polling
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit 342b2e395d5f34c9f111a818556e617939f83a8c ]
+
+It's more natural to use ktime/ns instead of keeping around usec,
+especially since we're comparing it against user provided timers,
+so convert napi busy poll internal handling to ktime. It's also nicer
+since the type (ktime_t vs unsigned long) now tells the unit of measure.
+
+Keep everything as ktime, which we convert to/from microseconds for
+IORING_[UN]REGISTER_NAPI. The net/ busy polling code seems to work with
+usec, however it's not real usec as a shift by 10 is used to get it from
+nsecs, see busy_loop_current_time(), so it's easy to get the truncated
+nsecs back and we end up with better precision.
+
+Note, we can further improve it later by removing the truncation and
+maybe convincing net/ to use ktime/ns instead.
+
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Link: https://lore.kernel.org/r/95e7ec8d095069a3ed5d40a4bc6f8b586698bc7e.1722003776.git.asml.silence@gmail.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Stable-dep-of: 84f2eecf9501 ("io_uring/napi: check napi_enabled in io_napi_add() before proceeding")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/io_uring_types.h |  2 +-
+ io_uring/io_uring.h            |  2 +-
+ io_uring/napi.c                | 48 +++++++++++++++++++---------------
+ io_uring/napi.h                |  2 +-
+ 4 files changed, 30 insertions(+), 24 deletions(-)
+
+diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
+index 7abdc09271245..b18e998c8b887 100644
+--- a/include/linux/io_uring_types.h
++++ b/include/linux/io_uring_types.h
+@@ -410,7 +410,7 @@ struct io_ring_ctx {
+       spinlock_t              napi_lock;      /* napi_list lock */
+       /* napi busy poll default timeout */
+-      unsigned int            napi_busy_poll_to;
++      ktime_t                 napi_busy_poll_dt;
+       bool                    napi_prefer_busy_poll;
+       bool                    napi_enabled;
+diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
+index 726e6367af4d3..af46d03d58847 100644
+--- a/io_uring/io_uring.h
++++ b/io_uring/io_uring.h
+@@ -43,7 +43,7 @@ struct io_wait_queue {
+       ktime_t timeout;
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+-      unsigned int napi_busy_poll_to;
++      ktime_t napi_busy_poll_dt;
+       bool napi_prefer_busy_poll;
+ #endif
+ };
+diff --git a/io_uring/napi.c b/io_uring/napi.c
+index 327e5f3a8abe0..6bdb267e9c33c 100644
+--- a/io_uring/napi.c
++++ b/io_uring/napi.c
+@@ -33,6 +33,12 @@ static struct io_napi_entry *io_napi_hash_find(struct hlist_head *hash_list,
+       return NULL;
+ }
++static inline ktime_t net_to_ktime(unsigned long t)
++{
++      /* napi approximating usecs, reverse busy_loop_current_time */
++      return ns_to_ktime(t << 10);
++}
++
+ void __io_napi_add(struct io_ring_ctx *ctx, struct socket *sock)
+ {
+       struct hlist_head *hash_list;
+@@ -102,14 +108,14 @@ static inline void io_napi_remove_stale(struct io_ring_ctx *ctx, bool is_stale)
+               __io_napi_remove_stale(ctx);
+ }
+-static inline bool io_napi_busy_loop_timeout(unsigned long start_time,
+-                                           unsigned long bp_usec)
++static inline bool io_napi_busy_loop_timeout(ktime_t start_time,
++                                           ktime_t bp)
+ {
+-      if (bp_usec) {
+-              unsigned long end_time = start_time + bp_usec;
+-              unsigned long now = busy_loop_current_time();
++      if (bp) {
++              ktime_t end_time = ktime_add(start_time, bp);
++              ktime_t now = net_to_ktime(busy_loop_current_time());
+-              return time_after(now, end_time);
++              return ktime_after(now, end_time);
+       }
+       return true;
+@@ -124,7 +130,8 @@ static bool io_napi_busy_loop_should_end(void *data,
+               return true;
+       if (io_should_wake(iowq) || io_has_work(iowq->ctx))
+               return true;
+-      if (io_napi_busy_loop_timeout(start_time, iowq->napi_busy_poll_to))
++      if (io_napi_busy_loop_timeout(net_to_ktime(start_time),
++                                    iowq->napi_busy_poll_dt))
+               return true;
+       return false;
+@@ -181,10 +188,12 @@ static void io_napi_blocking_busy_loop(struct io_ring_ctx *ctx,
+  */
+ void io_napi_init(struct io_ring_ctx *ctx)
+ {
++      u64 sys_dt = READ_ONCE(sysctl_net_busy_poll) * NSEC_PER_USEC;
++
+       INIT_LIST_HEAD(&ctx->napi_list);
+       spin_lock_init(&ctx->napi_lock);
+       ctx->napi_prefer_busy_poll = false;
+-      ctx->napi_busy_poll_to = READ_ONCE(sysctl_net_busy_poll);
++      ctx->napi_busy_poll_dt = ns_to_ktime(sys_dt);
+ }
+ /*
+@@ -217,7 +226,7 @@ void io_napi_free(struct io_ring_ctx *ctx)
+ int io_register_napi(struct io_ring_ctx *ctx, void __user *arg)
+ {
+       const struct io_uring_napi curr = {
+-              .busy_poll_to     = ctx->napi_busy_poll_to,
++              .busy_poll_to     = ktime_to_us(ctx->napi_busy_poll_dt),
+               .prefer_busy_poll = ctx->napi_prefer_busy_poll
+       };
+       struct io_uring_napi napi;
+@@ -232,7 +241,7 @@ int io_register_napi(struct io_ring_ctx *ctx, void __user *arg)
+       if (copy_to_user(arg, &curr, sizeof(curr)))
+               return -EFAULT;
+-      WRITE_ONCE(ctx->napi_busy_poll_to, napi.busy_poll_to);
++      WRITE_ONCE(ctx->napi_busy_poll_dt, napi.busy_poll_to * NSEC_PER_USEC);
+       WRITE_ONCE(ctx->napi_prefer_busy_poll, !!napi.prefer_busy_poll);
+       WRITE_ONCE(ctx->napi_enabled, true);
+       return 0;
+@@ -249,14 +258,14 @@ int io_register_napi(struct io_ring_ctx *ctx, void __user *arg)
+ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
+ {
+       const struct io_uring_napi curr = {
+-              .busy_poll_to     = ctx->napi_busy_poll_to,
++              .busy_poll_to     = ktime_to_us(ctx->napi_busy_poll_dt),
+               .prefer_busy_poll = ctx->napi_prefer_busy_poll
+       };
+       if (arg && copy_to_user(arg, &curr, sizeof(curr)))
+               return -EFAULT;
+-      WRITE_ONCE(ctx->napi_busy_poll_to, 0);
++      WRITE_ONCE(ctx->napi_busy_poll_dt, 0);
+       WRITE_ONCE(ctx->napi_prefer_busy_poll, false);
+       WRITE_ONCE(ctx->napi_enabled, false);
+       return 0;
+@@ -275,23 +284,20 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
+ void __io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iowq,
+                             struct timespec64 *ts)
+ {
+-      unsigned int poll_to = READ_ONCE(ctx->napi_busy_poll_to);
++      ktime_t poll_dt = READ_ONCE(ctx->napi_busy_poll_dt);
+       if (ts) {
+               struct timespec64 poll_to_ts;
+-              poll_to_ts = ns_to_timespec64(1000 * (s64)poll_to);
++              poll_to_ts = ns_to_timespec64(ktime_to_ns(poll_dt));
+               if (timespec64_compare(ts, &poll_to_ts) < 0) {
+                       s64 poll_to_ns = timespec64_to_ns(ts);
+-                      if (poll_to_ns > 0) {
+-                              u64 val = poll_to_ns + 999;
+-                              do_div(val, 1000);
+-                              poll_to = val;
+-                      }
++                      if (poll_to_ns > 0)
++                              poll_dt = ns_to_ktime(poll_to_ns);
+               }
+       }
+-      iowq->napi_busy_poll_to = poll_to;
++      iowq->napi_busy_poll_dt = poll_dt;
+ }
+ /*
+@@ -320,7 +326,7 @@ int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx)
+       LIST_HEAD(napi_list);
+       bool is_stale = false;
+-      if (!READ_ONCE(ctx->napi_busy_poll_to))
++      if (!READ_ONCE(ctx->napi_busy_poll_dt))
+               return 0;
+       if (list_empty_careful(&ctx->napi_list))
+               return 0;
+diff --git a/io_uring/napi.h b/io_uring/napi.h
+index 6fc0393d0dbef..babbee36cd3eb 100644
+--- a/io_uring/napi.h
++++ b/io_uring/napi.h
+@@ -55,7 +55,7 @@ static inline void io_napi_add(struct io_kiocb *req)
+       struct io_ring_ctx *ctx = req->ctx;
+       struct socket *sock;
+-      if (!READ_ONCE(ctx->napi_busy_poll_to))
++      if (!READ_ONCE(ctx->napi_busy_poll_dt))
+               return;
+       sock = sock_from_file(req->file);
+-- 
+2.43.0
+
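
A small standalone C example of the unit handling described above: busy_loop_current_time() approximates microseconds as nanoseconds shifted right by 10, so treating its result as real usec is off by a couple of percent, whereas shifting back by 10 (as net_to_ktime() in the patch does) recovers the nanosecond value almost exactly. The sample input is arbitrary.

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint64_t ns = 5000000;                   /* pretend 5 ms have elapsed  */

          uint64_t pseudo_us = ns >> 10;           /* busy_loop_current_time():  */
                                                   /* "usec" is really ns / 1024 */
          uint64_t real_us   = ns / 1000;          /* a true microsecond count   */
          uint64_t back_ns   = pseudo_us << 10;    /* net_to_ktime(): undo shift */

          printf("net/ pseudo-usec : %llu\n", (unsigned long long)pseudo_us);
          printf("true usec        : %llu (~2%% larger)\n",
                 (unsigned long long)real_us);
          printf("recovered ns     : %llu (within %llu ns of the original)\n",
                 (unsigned long long)back_ns, (unsigned long long)(ns - back_ns));
          return 0;
  }
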
diff --git a/queue-6.10/powerpc-topology-check-if-a-core-is-online.patch b/queue-6.10/powerpc-topology-check-if-a-core-is-online.patch
new file mode 100644 (file)
index 0000000..873d815
--- /dev/null
@@ -0,0 +1,58 @@
+From 011c19a3b9770fa969408f8f24f05044dce4eab3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Jul 2024 08:31:13 +0530
+Subject: powerpc/topology: Check if a core is online
+
+From: Nysal Jan K.A <nysal@linux.ibm.com>
+
+[ Upstream commit 227bbaabe64b6f9cd98aa051454c1d4a194a8c6a ]
+
+topology_is_core_online() checks if the core a CPU belongs to
+is online. The core is online if at least one of the sibling
+CPUs is online. The first CPU of an online core is also online
+in the common case, so this should be fairly quick.
+
+Fixes: 73c58e7e1412 ("powerpc: Add HOTPLUG_SMT support")
+Signed-off-by: Nysal Jan K.A <nysal@linux.ibm.com>
+Reviewed-by: Shrikanth Hegde <sshegde@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/20240731030126.956210-3-nysal@linux.ibm.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/include/asm/topology.h | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
+index f4e6f2dd04b73..16bacfe8c7a2c 100644
+--- a/arch/powerpc/include/asm/topology.h
++++ b/arch/powerpc/include/asm/topology.h
+@@ -145,6 +145,7 @@ static inline int cpu_to_coregroup_id(int cpu)
+ #ifdef CONFIG_HOTPLUG_SMT
+ #include <linux/cpu_smt.h>
++#include <linux/cpumask.h>
+ #include <asm/cputhreads.h>
+ static inline bool topology_is_primary_thread(unsigned int cpu)
+@@ -156,6 +157,18 @@ static inline bool topology_smt_thread_allowed(unsigned int cpu)
+ {
+       return cpu_thread_in_core(cpu) < cpu_smt_num_threads;
+ }
++
++#define topology_is_core_online topology_is_core_online
++static inline bool topology_is_core_online(unsigned int cpu)
++{
++      int i, first_cpu = cpu_first_thread_sibling(cpu);
++
++      for (i = first_cpu; i < first_cpu + threads_per_core; ++i) {
++              if (cpu_online(i))
++                      return true;
++      }
++      return false;
++}
+ #endif
+ #endif /* __KERNEL__ */
+-- 
+2.43.0
+
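
A self-contained C sketch of the sibling scan performed by topology_is_core_online() above: a core counts as online if any of its threads_per_core sibling CPUs is online. The online map and helper names below are made up for the example.

  #include <stdbool.h>
  #include <stdio.h>

  #define THREADS_PER_CORE 8
  #define NR_CPUS          32

  static bool cpu_online_map[NR_CPUS];        /* stand-in for cpu_online()    */

  static unsigned int first_thread_sibling(unsigned int cpu)
  {
          return cpu - (cpu % THREADS_PER_CORE);
  }

  static bool core_is_online(unsigned int cpu)
  {
          unsigned int first = first_thread_sibling(cpu);

          for (unsigned int i = first; i < first + THREADS_PER_CORE; i++) {
                  if (cpu_online_map[i])
                          return true;
          }
          return false;
  }

  int main(void)
  {
          cpu_online_map[9] = true;           /* one thread of core 1 online  */

          printf("cpu 12 -> core online: %d\n", core_is_online(12));   /* 1 */
          printf("cpu 20 -> core online: %d\n", core_is_online(20));   /* 0 */
          return 0;
  }
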
diff --git a/queue-6.10/printk-panic-allow-cpu-backtraces-to-be-written-into.patch b/queue-6.10/printk-panic-allow-cpu-backtraces-to-be-written-into.patch
new file mode 100644 (file)
index 0000000..996675b
--- /dev/null
@@ -0,0 +1,89 @@
+From 9df5bb24c6776879ab6f75ed206fb343f6070b4c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Aug 2024 16:27:03 +0900
+Subject: printk/panic: Allow cpu backtraces to be written into ringbuffer
+ during panic
+
+From: Ryo Takakura <takakura@valinux.co.jp>
+
+[ Upstream commit bcc954c6caba01fca143162d5fbb90e46aa1ad80 ]
+
+commit 779dbc2e78d7 ("printk: Avoid non-panic CPUs writing
+to ringbuffer") stopped non-panic CPUs from writing further messages to
+the ringbuffer after a panic.
+
+Since that commit, non-panicked CPUs are not allowed to write to the
+ring buffer after a panic, so the CPU backtrace that is triggered after
+a panic to sample the non-panicked CPUs' backtraces no longer serves its
+purpose, as it has nothing to print.
+
+Fix the issue by allowing non-panicked CPUs to write into the ringbuffer
+while the CPU backtrace is in flight.
+
+Fixes: 779dbc2e78d7 ("printk: Avoid non-panic CPUs writing to ringbuffer")
+Signed-off-by: Ryo Takakura <takakura@valinux.co.jp>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20240812072703.339690-1-takakura@valinux.co.jp
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/panic.h  | 1 +
+ kernel/panic.c         | 8 +++++++-
+ kernel/printk/printk.c | 2 +-
+ 3 files changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/panic.h b/include/linux/panic.h
+index 6717b15e798c3..556b4e2ad9aa5 100644
+--- a/include/linux/panic.h
++++ b/include/linux/panic.h
+@@ -16,6 +16,7 @@ extern void oops_enter(void);
+ extern void oops_exit(void);
+ extern bool oops_may_print(void);
++extern bool panic_triggering_all_cpu_backtrace;
+ extern int panic_timeout;
+ extern unsigned long panic_print;
+ extern int panic_on_oops;
+diff --git a/kernel/panic.c b/kernel/panic.c
+index 8bff183d6180e..30342568e935f 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -63,6 +63,8 @@ unsigned long panic_on_taint;
+ bool panic_on_taint_nousertaint = false;
+ static unsigned int warn_limit __read_mostly;
++bool panic_triggering_all_cpu_backtrace;
++
+ int panic_timeout = CONFIG_PANIC_TIMEOUT;
+ EXPORT_SYMBOL_GPL(panic_timeout);
+@@ -252,8 +254,12 @@ void check_panic_on_warn(const char *origin)
+  */
+ static void panic_other_cpus_shutdown(bool crash_kexec)
+ {
+-      if (panic_print & PANIC_PRINT_ALL_CPU_BT)
++      if (panic_print & PANIC_PRINT_ALL_CPU_BT) {
++              /* Temporary allow non-panic CPUs to write their backtraces. */
++              panic_triggering_all_cpu_backtrace = true;
+               trigger_all_cpu_backtrace();
++              panic_triggering_all_cpu_backtrace = false;
++      }
+       /*
+        * Note that smp_send_stop() is the usual SMP shutdown function,
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index dddb15f48d595..c5d844f727f63 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2316,7 +2316,7 @@ asmlinkage int vprintk_emit(int facility, int level,
+        * non-panic CPUs are generating any messages, they will be
+        * silently dropped.
+        */
+-      if (other_cpu_in_panic())
++      if (other_cpu_in_panic() && !panic_triggering_all_cpu_backtrace)
+               return 0;
+       if (level == LOGLEVEL_SCHED) {
+-- 
+2.43.0
+
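
A tiny standalone C sketch of the gating pattern used above: messages from non-panic CPUs are normally dropped, but a global flag raised around the backtrace trigger temporarily lets that path through. All names below are illustrative, not the kernel API.

  #include <stdbool.h>
  #include <stdio.h>

  static bool triggering_all_cpu_backtrace;

  /* Stand-in for other_cpu_in_panic(): pretend we are not the panicking CPU. */
  static bool other_cpu_in_panic(void)
  {
          return true;
  }

  static int emit(const char *msg)
  {
          if (other_cpu_in_panic() && !triggering_all_cpu_backtrace)
                  return 0;                   /* silently dropped             */
          return printf("%s\n", msg);
  }

  int main(void)
  {
          emit("dropped: ordinary message from a non-panic CPU");

          triggering_all_cpu_backtrace = true;  /* around trigger_all_cpu_backtrace() */
          emit("printed: backtrace line from a non-panic CPU");
          triggering_all_cpu_backtrace = false;

          emit("dropped: message after the backtrace window closes");
          return 0;
  }
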
diff --git a/queue-6.10/rust-fix-the-default-format-for-config_-rustc-bindge.patch b/queue-6.10/rust-fix-the-default-format-for-config_-rustc-bindge.patch
new file mode 100644 (file)
index 0000000..c4486e2
--- /dev/null
@@ -0,0 +1,55 @@
+From 57b4a0650a2d67678cd8c738d4deb4204b4a897b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 27 Jul 2024 23:03:00 +0900
+Subject: rust: fix the default format for CONFIG_{RUSTC,BINDGEN}_VERSION_TEXT
+
+From: Masahiro Yamada <masahiroy@kernel.org>
+
+[ Upstream commit aacf93e87f0d808ef46e621aa56caea336b4433c ]
+
+Another oddity in these config entries is that their default value can
+fall back to 'n', which is a value for bool or tristate symbols.
+
+The '|| echo n' is an incorrect workaround to avoid the syntax error.
+This is not a big deal, as the entry is hidden by 'depends on RUST' in
+situations where '$(RUSTC) --version' or '$(BINDGEN) --version' fails.
+Anyway, it looks odd.
+
+The default of a string type symbol should be a double-quoted string
+literal. Turn it into an empty string when the version command fails.
+
+Fixes: 2f7ab1267dc9 ("Kbuild: add Rust support")
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Link: https://lore.kernel.org/r/20240727140302.1806011-2-masahiroy@kernel.org
+[ Rebased on top of v6.11-rc1. - Miguel ]
+Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ init/Kconfig | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/init/Kconfig b/init/Kconfig
+index 051cbb22968bd..9684e5d2b81c6 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1906,7 +1906,7 @@ config RUST
+ config RUSTC_VERSION_TEXT
+       string
+       depends on RUST
+-      default $(shell,$(RUSTC) --version 2>/dev/null || echo n)
++      default "$(shell,$(RUSTC) --version 2>/dev/null)"
+ config BINDGEN_VERSION_TEXT
+       string
+@@ -1914,7 +1914,7 @@ config BINDGEN_VERSION_TEXT
+       # The dummy parameter `workaround-for-0.69.0` is required to support 0.69.0
+       # (https://github.com/rust-lang/rust-bindgen/pull/2678). It can be removed when
+       # the minimum version is upgraded past that (0.69.1 already fixed the issue).
+-      default $(shell,$(BINDGEN) --version workaround-for-0.69.0 2>/dev/null || echo n)
++      default "$(shell,$(BINDGEN) --version workaround-for-0.69.0 2>/dev/null)"
+ #
+ # Place an empty function call at each tracepoint site. Can be
+-- 
+2.43.0
+
diff --git a/queue-6.10/rust-suppress-error-messages-from-config_-rustc-bind.patch b/queue-6.10/rust-suppress-error-messages-from-config_-rustc-bind.patch
new file mode 100644 (file)
index 0000000..3727616
--- /dev/null
@@ -0,0 +1,79 @@
+From f213e3bdd38cc55bda57714cc6fffc7f3c5a1611 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 27 Jul 2024 23:02:59 +0900
+Subject: rust: suppress error messages from
+ CONFIG_{RUSTC,BINDGEN}_VERSION_TEXT
+
+From: Masahiro Yamada <masahiroy@kernel.org>
+
+[ Upstream commit 5ce86c6c861352c9346ebb5c96ed70cb67414aa3 ]
+
+While this is a somewhat unusual case, I encountered odd error messages
+when I ran Kconfig in a foreign architecture chroot.
+
+  $ make allmodconfig
+  sh: 1: rustc: not found
+  sh: 1: bindgen: not found
+  #
+  # configuration written to .config
+  #
+
+The successful execution of 'command -v rustc' does not necessarily mean
+that 'rustc --version' will succeed.
+
+  $ sh -c 'command -v rustc'
+  /home/masahiro/.cargo/bin/rustc
+  $ sh -c 'rustc --version'
+  sh: 1: rustc: not found
+
+Here, 'rustc' is built for x86, and I ran it in an arm64 system.
+
+The current code:
+
+  command -v $(RUSTC) >/dev/null 2>&1 && $(RUSTC) --version || echo n
+
+can be turned into:
+
+  command -v $(RUSTC) >/dev/null 2>&1 && $(RUSTC) --version 2>/dev/null || echo n
+
+However, I did not understand the necessity of 'command -v $(RUSTC)'.
+
+I simplified it to:
+
+  $(RUSTC) --version 2>/dev/null || echo n
+
+Fixes: 2f7ab1267dc9 ("Kbuild: add Rust support")
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Link: https://lore.kernel.org/r/20240727140302.1806011-1-masahiroy@kernel.org
+[ Rebased on top of v6.11-rc1. - Miguel ]
+Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ init/Kconfig | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/init/Kconfig b/init/Kconfig
+index b7683506264f0..051cbb22968bd 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1906,7 +1906,7 @@ config RUST
+ config RUSTC_VERSION_TEXT
+       string
+       depends on RUST
+-      default $(shell,command -v $(RUSTC) >/dev/null 2>&1 && $(RUSTC) --version || echo n)
++      default $(shell,$(RUSTC) --version 2>/dev/null || echo n)
+ config BINDGEN_VERSION_TEXT
+       string
+@@ -1914,7 +1914,7 @@ config BINDGEN_VERSION_TEXT
+       # The dummy parameter `workaround-for-0.69.0` is required to support 0.69.0
+       # (https://github.com/rust-lang/rust-bindgen/pull/2678). It can be removed when
+       # the minimum version is upgraded past that (0.69.1 already fixed the issue).
+-      default $(shell,command -v $(BINDGEN) >/dev/null 2>&1 && $(BINDGEN) --version workaround-for-0.69.0 || echo n)
++      default $(shell,$(BINDGEN) --version workaround-for-0.69.0 2>/dev/null || echo n)
+ #
+ # Place an empty function call at each tracepoint site. Can be
+-- 
+2.43.0
+
diff --git a/queue-6.10/rust-work-around-bindgen-0.69.0-issue.patch b/queue-6.10/rust-work-around-bindgen-0.69.0-issue.patch
new file mode 100644 (file)
index 0000000..0863ee8
--- /dev/null
@@ -0,0 +1,72 @@
+From 10578adbb4956d279377628129f5cbf99e1dddce Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 9 Jul 2024 18:06:03 +0200
+Subject: rust: work around `bindgen` 0.69.0 issue
+
+From: Miguel Ojeda <ojeda@kernel.org>
+
+[ Upstream commit 9e98db17837093cb0f4dcfcc3524739d93249c45 ]
+
+`bindgen` 0.69.0 contains a bug: `--version` does not work without
+providing a header [1]:
+
+    error: the following required arguments were not provided:
+      <HEADER>
+
+    Usage: bindgen <FLAGS> <OPTIONS> <HEADER> -- <CLANG_ARGS>...
+
+Thus, in preparation for supporting several `bindgen` versions, work
+around the issue by passing a dummy argument.
+
+Include a comment so that we can remove the workaround in the future.
+
+Link: https://github.com/rust-lang/rust-bindgen/pull/2678 [1]
+Reviewed-by: Finn Behrens <me@kloenk.dev>
+Tested-by: Benno Lossin <benno.lossin@proton.me>
+Tested-by: Andreas Hindborg <a.hindborg@samsung.com>
+Link: https://lore.kernel.org/r/20240709160615.998336-9-ojeda@kernel.org
+Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
+Stable-dep-of: 5ce86c6c8613 ("rust: suppress error messages from CONFIG_{RUSTC,BINDGEN}_VERSION_TEXT")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ init/Kconfig                 | 5 ++++-
+ scripts/rust_is_available.sh | 6 +++++-
+ 2 files changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/init/Kconfig b/init/Kconfig
+index 6e97693b675f2..b7683506264f0 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1911,7 +1911,10 @@ config RUSTC_VERSION_TEXT
+ config BINDGEN_VERSION_TEXT
+       string
+       depends on RUST
+-      default $(shell,command -v $(BINDGEN) >/dev/null 2>&1 && $(BINDGEN) --version || echo n)
++      # The dummy parameter `workaround-for-0.69.0` is required to support 0.69.0
++      # (https://github.com/rust-lang/rust-bindgen/pull/2678). It can be removed when
++      # the minimum version is upgraded past that (0.69.1 already fixed the issue).
++      default $(shell,command -v $(BINDGEN) >/dev/null 2>&1 && $(BINDGEN) --version workaround-for-0.69.0 || echo n)
+ #
+ # Place an empty function call at each tracepoint site. Can be
+diff --git a/scripts/rust_is_available.sh b/scripts/rust_is_available.sh
+index 117018946b577..a6fdcf13e0e53 100755
+--- a/scripts/rust_is_available.sh
++++ b/scripts/rust_is_available.sh
+@@ -129,8 +129,12 @@ fi
+ # Check that the Rust bindings generator is suitable.
+ #
+ # Non-stable and distributions' versions may have a version suffix, e.g. `-dev`.
++#
++# The dummy parameter `workaround-for-0.69.0` is required to support 0.69.0
++# (https://github.com/rust-lang/rust-bindgen/pull/2678). It can be removed when
++# the minimum version is upgraded past that (0.69.1 already fixed the issue).
+ rust_bindings_generator_output=$( \
+-      LC_ALL=C "$BINDGEN" --version 2>/dev/null
++      LC_ALL=C "$BINDGEN" --version workaround-for-0.69.0 2>/dev/null
+ ) || rust_bindings_generator_code=$?
+ if [ -n "$rust_bindings_generator_code" ]; then
+       echo >&2 "***"
+-- 
+2.43.0
+
diff --git a/queue-6.10/s390-dasd-remove-dma-alignment.patch b/queue-6.10/s390-dasd-remove-dma-alignment.patch
new file mode 100644 (file)
index 0000000..9696998
--- /dev/null
@@ -0,0 +1,95 @@
+From 1f91fdcda97a7ce0ddc29f039066320f53be0186 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Aug 2024 14:57:32 +0200
+Subject: s390/dasd: Remove DMA alignment
+
+From: Eric Farman <farman@linux.ibm.com>
+
+[ Upstream commit 2a07bb64d80152701d507b1498237ed1b8d83866 ]
+
+This reverts commit bc792884b76f ("s390/dasd: Establish DMA alignment").
+
+Quoting the original commit:
+    linux-next commit bf8d08532bc1 ("iomap: add support for dma aligned
+    direct-io") changes the alignment requirement to come from the block
+    device rather than the block size, and the default alignment
+    requirement is 512-byte boundaries. Since DASD I/O has page
+    alignments for IDAW/TIDAW requests, let's override this value to
+    restore the expected behavior.
+
+I mentioned TIDAW, but that was wrong. TIDAWs have no distinct alignment
+requirement (per p. 15-70 of POPS SA22-7832-13):
+
+   Unless otherwise specified, TIDAWs may designate
+   a block of main storage on any boundary and length
+   up to 4K bytes, provided the specified block does not
+   cross a 4 K-byte boundary.
+
+IDAWs do, but the original commit neglected that while ECKD DASD are
+typically formatted in 4096-byte blocks, they don't HAVE to be. Formatting
+an ECKD volume with smaller blocks is permitted (dasdfmt -b xxx), and the
+problematic commit enforces alignment properties to such a device that
+will result in errors, such as:
+
+   [test@host ~]# lsdasd -l a367 | grep blksz
+     blksz:                            512
+   [test@host ~]# mkfs.xfs -f /dev/disk/by-path/ccw-0.0.a367-part1
+   meta-data=/dev/dasdc1            isize=512    agcount=4, agsize=230075 blks
+            =                       sectsz=512   attr=2, projid32bit=1
+            =                       crc=1        finobt=1, sparse=1, rmapbt=1
+            =                       reflink=1    bigtime=1 inobtcount=1 nrext64=1
+   data     =                       bsize=4096   blocks=920299, imaxpct=25
+            =                       sunit=0      swidth=0 blks
+   naming   =version 2              bsize=4096   ascii-ci=0, ftype=1
+   log      =internal log           bsize=4096   blocks=16384, version=2
+            =                       sectsz=512   sunit=0 blks, lazy-count=1
+   realtime =none                   extsz=4096   blocks=0, rtextents=0
+   error reading existing superblock: Invalid argument
+   mkfs.xfs: pwrite failed: Invalid argument
+   libxfs_bwrite: write failed on (unknown) bno 0x70565c/0x100, err=22
+   mkfs.xfs: Releasing dirty buffer to free list!
+   found dirty buffer (bulk) on free list!
+   mkfs.xfs: pwrite failed: Invalid argument
+   ...snipped...
+
+The original commit omitted the FBA discipline for just this reason,
+but the formatted block size of the other disciplines was overlooked.
+The solution to all of this is to revert to the original behavior,
+such that the block size can be respected. There were two commits [1]
+that moved this code in the interim, so a straight git-revert is not
+possible, but the change is straightforward.
+
+But what of the original problem? That was manifested with a direct-io
+QEMU guest, where QEMU itself was changed a month or two later with
+commit 25474d90aa ("block: use the request length for iov alignment")
+such that the blamed kernel commit is unnecessary.
+
+[1] commit 0127a47f58c6 ("dasd: move queue setup to common code")
+    commit fde07a4d74e3 ("dasd: use the atomic queue limits API")
+
+Fixes: bc792884b76f ("s390/dasd: Establish DMA alignment")
+Reviewed-by: Stefan Haberland <sth@linux.ibm.com>
+Signed-off-by: Eric Farman <farman@linux.ibm.com>
+Signed-off-by: Stefan Haberland <sth@linux.ibm.com>
+Link: https://lore.kernel.org/r/20240812125733.126431-2-sth@linux.ibm.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/s390/block/dasd_genhd.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
+index 4533dd055ca8e..23d6e638f381d 100644
+--- a/drivers/s390/block/dasd_genhd.c
++++ b/drivers/s390/block/dasd_genhd.c
+@@ -41,7 +41,6 @@ int dasd_gendisk_alloc(struct dasd_block *block)
+                */
+               .max_segment_size = PAGE_SIZE,
+               .seg_boundary_mask = PAGE_SIZE - 1,
+-              .dma_alignment = PAGE_SIZE - 1,
+               .max_segments = USHRT_MAX,
+       };
+       struct gendisk *gdp;
+-- 
+2.43.0
+
diff --git a/queue-6.10/series b/queue-6.10/series
index 172455e2024c80c9e5511b4bb13b14796e369ed9..eb5f0bb4561559425fb9ff9341a7d021038c0eb4 100644 (file)
--- a/queue-6.10/series
@@ -122,3 +122,15 @@ iommu-restore-lost-return-in-iommu_report_device_fau.patch
 gpio-mlxbf3-support-shutdown-function.patch
 alsa-hda-realtek-fix-noise-from-speakers-on-lenovo-i.patch
 drm-v3d-fix-out-of-bounds-read-in-v3d_csd_job_run.patch
+rust-work-around-bindgen-0.69.0-issue.patch
+rust-suppress-error-messages-from-config_-rustc-bind.patch
+rust-fix-the-default-format-for-config_-rustc-bindge.patch
+s390-dasd-remove-dma-alignment.patch
+io_uring-napi-remove-unnecessary-s64-cast.patch
+io_uring-napi-use-ktime-in-busy-polling.patch
+io_uring-napi-check-napi_enabled-in-io_napi_add-befo.patch
+cpu-smt-enable-smt-only-if-a-core-is-online.patch
+powerpc-topology-check-if-a-core-is-online.patch
+printk-panic-allow-cpu-backtraces-to-be-written-into.patch
+arm64-fix-kasan-random-tag-seed-initialization.patch
+block-fix-lockdep-warning-in-blk_mq_mark_tag_wait.patch