git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 6.6
authorSasha Levin <sashal@kernel.org>
Sun, 3 Nov 2024 20:24:39 +0000 (15:24 -0500)
committerSasha Levin <sashal@kernel.org>
Sun, 3 Nov 2024 20:24:39 +0000 (15:24 -0500)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-6.6/rcu-tasks-add-data-to-eliminate-rcu-tasks-do_exit-de.patch [new file with mode: 0644]
queue-6.6/rcu-tasks-fix-access-non-existent-percpu-rtpcp-varia.patch [new file with mode: 0644]
queue-6.6/rcu-tasks-initialize-data-to-eliminate-rcu-tasks-do_.patch [new file with mode: 0644]
queue-6.6/rcu-tasks-pull-sampling-of-percpu_dequeue_lim-out-of.patch [new file with mode: 0644]
queue-6.6/series
queue-6.6/usb-gadget-dummy-hcd-fix-task-hung-problem.patch [new file with mode: 0644]
queue-6.6/usb-gadget-dummy_hcd-execute-hrtimer-callback-in-sof.patch [new file with mode: 0644]
queue-6.6/usb-gadget-dummy_hcd-set-transfer-interval-to-1-micr.patch [new file with mode: 0644]
queue-6.6/usb-gadget-dummy_hcd-switch-to-hrtimer-transfer-sche.patch [new file with mode: 0644]

diff --git a/queue-6.6/rcu-tasks-add-data-to-eliminate-rcu-tasks-do_exit-de.patch b/queue-6.6/rcu-tasks-add-data-to-eliminate-rcu-tasks-do_exit-de.patch
new file mode 100644 (file)
index 0000000..79adaa6
--- /dev/null
@@ -0,0 +1,79 @@
+From bf64fc5086c244a40bfbf58bbbadeb6b2421b869 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Feb 2024 13:08:22 -0800
+Subject: rcu-tasks: Add data to eliminate RCU-tasks/do_exit() deadlocks
+
+From: Paul E. McKenney <paulmck@kernel.org>
+
+[ Upstream commit bfe93930ea1ea3c6c115a7d44af6e4fea609067e ]
+
+Holding a mutex across synchronize_rcu_tasks() and acquiring
+that same mutex in code called from do_exit() after its call to
+exit_tasks_rcu_start() but before its call to exit_tasks_rcu_stop()
+results in deadlock.  This is by design, because tasks that are far
+enough into do_exit() are no longer present on the tasks list, making
+it a bit difficult for RCU Tasks to find them, let alone wait on them
+to do a voluntary context switch.  However, such deadlocks are becoming
+more frequent.  In addition, lockdep currently does not detect such
+deadlocks and they can be difficult to reproduce.
+
+In addition, if a task voluntarily context switches during that time
+(for example, if it blocks acquiring a mutex), then this task is in an
+RCU Tasks quiescent state.  And with some adjustments, RCU Tasks could
+just as well take advantage of that fact.
+
+This commit therefore adds the data structures that will be needed
+to rely on these quiescent states and to eliminate these deadlocks.
+
+Link: https://lore.kernel.org/all/20240118021842.290665-1-chenzhongjin@huawei.com/
+
+Reported-by: Chen Zhongjin <chenzhongjin@huawei.com>
+Reported-by: Yang Jihong <yangjihong1@huawei.com>
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Tested-by: Yang Jihong <yangjihong1@huawei.com>
+Tested-by: Chen Zhongjin <chenzhongjin@huawei.com>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
+Stable-dep-of: fd70e9f1d85f ("rcu-tasks: Fix access non-existent percpu rtpcp variable in rcu_tasks_need_gpcb()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/sched.h | 2 ++
+ kernel/rcu/tasks.h    | 2 ++
+ 2 files changed, 4 insertions(+)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 77f01ac385f7a..3d83cc397eac1 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -854,6 +854,8 @@ struct task_struct {
+       u8                              rcu_tasks_idx;
+       int                             rcu_tasks_idle_cpu;
+       struct list_head                rcu_tasks_holdout_list;
++      int                             rcu_tasks_exit_cpu;
++      struct list_head                rcu_tasks_exit_list;
+ #endif /* #ifdef CONFIG_TASKS_RCU */
+ #ifdef CONFIG_TASKS_TRACE_RCU
+diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
+index 90425d0ec09cf..7ac3c8af075fc 100644
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -32,6 +32,7 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
+  * @rtp_irq_work: IRQ work queue for deferred wakeups.
+  * @barrier_q_head: RCU callback for barrier operation.
+  * @rtp_blkd_tasks: List of tasks blocked as readers.
++ * @rtp_exit_list: List of tasks in the latter portion of do_exit().
+  * @cpu: CPU number corresponding to this entry.
+  * @rtpp: Pointer to the rcu_tasks structure.
+  */
+@@ -46,6 +47,7 @@ struct rcu_tasks_percpu {
+       struct irq_work rtp_irq_work;
+       struct rcu_head barrier_q_head;
+       struct list_head rtp_blkd_tasks;
++      struct list_head rtp_exit_list;
+       int cpu;
+       struct rcu_tasks *rtpp;
+ };
+-- 
+2.43.0
+
diff --git a/queue-6.6/rcu-tasks-fix-access-non-existent-percpu-rtpcp-varia.patch b/queue-6.6/rcu-tasks-fix-access-non-existent-percpu-rtpcp-varia.patch
new file mode 100644 (file)
index 0000000..6ed8849
--- /dev/null
@@ -0,0 +1,262 @@
+From 42d97164edf9edaf0fe4fba4813d82b65496f1bd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Jul 2024 12:45:42 +0800
+Subject: rcu-tasks: Fix access non-existent percpu rtpcp variable in
+ rcu_tasks_need_gpcb()
+
+From: Zqiang <qiang.zhang1211@gmail.com>
+
+[ Upstream commit fd70e9f1d85f5323096ad313ba73f5fe3d15ea41 ]
+
+For kernels built with CONFIG_FORCE_NR_CPUS=y, the nr_cpu_ids is
+defined as NR_CPUS instead of the number of possible cpus, this
+will cause the following system panic:
+
+smpboot: Allowing 4 CPUs, 0 hotplug CPUs
+...
+setup_percpu: NR_CPUS:512 nr_cpumask_bits:512 nr_cpu_ids:512 nr_node_ids:1
+...
+BUG: unable to handle page fault for address: ffffffff9911c8c8
+Oops: 0000 [#1] PREEMPT SMP PTI
+CPU: 0 PID: 15 Comm: rcu_tasks_trace Tainted: G W
+6.6.21 #1 5dc7acf91a5e8e9ac9dcfc35bee0245691283ea6
+RIP: 0010:rcu_tasks_need_gpcb+0x25d/0x2c0
+RSP: 0018:ffffa371c00a3e60 EFLAGS: 00010082
+CR2: ffffffff9911c8c8 CR3: 000000040fa20005 CR4: 00000000001706f0
+Call Trace:
+<TASK>
+? __die+0x23/0x80
+? page_fault_oops+0xa4/0x180
+? exc_page_fault+0x152/0x180
+? asm_exc_page_fault+0x26/0x40
+? rcu_tasks_need_gpcb+0x25d/0x2c0
+? __pfx_rcu_tasks_kthread+0x40/0x40
+rcu_tasks_one_gp+0x69/0x180
+rcu_tasks_kthread+0x94/0xc0
+kthread+0xe8/0x140
+? __pfx_kthread+0x40/0x40
+ret_from_fork+0x34/0x80
+? __pfx_kthread+0x40/0x40
+ret_from_fork_asm+0x1b/0x80
+</TASK>
+
+Considering that there may be holes in the CPU numbers, use the
+maximum possible cpu number, instead of nr_cpu_ids, for configuring
+enqueue and dequeue limits.
+
+[ neeraj.upadhyay: Fix htmldocs build error reported by Stephen Rothwell ]
+
+Closes: https://lore.kernel.org/linux-input/CALMA0xaTSMN+p4xUXkzrtR5r6k7hgoswcaXx7baR_z9r5jjskw@mail.gmail.com/T/#u
+Reported-by: Zhixu Liu <zhixu.liu@gmail.com>
+Signed-off-by: Zqiang <qiang.zhang1211@gmail.com>
+Signed-off-by: Neeraj Upadhyay <neeraj.upadhyay@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/rcu/tasks.h | 82 ++++++++++++++++++++++++++++++----------------
+ 1 file changed, 53 insertions(+), 29 deletions(-)
+
+diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
+index 4eae3b1bda70e..3fcd9f92d3861 100644
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -34,6 +34,7 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
+  * @rtp_blkd_tasks: List of tasks blocked as readers.
+  * @rtp_exit_list: List of tasks in the latter portion of do_exit().
+  * @cpu: CPU number corresponding to this entry.
++ * @index: Index of this CPU in rtpcp_array of the rcu_tasks structure.
+  * @rtpp: Pointer to the rcu_tasks structure.
+  */
+ struct rcu_tasks_percpu {
+@@ -49,6 +50,7 @@ struct rcu_tasks_percpu {
+       struct list_head rtp_blkd_tasks;
+       struct list_head rtp_exit_list;
+       int cpu;
++      int index;
+       struct rcu_tasks *rtpp;
+ };
+@@ -75,6 +77,7 @@ struct rcu_tasks_percpu {
+  * @postgp_func: This flavor's post-grace-period function (optional).
+  * @call_func: This flavor's call_rcu()-equivalent function.
+  * @rtpcpu: This flavor's rcu_tasks_percpu structure.
++ * @rtpcp_array: Array of pointers to rcu_tasks_percpu structure of CPUs in cpu_possible_mask.
+  * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
+  * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
+  * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
+@@ -108,6 +111,7 @@ struct rcu_tasks {
+       postgp_func_t postgp_func;
+       call_rcu_func_t call_func;
+       struct rcu_tasks_percpu __percpu *rtpcpu;
++      struct rcu_tasks_percpu **rtpcp_array;
+       int percpu_enqueue_shift;
+       int percpu_enqueue_lim;
+       int percpu_dequeue_lim;
+@@ -181,6 +185,8 @@ module_param(rcu_task_collapse_lim, int, 0444);
+ static int rcu_task_lazy_lim __read_mostly = 32;
+ module_param(rcu_task_lazy_lim, int, 0444);
++static int rcu_task_cpu_ids;
++
+ /* RCU tasks grace-period state for debugging. */
+ #define RTGS_INIT              0
+ #define RTGS_WAIT_WAIT_CBS     1
+@@ -245,6 +251,8 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
+       unsigned long flags;
+       int lim;
+       int shift;
++      int maxcpu;
++      int index = 0;
+       if (rcu_task_enqueue_lim < 0) {
+               rcu_task_enqueue_lim = 1;
+@@ -254,14 +262,9 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
+       }
+       lim = rcu_task_enqueue_lim;
+-      if (lim > nr_cpu_ids)
+-              lim = nr_cpu_ids;
+-      shift = ilog2(nr_cpu_ids / lim);
+-      if (((nr_cpu_ids - 1) >> shift) >= lim)
+-              shift++;
+-      WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
+-      WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
+-      smp_store_release(&rtp->percpu_enqueue_lim, lim);
++      rtp->rtpcp_array = kcalloc(num_possible_cpus(), sizeof(struct rcu_tasks_percpu *), GFP_KERNEL);
++      BUG_ON(!rtp->rtpcp_array);
++
+       for_each_possible_cpu(cpu) {
+               struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
+@@ -275,14 +278,29 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
+               INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
+               rtpcp->cpu = cpu;
+               rtpcp->rtpp = rtp;
++              rtpcp->index = index;
++              rtp->rtpcp_array[index] = rtpcp;
++              index++;
+               if (!rtpcp->rtp_blkd_tasks.next)
+                       INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
+               if (!rtpcp->rtp_exit_list.next)
+                       INIT_LIST_HEAD(&rtpcp->rtp_exit_list);
++              maxcpu = cpu;
+       }
+-      pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d.\n", rtp->name,
+-                      data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim), rcu_task_cb_adjust);
++      rcu_task_cpu_ids = maxcpu + 1;
++      if (lim > rcu_task_cpu_ids)
++              lim = rcu_task_cpu_ids;
++      shift = ilog2(rcu_task_cpu_ids / lim);
++      if (((rcu_task_cpu_ids - 1) >> shift) >= lim)
++              shift++;
++      WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
++      WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
++      smp_store_release(&rtp->percpu_enqueue_lim, lim);
++
++      pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d rcu_task_cpu_ids=%d.\n",
++                      rtp->name, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim),
++                      rcu_task_cb_adjust, rcu_task_cpu_ids);
+ }
+ // Compute wakeup time for lazy callback timer.
+@@ -350,7 +368,7 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
+                       rtpcp->rtp_n_lock_retries = 0;
+               }
+               if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
+-                  READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
++                  READ_ONCE(rtp->percpu_enqueue_lim) != rcu_task_cpu_ids)
+                       needadjust = true;  // Defer adjustment to avoid deadlock.
+       }
+       // Queuing callbacks before initialization not yet supported.
+@@ -370,10 +388,10 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
+       raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
+       if (unlikely(needadjust)) {
+               raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
+-              if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
++              if (rtp->percpu_enqueue_lim != rcu_task_cpu_ids) {
+                       WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
+-                      WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
+-                      smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
++                      WRITE_ONCE(rtp->percpu_dequeue_lim, rcu_task_cpu_ids);
++                      smp_store_release(&rtp->percpu_enqueue_lim, rcu_task_cpu_ids);
+                       pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
+               }
+               raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
+@@ -446,6 +464,8 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
+       dequeue_limit = smp_load_acquire(&rtp->percpu_dequeue_lim);
+       for (cpu = 0; cpu < dequeue_limit; cpu++) {
++              if (!cpu_possible(cpu))
++                      continue;
+               struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
+               /* Advance and accelerate any new callbacks. */
+@@ -483,7 +503,7 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
+       if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
+               raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
+               if (rtp->percpu_enqueue_lim > 1) {
+-                      WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
++                      WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(rcu_task_cpu_ids));
+                       smp_store_release(&rtp->percpu_enqueue_lim, 1);
+                       rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
+                       gpdone = false;
+@@ -498,7 +518,9 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
+                       pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
+               }
+               if (rtp->percpu_dequeue_lim == 1) {
+-                      for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) {
++                      for (cpu = rtp->percpu_dequeue_lim; cpu < rcu_task_cpu_ids; cpu++) {
++                              if (!cpu_possible(cpu))
++                                      continue;
+                               struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
+                               WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
+@@ -513,30 +535,32 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
+ // Advance callbacks and invoke any that are ready.
+ static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
+ {
+-      int cpu;
+-      int cpunext;
+       int cpuwq;
+       unsigned long flags;
+       int len;
++      int index;
+       struct rcu_head *rhp;
+       struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
+       struct rcu_tasks_percpu *rtpcp_next;
+-      cpu = rtpcp->cpu;
+-      cpunext = cpu * 2 + 1;
+-      if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
+-              rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
+-              cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
+-              queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
+-              cpunext++;
+-              if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
+-                      rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
+-                      cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
++      index = rtpcp->index * 2 + 1;
++      if (index < num_possible_cpus()) {
++              rtpcp_next = rtp->rtpcp_array[index];
++              if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
++                      cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
+                       queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
++                      index++;
++                      if (index < num_possible_cpus()) {
++                              rtpcp_next = rtp->rtpcp_array[index];
++                              if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
++                                      cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
++                                      queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
++                              }
++                      }
+               }
+       }
+-      if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu))
++      if (rcu_segcblist_empty(&rtpcp->cblist))
+               return;
+       raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
+       rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
+-- 
+2.43.0
+
diff --git a/queue-6.6/rcu-tasks-initialize-data-to-eliminate-rcu-tasks-do_.patch b/queue-6.6/rcu-tasks-initialize-data-to-eliminate-rcu-tasks-do_.patch
new file mode 100644 (file)
index 0000000..d7df8bc
--- /dev/null
@@ -0,0 +1,84 @@
+From 190bd105617af865b7aa6e5c76867c30a5014f70 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Feb 2024 13:10:19 -0800
+Subject: rcu-tasks: Initialize data to eliminate RCU-tasks/do_exit() deadlocks
+
+From: Paul E. McKenney <paulmck@kernel.org>
+
+[ Upstream commit 46faf9d8e1d52e4a91c382c6c72da6bd8e68297b ]
+
+Holding a mutex across synchronize_rcu_tasks() and acquiring
+that same mutex in code called from do_exit() after its call to
+exit_tasks_rcu_start() but before its call to exit_tasks_rcu_stop()
+results in deadlock.  This is by design, because tasks that are far
+enough into do_exit() are no longer present on the tasks list, making
+it a bit difficult for RCU Tasks to find them, let alone wait on them
+to do a voluntary context switch.  However, such deadlocks are becoming
+more frequent.  In addition, lockdep currently does not detect such
+deadlocks and they can be difficult to reproduce.
+
+In addition, if a task voluntarily context switches during that time
+(for example, if it blocks acquiring a mutex), then this task is in an
+RCU Tasks quiescent state.  And with some adjustments, RCU Tasks could
+just as well take advantage of that fact.
+
+This commit therefore initializes the data structures that will be needed
+to rely on these quiescent states and to eliminate these deadlocks.
+
+Link: https://lore.kernel.org/all/20240118021842.290665-1-chenzhongjin@huawei.com/
+
+Reported-by: Chen Zhongjin <chenzhongjin@huawei.com>
+Reported-by: Yang Jihong <yangjihong1@huawei.com>
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Tested-by: Yang Jihong <yangjihong1@huawei.com>
+Tested-by: Chen Zhongjin <chenzhongjin@huawei.com>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
+Stable-dep-of: fd70e9f1d85f ("rcu-tasks: Fix access non-existent percpu rtpcp variable in rcu_tasks_need_gpcb()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ init/init_task.c   | 1 +
+ kernel/fork.c      | 1 +
+ kernel/rcu/tasks.h | 2 ++
+ 3 files changed, 4 insertions(+)
+
+diff --git a/init/init_task.c b/init/init_task.c
+index ff6c4b9bfe6b1..fd9e27185e23a 100644
+--- a/init/init_task.c
++++ b/init/init_task.c
+@@ -152,6 +152,7 @@ struct task_struct init_task
+       .rcu_tasks_holdout = false,
+       .rcu_tasks_holdout_list = LIST_HEAD_INIT(init_task.rcu_tasks_holdout_list),
+       .rcu_tasks_idle_cpu = -1,
++      .rcu_tasks_exit_list = LIST_HEAD_INIT(init_task.rcu_tasks_exit_list),
+ #endif
+ #ifdef CONFIG_TASKS_TRACE_RCU
+       .trc_reader_nesting = 0,
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 32ffbc1c96bae..9098284720e38 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1973,6 +1973,7 @@ static inline void rcu_copy_process(struct task_struct *p)
+       p->rcu_tasks_holdout = false;
+       INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
+       p->rcu_tasks_idle_cpu = -1;
++      INIT_LIST_HEAD(&p->rcu_tasks_exit_list);
+ #endif /* #ifdef CONFIG_TASKS_RCU */
+ #ifdef CONFIG_TASKS_TRACE_RCU
+       p->trc_reader_nesting = 0;
+diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
+index 7ac3c8af075fc..4eae3b1bda70e 100644
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -277,6 +277,8 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
+               rtpcp->rtpp = rtp;
+               if (!rtpcp->rtp_blkd_tasks.next)
+                       INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
++              if (!rtpcp->rtp_exit_list.next)
++                      INIT_LIST_HEAD(&rtpcp->rtp_exit_list);
+       }
+       pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d.\n", rtp->name,
+-- 
+2.43.0
+
diff --git a/queue-6.6/rcu-tasks-pull-sampling-of-percpu_dequeue_lim-out-of.patch b/queue-6.6/rcu-tasks-pull-sampling-of-percpu_dequeue_lim-out-of.patch
new file mode 100644 (file)
index 0000000..384778b
--- /dev/null
@@ -0,0 +1,81 @@
+From d038a4e204e4af724fb9b20060d1b893a91c7f75 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 2 Aug 2023 13:42:00 -0700
+Subject: rcu-tasks: Pull sampling of ->percpu_dequeue_lim out of loop
+
+From: Paul E. McKenney <paulmck@kernel.org>
+
+[ Upstream commit e62d8ae4620865411d1b2347980aa28ccf891a3d ]
+
+The rcu_tasks_need_gpcb() samples ->percpu_dequeue_lim as part of the
+condition clause of a "for" loop, which is a bit confusing.  This commit
+therefore hoists this sampling out of the loop, using the result loaded
+in the condition clause.
+
+So why does this work in the face of a concurrent switch from single-CPU
+queueing to per-CPU queueing?
+
+o      The call_rcu_tasks_generic() that makes the change has already
+       enqueued its callback, which means that all of the other CPU's
+       callback queues are empty.
+
+o      For the call_rcu_tasks_generic() that first notices
+       the switch to per-CPU queues, the smp_store_release()
+       used to update ->percpu_enqueue_lim pairs with the
+       raw_spin_trylock_rcu_node()'s full barrier that is
+       between the READ_ONCE(rtp->percpu_enqueue_shift) and the
+       rcu_segcblist_enqueue() that enqueues the callback.
+
+o      Because this CPU's queue is empty (unless it happens to
+       be the original single queue, in which case there is no
+       need for synchronization), this call_rcu_tasks_generic()
+       will do an irq_work_queue() to schedule a handler for the
+       needed rcuwait_wake_up() call.  This call will be ordered
+       after the first call_rcu_tasks_generic() function's change to
+       ->percpu_dequeue_lim.
+
+o      This rcuwait_wake_up() will either happen before or after the
+       set_current_state() in rcuwait_wait_event().  If it happens
+       before, the "condition" argument's call to rcu_tasks_need_gpcb()
+       will be ordered after the original change, and all callbacks on
+       all CPUs will be visible.  Otherwise, if it happens after, then
+       the grace-period kthread's state will be set back to running,
+       which will result in a later call to rcuwait_wait_event() and
+       thus to rcu_tasks_need_gpcb(), which will again see the change.
+
+So it all works out.
+
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Stable-dep-of: fd70e9f1d85f ("rcu-tasks: Fix access non-existent percpu rtpcp variable in rcu_tasks_need_gpcb()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/rcu/tasks.h | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
+index df81506cf2bde..90425d0ec09cf 100644
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -432,6 +432,7 @@ static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
+ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
+ {
+       int cpu;
++      int dequeue_limit;
+       unsigned long flags;
+       bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq);
+       long n;
+@@ -439,7 +440,8 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
+       long ncbsnz = 0;
+       int needgpcb = 0;
+-      for (cpu = 0; cpu < smp_load_acquire(&rtp->percpu_dequeue_lim); cpu++) {
++      dequeue_limit = smp_load_acquire(&rtp->percpu_dequeue_lim);
++      for (cpu = 0; cpu < dequeue_limit; cpu++) {
+               struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
+               /* Advance and accelerate any new callbacks. */
+-- 
+2.43.0
+
index 2900e52493cdde2b41c2339da78c4e60d805b7d2..6ad195094f6c9787f9a57246aaf16775b41c51ba 100644 (file)
@@ -77,3 +77,11 @@ thermal-intel-int340x-processor-add-mmio-rapl-pl4-su.patch
 net-amd-mvme147-fix-probe-banner-message.patch
 nfs-remove-revoked-delegation-from-server-s-delegati.patch
 misc-sgi-gru-don-t-disable-preemption-in-gru-driver.patch
+usb-gadget-dummy_hcd-switch-to-hrtimer-transfer-sche.patch
+usb-gadget-dummy_hcd-set-transfer-interval-to-1-micr.patch
+usb-gadget-dummy_hcd-execute-hrtimer-callback-in-sof.patch
+usb-gadget-dummy-hcd-fix-task-hung-problem.patch
+rcu-tasks-pull-sampling-of-percpu_dequeue_lim-out-of.patch
+rcu-tasks-add-data-to-eliminate-rcu-tasks-do_exit-de.patch
+rcu-tasks-initialize-data-to-eliminate-rcu-tasks-do_.patch
+rcu-tasks-fix-access-non-existent-percpu-rtpcp-varia.patch
diff --git a/queue-6.6/usb-gadget-dummy-hcd-fix-task-hung-problem.patch b/queue-6.6/usb-gadget-dummy-hcd-fix-task-hung-problem.patch
new file mode 100644 (file)
index 0000000..ed2c05e
--- /dev/null
@@ -0,0 +1,124 @@
+From e08951b4861cf32ef67cffd3c708d2a6e7586124 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 16 Oct 2024 11:44:45 -0400
+Subject: USB: gadget: dummy-hcd: Fix "task hung" problem
+
+From: Alan Stern <stern@rowland.harvard.edu>
+
+[ Upstream commit 5189df7b8088268012882c220d6aca4e64981348 ]
+
+The syzbot fuzzer has been encountering "task hung" problems ever
+since the dummy-hcd driver was changed to use hrtimers instead of
+regular timers.  It turns out that the problems are caused by a subtle
+difference between the timer_pending() and hrtimer_active() APIs.
+
+The changeover blindly replaced the first by the second.  However,
+timer_pending() returns True when the timer is queued but not when its
+callback is running, whereas hrtimer_active() returns True when the
+hrtimer is queued _or_ its callback is running.  This difference
+occasionally caused dummy_urb_enqueue() to think that the callback
+routine had not yet started when in fact it was almost finished.  As a
+result the hrtimer was not restarted, which made it impossible for the
+driver to dequeue later the URB that was just enqueued.  This caused
+usb_kill_urb() to hang, and things got worse from there.
+
+Since hrtimers have no API for telling when they are queued and the
+callback isn't running, the driver must keep track of this for itself.
+That's what this patch does, adding a new "timer_pending" flag and
+setting or clearing it at the appropriate times.
+
+Reported-by: syzbot+f342ea16c9d06d80b585@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/linux-usb/6709234e.050a0220.3e960.0011.GAE@google.com/
+Tested-by: syzbot+f342ea16c9d06d80b585@syzkaller.appspotmail.com
+Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
+Fixes: a7f3813e589f ("usb: gadget: dummy_hcd: Switch to hrtimer transfer scheduler")
+Cc: Marcello Sylvester Bauer <sylv@sylv.io>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/2dab644e-ef87-4de8-ac9a-26f100b2c609@rowland.harvard.edu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/gadget/udc/dummy_hcd.c | 20 +++++++++++++++-----
+ 1 file changed, 15 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
+index ff7bee78bcc49..d5d89fadde433 100644
+--- a/drivers/usb/gadget/udc/dummy_hcd.c
++++ b/drivers/usb/gadget/udc/dummy_hcd.c
+@@ -254,6 +254,7 @@ struct dummy_hcd {
+       u32                             stream_en_ep;
+       u8                              num_stream[30 / 2];
++      unsigned                        timer_pending:1;
+       unsigned                        active:1;
+       unsigned                        old_active:1;
+       unsigned                        resuming:1;
+@@ -1303,9 +1304,11 @@ static int dummy_urb_enqueue(
+               urb->error_count = 1;           /* mark as a new urb */
+       /* kick the scheduler, it'll do the rest */
+-      if (!hrtimer_active(&dum_hcd->timer))
++      if (!dum_hcd->timer_pending) {
++              dum_hcd->timer_pending = 1;
+               hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS),
+                               HRTIMER_MODE_REL_SOFT);
++      }
+  done:
+       spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
+@@ -1324,9 +1327,10 @@ static int dummy_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+       spin_lock_irqsave(&dum_hcd->dum->lock, flags);
+       rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+-      if (!rc && dum_hcd->rh_state != DUMMY_RH_RUNNING &&
+-                      !list_empty(&dum_hcd->urbp_list))
++      if (rc == 0 && !dum_hcd->timer_pending) {
++              dum_hcd->timer_pending = 1;
+               hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL_SOFT);
++      }
+       spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
+       return rc;
+@@ -1813,6 +1817,7 @@ static enum hrtimer_restart dummy_timer(struct hrtimer *t)
+       /* look at each urb queued by the host side driver */
+       spin_lock_irqsave(&dum->lock, flags);
++      dum_hcd->timer_pending = 0;
+       if (!dum_hcd->udev) {
+               dev_err(dummy_dev(dum_hcd),
+@@ -1994,8 +1999,10 @@ static enum hrtimer_restart dummy_timer(struct hrtimer *t)
+       if (list_empty(&dum_hcd->urbp_list)) {
+               usb_put_dev(dum_hcd->udev);
+               dum_hcd->udev = NULL;
+-      } else if (dum_hcd->rh_state == DUMMY_RH_RUNNING) {
++      } else if (!dum_hcd->timer_pending &&
++                      dum_hcd->rh_state == DUMMY_RH_RUNNING) {
+               /* want a 1 msec delay here */
++              dum_hcd->timer_pending = 1;
+               hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS),
+                               HRTIMER_MODE_REL_SOFT);
+       }
+@@ -2390,8 +2397,10 @@ static int dummy_bus_resume(struct usb_hcd *hcd)
+       } else {
+               dum_hcd->rh_state = DUMMY_RH_RUNNING;
+               set_link_state(dum_hcd);
+-              if (!list_empty(&dum_hcd->urbp_list))
++              if (!list_empty(&dum_hcd->urbp_list)) {
++                      dum_hcd->timer_pending = 1;
+                       hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL_SOFT);
++              }
+               hcd->state = HC_STATE_RUNNING;
+       }
+       spin_unlock_irq(&dum_hcd->dum->lock);
+@@ -2522,6 +2531,7 @@ static void dummy_stop(struct usb_hcd *hcd)
+       struct dummy_hcd        *dum_hcd = hcd_to_dummy_hcd(hcd);
+       hrtimer_cancel(&dum_hcd->timer);
++      dum_hcd->timer_pending = 0;
+       device_remove_file(dummy_dev(dum_hcd), &dev_attr_urbs);
+       dev_info(dummy_dev(dum_hcd), "stopped\n");
+ }
+-- 
+2.43.0
+
diff --git a/queue-6.6/usb-gadget-dummy_hcd-execute-hrtimer-callback-in-sof.patch b/queue-6.6/usb-gadget-dummy_hcd-execute-hrtimer-callback-in-sof.patch
new file mode 100644 (file)
index 0000000..b7d72d3
--- /dev/null
@@ -0,0 +1,118 @@
+From a843062442175f9eadc17f2b36af0d3696a80d5d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Sep 2024 03:30:51 +0200
+Subject: usb: gadget: dummy_hcd: execute hrtimer callback in softirq context
+
+From: Andrey Konovalov <andreyknvl@gmail.com>
+
+[ Upstream commit 9313d139aa25e572d860f6f673b73a20f32d7f93 ]
+
+Commit a7f3813e589f ("usb: gadget: dummy_hcd: Switch to hrtimer transfer
+scheduler") switched dummy_hcd to use hrtimer and made the timer's
+callback be executed in the hardirq context.
+
+With that change, __usb_hcd_giveback_urb now gets executed in the hardirq
+context, which causes problems for KCOV and KMSAN.
+
+One problem is that KCOV now is unable to collect coverage from
+the USB code that gets executed from the dummy_hcd's timer callback,
+as KCOV cannot collect coverage in the hardirq context.
+
+Another problem is that the dummy_hcd hrtimer might get triggered in the
+middle of a softirq with KCOV remote coverage collection enabled, and that
+causes a WARNING in KCOV, as reported by syzbot. (I sent a separate patch
+to shut down this WARNING, but that doesn't fix the other two issues.)
+
+Finally, KMSAN appears to ignore tracking memory copying operations
+that happen in the hardirq context, which causes false positive
+kernel-infoleaks, as reported by syzbot.
+
+Change the hrtimer in dummy_hcd to execute the callback in the softirq
+context.
+
+Reported-by: syzbot+2388cdaeb6b10f0c13ac@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=2388cdaeb6b10f0c13ac
+Reported-by: syzbot+17ca2339e34a1d863aad@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=17ca2339e34a1d863aad
+Reported-by: syzbot+c793a7eca38803212c61@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=c793a7eca38803212c61
+Reported-by: syzbot+1e6e0b916b211bee1bd6@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=1e6e0b916b211bee1bd6
+Reported-by: kernel test robot <oliver.sang@intel.com>
+Closes: https://lore.kernel.org/oe-lkp/202406141323.413a90d2-lkp@intel.com
+Fixes: a7f3813e589f ("usb: gadget: dummy_hcd: Switch to hrtimer transfer scheduler")
+Cc: stable@vger.kernel.org
+Acked-by: Marcello Sylvester Bauer <sylv@sylv.io>
+Signed-off-by: Andrey Konovalov <andreyknvl@gmail.com>
+Reported-by: syzbot+edd9fe0d3a65b14588d5@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=edd9fe0d3a65b14588d5
+Link: https://lore.kernel.org/r/20240904013051.4409-1-andrey.konovalov@linux.dev
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/gadget/udc/dummy_hcd.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
+index f37b0d8386c1a..ff7bee78bcc49 100644
+--- a/drivers/usb/gadget/udc/dummy_hcd.c
++++ b/drivers/usb/gadget/udc/dummy_hcd.c
+@@ -1304,7 +1304,8 @@ static int dummy_urb_enqueue(
+       /* kick the scheduler, it'll do the rest */
+       if (!hrtimer_active(&dum_hcd->timer))
+-              hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS), HRTIMER_MODE_REL);
++              hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS),
++                              HRTIMER_MODE_REL_SOFT);
+  done:
+       spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
+@@ -1325,7 +1326,7 @@ static int dummy_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+       rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+       if (!rc && dum_hcd->rh_state != DUMMY_RH_RUNNING &&
+                       !list_empty(&dum_hcd->urbp_list))
+-              hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL);
++              hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL_SOFT);
+       spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
+       return rc;
+@@ -1995,7 +1996,8 @@ static enum hrtimer_restart dummy_timer(struct hrtimer *t)
+               dum_hcd->udev = NULL;
+       } else if (dum_hcd->rh_state == DUMMY_RH_RUNNING) {
+               /* want a 1 msec delay here */
+-              hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS), HRTIMER_MODE_REL);
++              hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS),
++                              HRTIMER_MODE_REL_SOFT);
+       }
+       spin_unlock_irqrestore(&dum->lock, flags);
+@@ -2389,7 +2391,7 @@ static int dummy_bus_resume(struct usb_hcd *hcd)
+               dum_hcd->rh_state = DUMMY_RH_RUNNING;
+               set_link_state(dum_hcd);
+               if (!list_empty(&dum_hcd->urbp_list))
+-                      hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL);
++                      hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL_SOFT);
+               hcd->state = HC_STATE_RUNNING;
+       }
+       spin_unlock_irq(&dum_hcd->dum->lock);
+@@ -2467,7 +2469,7 @@ static DEVICE_ATTR_RO(urbs);
+ static int dummy_start_ss(struct dummy_hcd *dum_hcd)
+ {
+-      hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++      hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
+       dum_hcd->timer.function = dummy_timer;
+       dum_hcd->rh_state = DUMMY_RH_RUNNING;
+       dum_hcd->stream_en_ep = 0;
+@@ -2497,7 +2499,7 @@ static int dummy_start(struct usb_hcd *hcd)
+               return dummy_start_ss(dum_hcd);
+       spin_lock_init(&dum_hcd->dum->lock);
+-      hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++      hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
+       dum_hcd->timer.function = dummy_timer;
+       dum_hcd->rh_state = DUMMY_RH_RUNNING;
+-- 
+2.43.0
+
diff --git a/queue-6.6/usb-gadget-dummy_hcd-set-transfer-interval-to-1-micr.patch b/queue-6.6/usb-gadget-dummy_hcd-set-transfer-interval-to-1-micr.patch
new file mode 100644 (file)
index 0000000..ec2d7e8
--- /dev/null
@@ -0,0 +1,60 @@
+From d28294e1394ec042a68a4394a940a385562103fb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 Apr 2024 17:22:11 +0200
+Subject: usb: gadget: dummy_hcd: Set transfer interval to 1 microframe
+
+From: Marcello Sylvester Bauer <sylv@sylv.io>
+
+[ Upstream commit 0a723ed3baa941ca4f51d87bab00661f41142835 ]
+
+Currently, the transfer polling interval is set to 1ms, which is the
+frame rate of full-speed and low-speed USB. The USB 2.0 specification
+introduces microframes (125 microseconds) to improve the timing
+precision of data transfers.
+
+Reducing the transfer interval to 1 microframe increases data throughput
+for high-speed and super-speed USB communication
+
+Signed-off-by: Marcello Sylvester Bauer <marcello.bauer@9elements.com>
+Signed-off-by: Marcello Sylvester Bauer <sylv@sylv.io>
+Link: https://lore.kernel.org/r/6295dbb84ca76884551df9eb157cce569377a22c.1712843963.git.sylv@sylv.io
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/gadget/udc/dummy_hcd.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
+index dab559d8ee8ca..f37b0d8386c1a 100644
+--- a/drivers/usb/gadget/udc/dummy_hcd.c
++++ b/drivers/usb/gadget/udc/dummy_hcd.c
+@@ -50,6 +50,8 @@
+ #define POWER_BUDGET  500     /* in mA; use 8 for low-power port testing */
+ #define POWER_BUDGET_3        900     /* in mA */
++#define DUMMY_TIMER_INT_NSECS 125000 /* 1 microframe */
++
+ static const char     driver_name[] = "dummy_hcd";
+ static const char     driver_desc[] = "USB Host+Gadget Emulator";
+@@ -1302,7 +1304,7 @@ static int dummy_urb_enqueue(
+       /* kick the scheduler, it'll do the rest */
+       if (!hrtimer_active(&dum_hcd->timer))
+-              hrtimer_start(&dum_hcd->timer, ms_to_ktime(1), HRTIMER_MODE_REL);
++              hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS), HRTIMER_MODE_REL);
+  done:
+       spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
+@@ -1993,7 +1995,7 @@ static enum hrtimer_restart dummy_timer(struct hrtimer *t)
+               dum_hcd->udev = NULL;
+       } else if (dum_hcd->rh_state == DUMMY_RH_RUNNING) {
+               /* want a 1 msec delay here */
+-              hrtimer_start(&dum_hcd->timer, ms_to_ktime(1), HRTIMER_MODE_REL);
++              hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS), HRTIMER_MODE_REL);
+       }
+       spin_unlock_irqrestore(&dum->lock, flags);
+-- 
+2.43.0
+
diff --git a/queue-6.6/usb-gadget-dummy_hcd-switch-to-hrtimer-transfer-sche.patch b/queue-6.6/usb-gadget-dummy_hcd-switch-to-hrtimer-transfer-sche.patch
new file mode 100644 (file)
index 0000000..51f1f2b
--- /dev/null
@@ -0,0 +1,153 @@
+From b866fbf01171f0e0fb93d5b94aec2a8b9db12db3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 Apr 2024 16:51:28 +0200
+Subject: usb: gadget: dummy_hcd: Switch to hrtimer transfer scheduler
+
+From: Marcello Sylvester Bauer <sylv@sylv.io>
+
+[ Upstream commit a7f3813e589fd8e2834720829a47b5eb914a9afe ]
+
+The dummy_hcd transfer scheduler assumes that the internal kernel timer
+frequency is set to 1000Hz to give a polling interval of 1ms. Reducing
+the timer frequency will result in an anti-proportional reduction in
+transfer performance. Switch to a hrtimer to decouple this association.
+
+Signed-off-by: Marcello Sylvester Bauer <marcello.bauer@9elements.com>
+Signed-off-by: Marcello Sylvester Bauer <sylv@sylv.io>
+Reviewed-by: Alan Stern <stern@rowland.harvard.edu>
+Link: https://lore.kernel.org/r/57a1c2180ff74661600e010c234d1dbaba1d0d46.1712843963.git.sylv@sylv.io
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/gadget/udc/dummy_hcd.c | 35 +++++++++++++++++-------------
+ 1 file changed, 20 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
+index 0953e1b5c0300..dab559d8ee8ca 100644
+--- a/drivers/usb/gadget/udc/dummy_hcd.c
++++ b/drivers/usb/gadget/udc/dummy_hcd.c
+@@ -30,7 +30,7 @@
+ #include <linux/slab.h>
+ #include <linux/errno.h>
+ #include <linux/init.h>
+-#include <linux/timer.h>
++#include <linux/hrtimer.h>
+ #include <linux/list.h>
+ #include <linux/interrupt.h>
+ #include <linux/platform_device.h>
+@@ -240,7 +240,7 @@ enum dummy_rh_state {
+ struct dummy_hcd {
+       struct dummy                    *dum;
+       enum dummy_rh_state             rh_state;
+-      struct timer_list               timer;
++      struct hrtimer                  timer;
+       u32                             port_status;
+       u32                             old_status;
+       unsigned long                   re_timeout;
+@@ -1301,8 +1301,8 @@ static int dummy_urb_enqueue(
+               urb->error_count = 1;           /* mark as a new urb */
+       /* kick the scheduler, it'll do the rest */
+-      if (!timer_pending(&dum_hcd->timer))
+-              mod_timer(&dum_hcd->timer, jiffies + 1);
++      if (!hrtimer_active(&dum_hcd->timer))
++              hrtimer_start(&dum_hcd->timer, ms_to_ktime(1), HRTIMER_MODE_REL);
+  done:
+       spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
+@@ -1323,7 +1323,7 @@ static int dummy_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+       rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+       if (!rc && dum_hcd->rh_state != DUMMY_RH_RUNNING &&
+                       !list_empty(&dum_hcd->urbp_list))
+-              mod_timer(&dum_hcd->timer, jiffies);
++              hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL);
+       spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
+       return rc;
+@@ -1777,7 +1777,7 @@ static int handle_control_request(struct dummy_hcd *dum_hcd, struct urb *urb,
+  * drivers except that the callbacks are invoked from soft interrupt
+  * context.
+  */
+-static void dummy_timer(struct timer_list *t)
++static enum hrtimer_restart dummy_timer(struct hrtimer *t)
+ {
+       struct dummy_hcd        *dum_hcd = from_timer(dum_hcd, t, timer);
+       struct dummy            *dum = dum_hcd->dum;
+@@ -1808,8 +1808,6 @@ static void dummy_timer(struct timer_list *t)
+               break;
+       }
+-      /* FIXME if HZ != 1000 this will probably misbehave ... */
+-
+       /* look at each urb queued by the host side driver */
+       spin_lock_irqsave(&dum->lock, flags);
+@@ -1817,7 +1815,7 @@ static void dummy_timer(struct timer_list *t)
+               dev_err(dummy_dev(dum_hcd),
+                               "timer fired with no URBs pending?\n");
+               spin_unlock_irqrestore(&dum->lock, flags);
+-              return;
++              return HRTIMER_NORESTART;
+       }
+       dum_hcd->next_frame_urbp = NULL;
+@@ -1995,10 +1993,12 @@ static void dummy_timer(struct timer_list *t)
+               dum_hcd->udev = NULL;
+       } else if (dum_hcd->rh_state == DUMMY_RH_RUNNING) {
+               /* want a 1 msec delay here */
+-              mod_timer(&dum_hcd->timer, jiffies + msecs_to_jiffies(1));
++              hrtimer_start(&dum_hcd->timer, ms_to_ktime(1), HRTIMER_MODE_REL);
+       }
+       spin_unlock_irqrestore(&dum->lock, flags);
++
++      return HRTIMER_NORESTART;
+ }
+ /*-------------------------------------------------------------------------*/
+@@ -2387,7 +2387,7 @@ static int dummy_bus_resume(struct usb_hcd *hcd)
+               dum_hcd->rh_state = DUMMY_RH_RUNNING;
+               set_link_state(dum_hcd);
+               if (!list_empty(&dum_hcd->urbp_list))
+-                      mod_timer(&dum_hcd->timer, jiffies);
++                      hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL);
+               hcd->state = HC_STATE_RUNNING;
+       }
+       spin_unlock_irq(&dum_hcd->dum->lock);
+@@ -2465,7 +2465,8 @@ static DEVICE_ATTR_RO(urbs);
+ static int dummy_start_ss(struct dummy_hcd *dum_hcd)
+ {
+-      timer_setup(&dum_hcd->timer, dummy_timer, 0);
++      hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++      dum_hcd->timer.function = dummy_timer;
+       dum_hcd->rh_state = DUMMY_RH_RUNNING;
+       dum_hcd->stream_en_ep = 0;
+       INIT_LIST_HEAD(&dum_hcd->urbp_list);
+@@ -2494,7 +2495,8 @@ static int dummy_start(struct usb_hcd *hcd)
+               return dummy_start_ss(dum_hcd);
+       spin_lock_init(&dum_hcd->dum->lock);
+-      timer_setup(&dum_hcd->timer, dummy_timer, 0);
++      hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++      dum_hcd->timer.function = dummy_timer;
+       dum_hcd->rh_state = DUMMY_RH_RUNNING;
+       INIT_LIST_HEAD(&dum_hcd->urbp_list);
+@@ -2513,8 +2515,11 @@ static int dummy_start(struct usb_hcd *hcd)
+ static void dummy_stop(struct usb_hcd *hcd)
+ {
+-      device_remove_file(dummy_dev(hcd_to_dummy_hcd(hcd)), &dev_attr_urbs);
+-      dev_info(dummy_dev(hcd_to_dummy_hcd(hcd)), "stopped\n");
++      struct dummy_hcd        *dum_hcd = hcd_to_dummy_hcd(hcd);
++
++      hrtimer_cancel(&dum_hcd->timer);
++      device_remove_file(dummy_dev(dum_hcd), &dev_attr_urbs);
++      dev_info(dummy_dev(dum_hcd), "stopped\n");
+ }
+ /*-------------------------------------------------------------------------*/
+-- 
+2.43.0
+