sched_ext: Factor out move_task_between_dsqs() from scx_dispatch_from_dsq()
author Tejun Heo <tj@kernel.org>
Wed, 25 Sep 2024 00:08:52 +0000 (14:08 -1000)
committer Tejun Heo <tj@kernel.org>
Fri, 27 Sep 2024 21:06:28 +0000 (11:06 -1000)
Pure reorganization. No functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
kernel/sched/ext.c

index 3cd7c50a51c506e9536b10294c7e6de4a7c003ec..74d0ebc5e15eb3c0892891c2d73eb9eaab0921cb 100644 (file)
@@ -2369,6 +2369,73 @@ static inline bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *r
 static inline bool consume_remote_task(struct rq *this_rq, struct task_struct *p, struct scx_dispatch_q *dsq, struct rq *task_rq) { return false; }
 #endif /* CONFIG_SMP */
 
+/**
+ * move_task_between_dsqs() - Move a task from one DSQ to another
+ * @p: target task
+ * @enq_flags: %SCX_ENQ_*
+ * @src_dsq: DSQ @p is currently on, must not be a local DSQ
+ * @dst_dsq: DSQ @p is being moved to, can be any DSQ
+ *
+ * Must be called with @p's task_rq and @src_dsq locked. If @dst_dsq is a local
+ * DSQ and @p is on a different CPU, @p will be migrated and thus its task_rq
+ * will change. As @p's task_rq is locked, this function doesn't need to use the
+ * holding_cpu mechanism.
+ *
+ * On return, @src_dsq is unlocked and only @p's new task_rq, which is the
+ * return value, is locked.
+ */
+static struct rq *move_task_between_dsqs(struct task_struct *p, u64 enq_flags,
+                                        struct scx_dispatch_q *src_dsq,
+                                        struct scx_dispatch_q *dst_dsq)
+{
+       struct rq *src_rq = task_rq(p), *dst_rq;
+
+       BUG_ON(src_dsq->id == SCX_DSQ_LOCAL);
+       lockdep_assert_held(&src_dsq->lock);
+       lockdep_assert_rq_held(src_rq);
+
+       if (dst_dsq->id == SCX_DSQ_LOCAL) {
+               dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
+               if (!task_can_run_on_remote_rq(p, dst_rq, true)) {
+                       dst_dsq = find_global_dsq(p);
+                       dst_rq = src_rq;
+               }
+       } else {
+               /* no need to migrate if destination is a non-local DSQ */
+               dst_rq = src_rq;
+       }
+
+       /*
+        * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different
+        * CPU, @p will be migrated.
+        */
+       if (dst_dsq->id == SCX_DSQ_LOCAL) {
+               /* @p is going from a non-local DSQ to a local DSQ */
+               if (src_rq == dst_rq) {
+                       task_unlink_from_dsq(p, src_dsq);
+                       move_local_task_to_local_dsq(p, enq_flags,
+                                                    src_dsq, dst_rq);
+                       raw_spin_unlock(&src_dsq->lock);
+               } else {
+                       raw_spin_unlock(&src_dsq->lock);
+                       move_remote_task_to_local_dsq(p, enq_flags,
+                                                     src_rq, dst_rq);
+               }
+       } else {
+               /*
+                * @p is going from a non-local DSQ to a non-local DSQ. As
+                * $src_dsq is already locked, do an abbreviated dequeue.
+                */
+               task_unlink_from_dsq(p, src_dsq);
+               p->scx.dsq = NULL;
+               raw_spin_unlock(&src_dsq->lock);
+
+               dispatch_enqueue(dst_dsq, p, enq_flags);
+       }
+
+       return dst_rq;
+}
+
 static bool consume_dispatch_q(struct rq *rq, struct scx_dispatch_q *dsq)
 {
        struct task_struct *p;
@@ -6033,7 +6100,7 @@ static bool scx_dispatch_from_dsq(struct bpf_iter_scx_dsq_kern *kit,
                                  u64 enq_flags)
 {
        struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq;
-       struct rq *this_rq, *src_rq, *dst_rq, *locked_rq;
+       struct rq *this_rq, *src_rq, *locked_rq;
        bool dispatched = false;
        bool in_balance;
        unsigned long flags;
@@ -6079,51 +6146,18 @@ static bool scx_dispatch_from_dsq(struct bpf_iter_scx_dsq_kern *kit,
        /* @p is still on $src_dsq and stable, determine the destination */
        dst_dsq = find_dsq_for_dispatch(this_rq, dsq_id, p);
 
-       if (dst_dsq->id == SCX_DSQ_LOCAL) {
-               dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
-               if (!task_can_run_on_remote_rq(p, dst_rq, true)) {
-                       dst_dsq = find_global_dsq(p);
-                       dst_rq = src_rq;
-               }
-       } else {
-               /* no need to migrate if destination is a non-local DSQ */
-               dst_rq = src_rq;
-       }
-
        /*
-        * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different
-        * CPU, @p will be migrated.
+        * Apply vtime and slice updates before moving so that the new time is
+        * visible before inserting into $dst_dsq. @p is still on $src_dsq but
+        * this is safe as we're locking it.
         */
-       if (dst_dsq->id == SCX_DSQ_LOCAL) {
-               /* @p is going from a non-local DSQ to a local DSQ */
-               if (src_rq == dst_rq) {
-                       task_unlink_from_dsq(p, src_dsq);
-                       move_local_task_to_local_dsq(p, enq_flags,
-                                                    src_dsq, dst_rq);
-                       raw_spin_unlock(&src_dsq->lock);
-               } else {
-                       raw_spin_unlock(&src_dsq->lock);
-                       move_remote_task_to_local_dsq(p, enq_flags,
-                                                     src_rq, dst_rq);
-                       locked_rq = dst_rq;
-               }
-       } else {
-               /*
-                * @p is going from a non-local DSQ to a non-local DSQ. As
-                * $src_dsq is already locked, do an abbreviated dequeue.
-                */
-               task_unlink_from_dsq(p, src_dsq);
-               p->scx.dsq = NULL;
-               raw_spin_unlock(&src_dsq->lock);
-
-               if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
-                       p->scx.dsq_vtime = kit->vtime;
-               dispatch_enqueue(dst_dsq, p, enq_flags);
-       }
-
+       if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
+               p->scx.dsq_vtime = kit->vtime;
        if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE)
                p->scx.slice = kit->slice;
 
+       /* execute move */
+       locked_rq = move_task_between_dsqs(p, enq_flags, src_dsq, dst_dsq);
        dispatched = true;
 out:
        if (in_balance) {
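
For reference, a minimal sketch (not part of the commit) of how a caller is expected to use the factored-out helper, following the contract in its kernel-doc above. The wrapper name example_move() and its argument list are hypothetical; find_dsq_for_dispatch() and move_task_between_dsqs() are the functions shown in the diff.

/*
 * Illustrative sketch only. Per the kernel-doc, the caller must hold @p's
 * task_rq lock and @src_dsq's lock; on return, @src_dsq is unlocked and only
 * the returned rq (possibly a different CPU's rq, if @p was migrated to a
 * remote local DSQ) remains locked.
 */
static struct rq *example_move(struct rq *this_rq, struct task_struct *p,
			       struct scx_dispatch_q *src_dsq,
			       u64 dsq_id, u64 enq_flags)
{
	struct scx_dispatch_q *dst_dsq;

	/* resolve @dsq_id to the destination DSQ for @p */
	dst_dsq = find_dsq_for_dispatch(this_rq, dsq_id, p);

	/*
	 * Move @p from @src_dsq to @dst_dsq; the helper unlocks @src_dsq and
	 * returns @p's (possibly new) task_rq, which is still locked.
	 */
	return move_task_between_dsqs(p, enq_flags, src_dsq, dst_dsq);
}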