eventpoll: Control irq suspension for prefer_busy_poll
author Martin Karsten <mkarsten@uwaterloo.ca>
Sat, 9 Nov 2024 05:02:34 +0000 (05:02 +0000)
committer Jakub Kicinski <kuba@kernel.org>
Tue, 12 Nov 2024 02:45:06 +0000 (18:45 -0800)
When events are reported to userland and prefer_busy_poll is set, irqs
are temporarily suspended using napi_suspend_irqs.

If no events are found and ep_poll would go to sleep, irq suspension is
cancelled using napi_resume_irqs.
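
For reference, userspace opts into this behaviour through the epoll busy-poll
parameters ioctl. A minimal sketch follows, assuming UAPI headers that provide
EPIOCSPARAMS and struct epoll_params; the helper name is hypothetical and not
part of this patch:

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/eventpoll.h>   /* EPIOCSPARAMS, struct epoll_params */

    /* Hypothetical helper: enable busy polling with prefer_busy_poll on an
     * existing epoll fd, which is the precondition for irq suspension. */
    int enable_prefer_busy_poll(int epfd, uint32_t usecs, uint16_t budget)
    {
            struct epoll_params params = {
                    .busy_poll_usecs  = usecs,   /* how long ep_busy_loop() may spin */
                    .busy_poll_budget = budget,  /* packets per napi poll attempt */
                    .prefer_busy_poll = 1,       /* required for napi_suspend_irqs() */
            };

            if (ioctl(epfd, EPIOCSPARAMS, &params) < 0) {
                    perror("ioctl(EPIOCSPARAMS)");
                    return -1;
            }
            return 0;
    }

Note that in the broader series, suspension also depends on a nonzero
per-NAPI irq-suspend-timeout configured over the netdev netlink family; that
setup happens outside this file and is not shown here.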

Signed-off-by: Martin Karsten <mkarsten@uwaterloo.ca>
Co-developed-by: Joe Damato <jdamato@fastly.com>
Signed-off-by: Joe Damato <jdamato@fastly.com>
Tested-by: Joe Damato <jdamato@fastly.com>
Tested-by: Martin Karsten <mkarsten@uwaterloo.ca>
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
Reviewed-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
Link: https://patch.msgid.link/20241109050245.191288-5-jdamato@fastly.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
fs/eventpoll.c

index f9e0d9307dad4d33bfa10f6e840f6a586b4edd62..83bcb559b89f8458c7c906625e3e70ef5af75ef5 100644 (file)
@@ -457,6 +457,8 @@ static bool ep_busy_loop(struct eventpoll *ep, int nonblock)
                 * it back in when we have moved a socket with a valid NAPI
                 * ID onto the ready list.
                 */
+               if (prefer_busy_poll)
+                       napi_resume_irqs(napi_id);
                ep->napi_id = 0;
                return false;
        }
@@ -540,6 +542,22 @@ static long ep_eventpoll_bp_ioctl(struct file *file, unsigned int cmd,
        }
 }
 
+static void ep_suspend_napi_irqs(struct eventpoll *ep)
+{
+       unsigned int napi_id = READ_ONCE(ep->napi_id);
+
+       if (napi_id >= MIN_NAPI_ID && READ_ONCE(ep->prefer_busy_poll))
+               napi_suspend_irqs(napi_id);
+}
+
+static void ep_resume_napi_irqs(struct eventpoll *ep)
+{
+       unsigned int napi_id = READ_ONCE(ep->napi_id);
+
+       if (napi_id >= MIN_NAPI_ID && READ_ONCE(ep->prefer_busy_poll))
+               napi_resume_irqs(napi_id);
+}
+
 #else
 
 static inline bool ep_busy_loop(struct eventpoll *ep, int nonblock)
@@ -557,6 +575,14 @@ static long ep_eventpoll_bp_ioctl(struct file *file, unsigned int cmd,
        return -EOPNOTSUPP;
 }
 
+static void ep_suspend_napi_irqs(struct eventpoll *ep)
+{
+}
+
+static void ep_resume_napi_irqs(struct eventpoll *ep)
+{
+}
+
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 
 /*
@@ -788,6 +814,7 @@ static bool ep_refcount_dec_and_test(struct eventpoll *ep)
 
 static void ep_free(struct eventpoll *ep)
 {
+       ep_resume_napi_irqs(ep);
        mutex_destroy(&ep->mtx);
        free_uid(ep->user);
        wakeup_source_unregister(ep->ws);
@@ -2005,8 +2032,11 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
                         * trying again in search of more luck.
                         */
                        res = ep_send_events(ep, events, maxevents);
-                       if (res)
+                       if (res) {
+                               if (res > 0)
+                                       ep_suspend_napi_irqs(ep);
                                return res;
+                       }
                }
 
                if (timed_out)
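
Taken together, the hooks give the following lifecycle from the application's
point of view; a minimal event-loop sketch using only the plain epoll API,
with comments mapping back to the hunks above:

    #include <stdio.h>
    #include <sys/epoll.h>

    /* Sketch of the loop this change targets: as long as epoll_wait() keeps
     * returning events, ep_poll() re-arms irq suspension for the tracked NAPI
     * instance (ep_suspend_napi_irqs() after a successful ep_send_events()).
     * When the busy loop finds nothing and ep_poll() is about to sleep,
     * ep_busy_loop() resumes irqs (napi_resume_irqs()), so an idle application
     * falls back to ordinary interrupt-driven operation. */
    void run_event_loop(int epfd)
    {
            struct epoll_event events[64];

            for (;;) {
                    int n = epoll_wait(epfd, events, 64, -1);

                    if (n < 0) {
                            perror("epoll_wait");
                            return;
                    }
                    /* process events[0..n-1]; returning quickly to epoll_wait()
                     * keeps irqs suspended while traffic is flowing */
            }
    }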