enum {
IO_WQ_BIT_EXIT = 0, /* wq exiting */
+ IO_WQ_BIT_EXIT_ON_IDLE = 1, /* allow all workers to exit on idle */
};
enum {
raw_spin_lock(&acct->workers_lock);
/*
* Last sleep timed out. Exit if we're not the last worker,
- * or if someone modified our affinity.
+ * or if someone modified our affinity. If the wq is marked
+ * exit-on-idle, exit regardless, so that io-wq workers are
+ * not kept around for tasks that no longer have any active
+ * io_uring instances.
*/
- if (last_timeout && (exit_mask || acct->nr_workers > 1)) {
+ if ((last_timeout && (exit_mask || acct->nr_workers > 1)) ||
+ test_bit(IO_WQ_BIT_EXIT_ON_IDLE, &wq->state)) {
acct->nr_workers--;
raw_spin_unlock(&acct->workers_lock);
__set_current_state(TASK_RUNNING);
return false;
}
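+/*
+ * Allow (or disallow) all workers of this wq to exit as soon as they
+ * go idle, instead of lingering for their normal idle timeout.
+ */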
+void io_wq_set_exit_on_idle(struct io_wq *wq, bool enable)
+{
+ /* wq->task is cleared once the wq has already been shut down */
+ if (!wq->task)
+ return;
+
+ if (!enable) {
+ clear_bit(IO_WQ_BIT_EXIT_ON_IDLE, &wq->state);
+ return;
+ }
+
+ /* already enabled; workers were woken when the bit was first set */
+ if (test_and_set_bit(IO_WQ_BIT_EXIT_ON_IDLE, &wq->state))
+ return;
+
+ /*
+ * Wake every worker so that currently-idle ones re-check the wq
+ * state and exit instead of sleeping out their idle timeout.
+ */
+ rcu_read_lock();
+ io_wq_for_each_worker(wq, io_wq_worker_wake, NULL);
+ rcu_read_unlock();
+}
+
static void io_run_cancel(struct io_wq_work *work, struct io_wq *wq)
{
do {
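As a usage illustration, a minimal caller sketch follows. io_wq_set_exit_on_idle() and the io_uring_task->io_wq pointer are from this patch and the existing code; the nr_live_rings counter and the ring_count_changed() hook are assumptions made up for the example, not part of this patch.

/*
 * Hypothetical caller: enable exit-on-idle when the task's last ring
 * goes away, and restore normal idle behaviour when one is created.
 * nr_live_rings and this hook are illustrative only.
 */
static void io_uring_task_ring_count_changed(struct io_uring_task *tctx)
{
	if (!tctx->io_wq)
		return;
	io_wq_set_exit_on_idle(tctx->io_wq, !atomic_read(&tctx->nr_live_rings));
}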
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
void io_wq_exit_start(struct io_wq *wq);
void io_wq_put_and_exit(struct io_wq *wq);
+void io_wq_set_exit_on_idle(struct io_wq *wq, bool enable);
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_hash_work(struct io_wq_work *work, void *val);
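One property worth noting about the disable path: clearing the bit does not eagerly recreate anything. Workers that already exited while the flag was set come back on demand, since io-wq spawns new workers from the enqueue path when work arrives and no free worker is available, so the pool simply repopulates the next time work is queued.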