pid_nr = pid_vnr(pid);
put_pid(pid);
- pit->worker = kthread_create_worker(0, "kvm-pit/%d", pid_nr);
+ pit->worker = kthread_run_worker(0, "kvm-pit/%d", pid_nr);
if (IS_ERR(pit->worker))
goto fail_kthread;
crypto_init_queue(&engine->queue, qlen);
spin_lock_init(&engine->queue_lock);
- engine->kworker = kthread_create_worker(0, "%s", engine->name);
+ engine->kworker = kthread_run_worker(0, "%s", engine->name);
if (IS_ERR(engine->kworker)) {
dev_err(dev, "failed to create crypto request pump task\n");
return NULL;
if (fie_disabled)
return;
- kworker_fie = kthread_create_worker(0, "cppc_fie");
+ kworker_fie = kthread_run_worker(0, "cppc_fie");
if (IS_ERR(kworker_fie)) {
pr_warn("%s: failed to create kworker_fie: %ld\n", __func__,
PTR_ERR(kworker_fie));
INIT_LIST_HEAD(&vblank->pending_work);
init_waitqueue_head(&vblank->work_wait_queue);
- worker = kthread_create_worker(0, "card%d-crtc%d",
+ worker = kthread_run_worker(0, "card%d-crtc%d",
vblank->dev->primary->index,
vblank->pipe);
if (IS_ERR(worker))
if (!data[n].ce[0])
continue;
- worker = kthread_create_worker(0, "igt/parallel:%s",
+ worker = kthread_run_worker(0, "igt/parallel:%s",
data[n].ce[0]->engine->name);
if (IS_ERR(worker)) {
err = PTR_ERR(worker);
arg[id].batch = NULL;
arg[id].count = 0;
- worker[id] = kthread_create_worker(0, "igt/smoke:%d", id);
+ worker[id] = kthread_run_worker(0, "igt/smoke:%d", id);
if (IS_ERR(worker[id])) {
err = PTR_ERR(worker[id]);
break;
threads[tmp].engine = other;
threads[tmp].flags = flags;
- worker = kthread_create_worker(0, "igt/%s",
+ worker = kthread_run_worker(0, "igt/%s",
other->name);
if (IS_ERR(worker)) {
err = PTR_ERR(worker);
return -ENOMEM;
for_each_gt(gt, i915, i) {
- threads[i].worker = kthread_create_worker(0, "igt/slpc_parallel:%d", gt->info.id);
+ threads[i].worker = kthread_run_worker(0, "igt/slpc_parallel:%d", gt->info.id);
if (IS_ERR(threads[i].worker)) {
ret = PTR_ERR(threads[i].worker);
for (n = 0; n < ncpus; n++) {
struct kthread_worker *worker;
- worker = kthread_create_worker(0, "igt/%d", n);
+ worker = kthread_run_worker(0, "igt/%d", n);
if (IS_ERR(worker)) {
ret = PTR_ERR(worker);
ncpus = n;
for_each_uabi_engine(engine, i915) {
struct kthread_worker *worker;
- worker = kthread_create_worker(0, "igt/parallel:%s",
+ worker = kthread_run_worker(0, "igt/parallel:%s",
engine->name);
if (IS_ERR(worker)) {
err = PTR_ERR(worker);
unsigned int i = idx * ncpus + n;
struct kthread_worker *worker;
- worker = kthread_create_worker(0, "igt/%d.%d", idx, n);
+ worker = kthread_run_worker(0, "igt/%d.%d", idx, n);
if (IS_ERR(worker)) {
ret = PTR_ERR(worker);
goto out_flush;
memset(&engines[idx].p, 0, sizeof(engines[idx].p));
- worker = kthread_create_worker(0, "igt:%s",
+ worker = kthread_run_worker(0, "igt:%s",
engine->name);
if (IS_ERR(worker)) {
err = PTR_ERR(worker);
mutex_init(&kms->dump_mutex);
- kms->dump_worker = kthread_create_worker(0, "%s", "disp_snapshot");
+ kms->dump_worker = kthread_run_worker(0, "%s", "disp_snapshot");
if (IS_ERR(kms->dump_worker))
DRM_ERROR("failed to create disp state task\n");
timer->kms = kms;
timer->crtc_idx = crtc_idx;
- timer->worker = kthread_create_worker(0, "atomic-worker-%d", crtc_idx);
+ timer->worker = kthread_run_worker(0, "atomic-worker-%d", crtc_idx);
if (IS_ERR(timer->worker)) {
int ret = PTR_ERR(timer->worker);
timer->worker = NULL;
gpu->funcs = funcs;
gpu->name = name;
- gpu->worker = kthread_create_worker(0, "gpu-worker");
+ gpu->worker = kthread_run_worker(0, "gpu-worker");
if (IS_ERR(gpu->worker)) {
ret = PTR_ERR(gpu->worker);
gpu->worker = NULL;
/* initialize event thread */
ev_thread = &priv->event_thread[drm_crtc_index(crtc)];
ev_thread->dev = ddev;
- ev_thread->worker = kthread_create_worker(0, "crtc_event:%d", crtc->base.id);
+ ev_thread->worker = kthread_run_worker(0, "crtc_event:%d", crtc->base.id);
if (IS_ERR(ev_thread->worker)) {
ret = PTR_ERR(ev_thread->worker);
DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
dev_err(&pdev->dev, "failed to get irq resource, falling back to polling\n");
hrtimer_init(&dev->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
dev->hrtimer.function = &wave5_vpu_timer_callback;
- dev->worker = kthread_create_worker(0, "vpu_irq_thread");
+ dev->worker = kthread_run_worker(0, "vpu_irq_thread");
if (IS_ERR(dev->worker)) {
dev_err(&pdev->dev, "failed to create vpu irq worker\n");
ret = PTR_ERR(dev->worker);
kthread_init_delayed_work(&chip->irq_poll_work,
mv88e6xxx_irq_poll);
- chip->kworker = kthread_create_worker(0, "%s", dev_name(chip->dev));
+ chip->kworker = kthread_run_worker(0, "%s", dev_name(chip->dev));
if (IS_ERR(chip->kworker))
return PTR_ERR(chip->kworker);
struct kthread_worker *kworker;
kthread_init_delayed_work(&d->work, ice_dpll_periodic_work);
- kworker = kthread_create_worker(0, "ice-dplls-%s",
+ kworker = kthread_run_worker(0, "ice-dplls-%s",
dev_name(ice_pf_to_dev(pf)));
if (IS_ERR(kworker))
return PTR_ERR(kworker);
pf->gnss_serial = gnss;
kthread_init_delayed_work(&gnss->read_work, ice_gnss_read);
- kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev));
+ kworker = kthread_run_worker(0, "ice-gnss-%s", dev_name(dev));
if (IS_ERR(kworker)) {
kfree(gnss);
return NULL;
/* Allocate a kworker for handling work required for the ports
* connected to the PTP hardware clock.
*/
- kworker = kthread_create_worker(0, "ice-ptp-%s",
+ kworker = kthread_run_worker(0, "ice-ptp-%s",
dev_name(ice_pf_to_dev(pf)));
if (IS_ERR(kworker))
return PTR_ERR(kworker);
int err;
ec_spi->high_pri_worker =
- kthread_create_worker(0, "cros_ec_spi_high_pri");
+ kthread_run_worker(0, "cros_ec_spi_high_pri");
if (IS_ERR(ec_spi->high_pri_worker)) {
err = PTR_ERR(ec_spi->high_pri_worker);
if (ptp->info->do_aux_work) {
kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
- ptp->kworker = kthread_create_worker(0, "ptp%d", ptp->index);
+ ptp->kworker = kthread_run_worker(0, "ptp%d", ptp->index);
if (IS_ERR(ptp->kworker)) {
err = PTR_ERR(ptp->kworker);
pr_err("failed to create ptp aux_worker %d\n", err);
ctlr->busy = false;
ctlr->queue_empty = true;
- ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
+ ctlr->kworker = kthread_run_worker(0, dev_name(&ctlr->dev));
if (IS_ERR(ctlr->kworker)) {
dev_err(&ctlr->dev, "failed to create message pump kworker\n");
return PTR_ERR(ctlr->kworker);
mutex_init(&port->lock);
mutex_init(&port->swap_lock);
- port->wq = kthread_create_worker(0, dev_name(dev));
+ port->wq = kthread_run_worker(0, dev_name(dev));
if (IS_ERR(port->wq))
return ERR_CAST(port->wq);
sched_set_fifo(port->wq->task);
dev = &vdpasim->vdpa.dev;
kthread_init_work(&vdpasim->work, vdpasim_work_fn);
- vdpasim->worker = kthread_create_worker(0, "vDPA sim worker: %s",
+ vdpasim->worker = kthread_run_worker(0, "vDPA sim worker: %s",
dev_attr->name);
if (IS_ERR(vdpasim->worker))
goto err_iommu;
{
int err;
- watchdog_kworker = kthread_create_worker(0, "watchdogd");
+ watchdog_kworker = kthread_run_worker(0, "watchdogd");
if (IS_ERR(watchdog_kworker)) {
pr_err("Failed to create watchdog kworker\n");
return PTR_ERR(watchdog_kworker);
static struct kthread_worker *erofs_init_percpu_worker(int cpu)
{
struct kthread_worker *worker =
- kthread_create_worker_on_cpu(cpu, 0, "erofs_worker/%u");
+ kthread_run_worker_on_cpu(cpu, 0, "erofs_worker/%u");
if (IS_ERR(worker))
return worker;
const char namefmt[], ...);
#define kthread_create_worker(flags, namefmt, ...) \
-({ \
- struct kthread_worker *__kw \
- = kthread_create_worker_on_node(flags, NUMA_NO_NODE, \
- namefmt, ## __VA_ARGS__); \
- if (!IS_ERR(__kw)) \
- wake_up_process(__kw->task); \
- __kw; \
+ kthread_create_worker_on_node(flags, NUMA_NO_NODE, namefmt, ## __VA_ARGS__)
+
+/**
+ * kthread_run_worker - create and wake a kthread worker.
+ * @flags: flags modifying the default behavior of the worker
+ * @namefmt: printf-style name for the thread.
+ *
+ * Description: Convenient wrapper for kthread_create_worker() followed by
+ * wake_up_process(). Returns the kthread_worker or ERR_PTR(-ENOMEM).
+ */
+#define kthread_run_worker(flags, namefmt, ...) \
+({ \
+ struct kthread_worker *__kw \
+ = kthread_create_worker(flags, namefmt, ## __VA_ARGS__); \
+ if (!IS_ERR(__kw)) \
+ wake_up_process(__kw->task); \
+ __kw; \
})
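A minimal usage sketch of the new helper (illustration only, not part of the patch; example_work_fn, example_work and the worker name are hypothetical), using only the kthread worker APIs already seen in the conversions above:

#include <linux/err.h>
#include <linux/kthread.h>

static struct kthread_worker *example_worker;
static struct kthread_work example_work;

static void example_work_fn(struct kthread_work *work)
{
	/* handle one queued item */
}

static int example_start(void)
{
	/* Create the worker and wake it in one step, as the converted callers do. */
	example_worker = kthread_run_worker(0, "example-worker");
	if (IS_ERR(example_worker))
		return PTR_ERR(example_worker);

	kthread_init_work(&example_work, example_work_fn);
	kthread_queue_work(example_worker, &example_work);
	return 0;
}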
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
const char namefmt[]);
+/**
+ * kthread_run_worker_on_cpu - create and wake a cpu bound kthread worker.
+ * @cpu: CPU number
+ * @flags: flags modifying the default behavior of the worker
+ * @namefmt: printf-style name for the thread. Format is restricted
+ * to "name.*%u". Code fills in cpu number.
+ *
+ * Description: Convenient wrapper for kthread_create_worker_on_cpu()
+ * followed by wake_up_process(). Returns the kthread_worker or
+ * ERR_PTR(-ENOMEM).
+ */
+static inline struct kthread_worker *
+kthread_run_worker_on_cpu(int cpu, unsigned int flags,
+ const char namefmt[])
+{
+ struct kthread_worker *kw;
+
+ kw = kthread_create_worker_on_cpu(cpu, flags, namefmt);
+ if (!IS_ERR(kw))
+ wake_up_process(kw->task);
+
+ return kw;
+}
+
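A sketch of why create and wake are split for the CPU-bound variant (illustration only, not part of the patch; start_rt_cpu_worker and the "example_rt/%u" name are hypothetical): a caller may configure the bound worker before its first run, while callers that need no such setup, like the erofs hunk above, call kthread_run_worker_on_cpu() directly:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static struct kthread_worker *start_rt_cpu_worker(int cpu)
{
	struct kthread_worker *worker;

	/* Worker is created and bound to @cpu, but not running yet. */
	worker = kthread_create_worker_on_cpu(cpu, 0, "example_rt/%u");
	if (IS_ERR(worker))
		return worker;

	sched_set_fifo(worker->task);	/* adjust policy before the first run */
	wake_up_process(worker->task);	/* same wake-up kthread_run_worker_on_cpu() performs */

	return worker;
}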
bool kthread_queue_work(struct kthread_worker *worker,
struct kthread_work *work);
worker = __kthread_create_worker_on_node(flags, node, namefmt, args);
va_end(args);
- if (worker)
- wake_up_process(worker->task);
-
return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_node);
-static __printf(3, 4) struct kthread_worker *
-__kthread_create_worker_on_cpu(int cpu, unsigned int flags,
- const char namefmt[], ...)
-{
- struct kthread_worker *worker;
- va_list args;
-
- va_start(args, namefmt);
- worker = __kthread_create_worker_on_node(flags, cpu_to_node(cpu),
- namefmt, args);
- va_end(args);
-
- if (worker) {
- kthread_bind(worker->task, cpu);
- wake_up_process(worker->task);
- }
-
- return worker;
-}
-
/**
* kthread_create_worker_on_cpu - create a kthread worker and bind it
* to a given CPU and the associated NUMA node.
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
const char namefmt[])
{
- return __kthread_create_worker_on_cpu(cpu, flags, namefmt, cpu);
+ struct kthread_worker *worker;
+
+ worker = kthread_create_worker_on_node(flags, cpu_to_node(cpu), namefmt, cpu);
+ if (!IS_ERR(worker))
+ kthread_bind(worker->task, cpu);
+
+ return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);
if (rnp->exp_kworker)
return;
- kworker = kthread_create_worker(0, name, rnp_index);
+ kworker = kthread_run_worker(0, name, rnp_index);
if (IS_ERR_OR_NULL(kworker)) {
pr_err("Failed to create par gp kworker on %d/%d\n",
rnp->grplo, rnp->grphi);
const char *name = "rcu_exp_gp_kthread_worker";
struct sched_param param = { .sched_priority = kthread_prio };
- rcu_exp_gp_kworker = kthread_create_worker(0, name);
+ rcu_exp_gp_kworker = kthread_run_worker(0, name);
if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
pr_err("Failed to create %s!\n", name);
rcu_exp_gp_kworker = NULL;
{
struct kthread_worker *helper;
- helper = kthread_create_worker(0, name);
+ helper = kthread_run_worker(0, name);
if (!IS_ERR(helper))
sched_set_fifo(helper->task);
return helper;
unsigned long thresh;
unsigned long bogo;
- pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release");
+ pwq_release_worker = kthread_run_worker(0, "pool_workqueue_release");
BUG_ON(IS_ERR(pwq_release_worker));
/* if the user set it to a specific value, keep it */
if (!priv)
return -ENOMEM;
- xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit",
+ xmit_worker = kthread_run_worker(0, "dsa%d:%d_xmit",
ds->dst->index, ds->index);
if (IS_ERR(xmit_worker)) {
ret = PTR_ERR(xmit_worker);
if (!priv)
return -ENOMEM;
- priv->xmit_worker = kthread_create_worker(0, "felix_xmit");
+ priv->xmit_worker = kthread_run_worker(0, "felix_xmit");
if (IS_ERR(priv->xmit_worker)) {
err = PTR_ERR(priv->xmit_worker);
kfree(priv);
spin_lock_init(&priv->meta_lock);
- xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit",
+ xmit_worker = kthread_run_worker(0, "dsa%d:%d_xmit",
ds->dst->index, ds->index);
if (IS_ERR(xmit_worker)) {
err = PTR_ERR(xmit_worker);