]> git.ipfire.org Git - thirdparty/bind9.git/commitdiff
- isc_task_create_bound - create a task bound to a specific task queue
authorWitold Kręcicki <wpk@isc.org>
Wed, 21 Nov 2018 09:50:50 +0000 (09:50 +0000)
committerWitold Krecicki <wpk@isc.org>
Fri, 23 Nov 2018 09:34:02 +0000 (04:34 -0500)
  If we know that we'll have a task pool doing a specific thing, it's better
  to use this knowledge and bind tasks to task queues; this behaves better
  than randomly choosing the task queue.

- use bound resolver tasks - we have a pool of tasks doing resolutions;
  we can spread the load evenly using isc_task_create_bound

- quantum set universally to 25

CHANGES
lib/dns/client.c
lib/dns/dispatch.c
lib/dns/resolver.c
lib/isc/include/isc/task.h
lib/isc/task.c
lib/isc/win32/libisc.def.in
lib/ns/client.c

diff --git a/CHANGES b/CHANGES
index 1a223d02a77ca708d872e865054a77f3953d16fc..83703622cd8c5674a2013985937ada38447c89fa 100644 (file)
--- a/CHANGES
+++ b/CHANGES
@@ -1,4 +1,6 @@
-5099.   [func]          Failed mutex and conditional creations are always
+5100.  [func]          Pin resolver tasks to specific task queues. [GL !1117]
+
+5099.  [func]          Failed mutex and conditional creations are always
                        fatal. [GL #674]
 
        --- 9.13.4 released ---
index f08dec7f9e18a67a4adb4b6febc6886e894a3006..9b7a212cf76b53c1423ab088a0c7733a93e7b5b7 100644 (file)
@@ -458,7 +458,7 @@ dns_client_createx(isc_mem_t *mctx, isc_appctx_t *actx,
        client->timermgr = timermgr;
 
        client->task = NULL;
-       result = isc_task_create(client->taskmgr, 50, &client->task);
+       result = isc_task_create(client->taskmgr, 0, &client->task);
        if (result != ISC_R_SUCCESS)
                goto cleanup;
 
index 874415d777cee2a02d157337393122032ce03dc1..b6fdd52051e8e457d257bd72fa4e55daa2d641d5 100644 (file)
@@ -2918,7 +2918,7 @@ dispatch_createudp(dns_dispatchmgr_t *mgr, isc_socketmgr_t *sockmgr,
        }
        for (i = 0; i < disp->ntasks; i++) {
                disp->task[i] = NULL;
-               result = isc_task_create(taskmgr, 50, &disp->task[i]);
+               result = isc_task_create(taskmgr, 0, &disp->task[i]);
                if (result != ISC_R_SUCCESS) {
                        while (--i >= 0) {
                                isc_task_shutdown(disp->task[i]);
index bfb58f1779fe251c58e57fa408b141cdbce2b0c7..8beecbcb07ca995497d09d9dbd404c78fddb0ae7 100644 (file)
@@ -4382,8 +4382,8 @@ fctx_shutdown(fetchctx_t *fctx) {
         */
        if (fctx->state != fetchstate_init) {
                cevent = &fctx->control_event;
-               isc_task_send(fctx->res->buckets[fctx->bucketnum].task,
-                             &cevent);
+               isc_task_sendto(fctx->res->buckets[fctx->bucketnum].task,
+                               &cevent, fctx->bucketnum);
        }
 }
 
@@ -9924,7 +9924,12 @@ dns_resolver_create(dns_view_t *view,
                isc_mutex_init(&res->buckets[i].lock);
 
                res->buckets[i].task = NULL;
-               result = isc_task_create(taskmgr, 0, &res->buckets[i].task);
+               /*
+                * Since we have a pool of tasks, we bind them to task
+                * queues to spread the load evenly.
+                */
+               result = isc_task_create_bound(taskmgr, 0,
+                                              &res->buckets[i].task, i);
                if (result != ISC_R_SUCCESS) {
                        isc_mutex_destroy(&res->buckets[i].lock);
                        goto cleanup_buckets;
index 4ee783aedc0718c49340017ff99b89af3e01aee6..b2d9c4838460b256481932db37056ac598bdd6d0 100644 (file)
@@ -136,6 +136,10 @@ struct isc_task {
 isc_result_t
 isc_task_create(isc_taskmgr_t *manager, unsigned int quantum,
                isc_task_t **taskp);
+
+isc_result_t
+isc_task_create_bound(isc_taskmgr_t *manager, unsigned int quantum,
+                     isc_task_t **taskp, int threadid);
 /*%<
  * Create a task.
  *
index ef0d311c6293bd0f352f4c6139b7a79cf0a6016c..a51d3359abfcc42137126362420c4bc8ea46b898 100644 (file)
  * state it will stay on the runner it's currently on, if a task is in idle
  * state it can be woken up on a specific runner with isc_task_sendto - that
  * helps with data locality on CPU.
+ *
+ * To make the load even, some tasks (from task pools) are bound to
+ * specific queues using isc_task_create_bound. This way load balancing
+ * between CPUs/queues happens on the higher layer.
  */
 
 #ifdef ISC_TASK_TRACE
@@ -104,6 +108,7 @@ struct isc__task {
        char                            name[16];
        void *                          tag;
        unsigned int                    threadid;
+       bool                            bound;
        /* Locked by task manager lock. */
        LINK(isc__task_t)               link;
        LINK(isc__task_t)               ready_link;
@@ -171,8 +176,7 @@ void
 isc__taskmgr_resume(isc_taskmgr_t *manager0);
 
 
-#define DEFAULT_TASKMGR_QUANTUM                10
-#define DEFAULT_DEFAULT_QUANTUM                5
+#define DEFAULT_DEFAULT_QUANTUM                25
 #define FINISHED(m)                    ((m)->exiting && EMPTY((m)->tasks))
 
 /*%
@@ -243,7 +247,14 @@ task_finished(isc__task_t *task) {
 
 isc_result_t
 isc_task_create(isc_taskmgr_t *manager0, unsigned int quantum,
-                isc_task_t **taskp)
+               isc_task_t **taskp)
+{
+       return (isc_task_create_bound(manager0, quantum, taskp, -1));
+}
+
+isc_result_t
+isc_task_create_bound(isc_taskmgr_t *manager0, unsigned int quantum,
+                     isc_task_t **taskp, int threadid)
 {
        isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
        isc__task_t *task;
@@ -257,17 +268,31 @@ isc_task_create(isc_taskmgr_t *manager0, unsigned int quantum,
                return (ISC_R_NOMEMORY);
        XTRACE("isc_task_create");
        task->manager = manager;
-       task->threadid = atomic_fetch_add_explicit(&manager->curq, 1,
-                                                  memory_order_relaxed)
-                                                  % manager->workers;
-       isc_mutex_init(&task->lock);
 
+       if (threadid == -1) {
+               /*
+                * Task is not pinned to a queue; its threadid will be
+                * chosen when the first event is sent to it - either
+                * randomly or as specified by isc_task_sendto.
+                */
+               task->bound = false;
+               task->threadid = 0;
+       } else {
+               /*
+                * Task is pinned to a queue, it'll always be run
+                * by a specific thread.
+                */
+               task->bound = true;
+               task->threadid = threadid % manager->workers;
+       }
+
+       isc_mutex_init(&task->lock);
        task->state = task_state_idle;
        task->references = 1;
        INIT_LIST(task->events);
        INIT_LIST(task->on_shutdown);
        task->nevents = 0;
-       task->quantum = quantum;
+       task->quantum = (quantum > 0) ? quantum : manager->default_quantum;
        task->flags = 0;
        task->now = 0;
        isc_time_settoepoch(&task->tnow);
@@ -280,11 +305,10 @@ isc_task_create(isc_taskmgr_t *manager0, unsigned int quantum,
        exiting = false;
        LOCK(&manager->lock);
        if (!manager->exiting) {
-               if (task->quantum == 0)
-                       task->quantum = manager->default_quantum;
                APPEND(manager->tasks, task, link);
-       } else
+       } else {
                exiting = true;
+       }
        UNLOCK(&manager->lock);
 
        if (exiting) {
@@ -490,7 +514,10 @@ isc_task_sendto(isc_task_t *task0, isc_event_t **eventp, int c) {
        REQUIRE(VALID_TASK(task));
        XTRACE("isc_task_send");
 
-       if (c < 0) {
+       /* If task is bound ignore provided cpu. */
+       if (task->bound) {
+               c = task->threadid;
+       } else if (c < 0) {
                c = atomic_fetch_add_explicit(&task->manager->curq, 1,
                                              memory_order_relaxed);
        }
@@ -540,7 +567,9 @@ isc_task_sendtoanddetach(isc_task_t **taskp, isc_event_t **eventp, int c) {
        REQUIRE(VALID_TASK(task));
        XTRACE("isc_task_sendanddetach");
 
-       if (c < 0) {
+       if (task->bound) {
+               c = task->threadid;
+       } else if (c < 0) {
                c = atomic_fetch_add_explicit(&task->manager->curq, 1,
                                              memory_order_relaxed);
        }
index c1ad8bb250830c9e47d4c68968077f2eea6c2744..435038f367ad697f2be63944b775deaa4103faac 100644 (file)
@@ -566,6 +566,7 @@ isc_syslog_facilityfromstring
 isc_task_attach
 isc_task_beginexclusive
 isc_task_create
+isc_task_create_bound
 isc_task_destroy
 isc_task_detach
 isc_task_endexclusive
index 9ae04be75814890b8a250bf21c1232bee1d7f607..1f9f47218c7bbfc00742066f0d2ac8103e61ddfb 100644 (file)
@@ -2964,7 +2964,7 @@ client_create(ns_clientmgr_t *manager, ns_client_t **clientp) {
        ns_server_attach(manager->sctx, &client->sctx);
 
        client->task = NULL;
-       result = isc_task_create(manager->taskmgr, 50, &client->task);
+       result = isc_task_create(manager->taskmgr, 0, &client->task);
        if (result != ISC_R_SUCCESS)
                goto cleanup_client;
        isc_task_setname(client->task, "client", client);