static void
get_clientmctx(ns_clientmgr_t *manager, isc_mem_t **mctxp) {
	isc_mem_t *clientmctx;
#if CLIENT_NMCTXS > 0
	unsigned int nextmctx;
	int tid;
#endif

	/*
	 * Attach to a memory context for a new client.  With a pool
	 * configured (CLIENT_NMCTXS > 0) each netmgr thread gets a
	 * dedicated context (selected by its thread id) to reduce lock
	 * contention; off-thread callers round-robin over the pool.
	 * Pool contexts are created lazily on first use, under the
	 * manager lock.  Without a pool, the manager's own context is
	 * shared by all clients.
	 */
	MTRACE("clientmctx");

#if CLIENT_NMCTXS > 0
	LOCK(&manager->lock);
	tid = isc_nm_tid();	/* hoisted: avoid calling twice */
	if (tid >= 0) {
		/*
		 * Clamp to the pool size: nothing guarantees the netmgr
		 * thread count is <= CLIENT_NMCTXS, and an unclamped tid
		 * would index past the end of mctxpool[].
		 */
		nextmctx = (unsigned int)tid % CLIENT_NMCTXS;
	} else {
		nextmctx = manager->nextmctx++;
		if (manager->nextmctx == CLIENT_NMCTXS)
			manager->nextmctx = 0;
	}
	INSIST(nextmctx < CLIENT_NMCTXS);

	clientmctx = manager->mctxpool[nextmctx];
	if (clientmctx == NULL) {
		/* Created on demand; safe because we hold manager->lock. */
		isc_mem_create(&clientmctx);
		isc_mem_setname(clientmctx, "client", NULL);
		manager->mctxpool[nextmctx] = clientmctx;
	}
	UNLOCK(&manager->lock);
#else
	clientmctx = manager->mctx;
#endif
	isc_mem_attach(clientmctx, mctxp);
}
tid = isc_random_uniform(manager->ncpus);
}
- int rand = isc_random_uniform(CLIENT_NMCTXS_PERCPU);
+ int rand = isc_random_uniform(CLIENT_NTASKS_PERCPU);
int nexttask = (rand * manager->ncpus) + tid;
isc_task_attach(manager->taskpool[nexttask], taskp);
}
static void
clientmgr_destroy(ns_clientmgr_t *manager) {
+#if CLIENT_NMCTXS > 0
int i;
+#endif
MTRACE("clientmgr_destroy");
isc_refcount_destroy(&manager->references);
manager->magic = 0;
- for (i = 0; i < manager->ncpus * CLIENT_NMCTXS_PERCPU; i++) {
- isc_mem_detach(&manager->mctxpool[i]);
+#if CLIENT_NMCTXS > 0
+ for (i = 0; i < CLIENT_NMCTXS; i++) {
+ if (manager->mctxpool[i] != NULL)
+ isc_mem_detach(&manager->mctxpool[i]);
}
- isc_mem_put(manager->mctx, manager->mctxpool,
- manager->ncpus * CLIENT_NMCTXS_PERCPU * sizeof(isc_mem_t*));
+#endif
if (manager->interface != NULL) {
ns_interface_detach(&manager->interface);
{
ns_clientmgr_t *manager;
isc_result_t result;
+#if CLIENT_NMCTXS > 0
int i;
- int npools;
+#endif
manager = isc_mem_get(mctx, sizeof(*manager));
*manager = (ns_clientmgr_t) { .magic = 0 };
ns_server_attach(sctx, &manager->sctx);
ISC_LIST_INIT(manager->recursing);
-
- npools = CLIENT_NMCTXS_PERCPU * manager->ncpus;
- manager->mctxpool = isc_mem_get(manager->mctx,
- npools * sizeof(isc_mem_t*));
- for (i = 0; i < npools; i++) {
- manager->mctxpool[i] = NULL;
- isc_mem_create(&manager->mctxpool[i]);
- isc_mem_setname(manager->mctxpool[i], "client", NULL);
- }
-
+#if CLIENT_NMCTXS > 0
+ manager->nextmctx = 0;
+ for (i = 0; i < CLIENT_NMCTXS; i++)
+ manager->mctxpool[i] = NULL; /* will be created on-demand */
+#endif
manager->magic = MANAGER_MAGIC;
MTRACE("create");
#define NS_CLIENT_SEND_BUFFER_SIZE 4096
#define NS_CLIENT_RECV_BUFFER_SIZE 4096
-#define CLIENT_NMCTXS_PERCPU 8
+#define CLIENT_NMCTXS 100
/*%<
* Number of 'mctx pools' for clients. (Should this be configurable?)
* When enabling threads, we use a pool of memory contexts shared by
isc_mutex_t reclock;
client_list_t recursing; /*%< Recursing clients */
+#if CLIENT_NMCTXS > 0
/*%< mctx pool for clients. */
- isc_mem_t ** mctxpool;
+ unsigned int nextmctx;
+ isc_mem_t * mctxpool[CLIENT_NMCTXS];
+#endif
};
/*% nameserver client structure */