From 5b8c94db3026bcfd324eaa6f61a1655358182667 Mon Sep 17 00:00:00 2001
From: Victor Julien
Date: Sat, 21 Feb 2015 14:19:48 +0100
Subject: [PATCH] Remove spinning PacketPoolWait

PacketPoolWait in autofp can wait for considerable time. Until now it
was essentially spinning, keeping the CPU 100% busy.

This patch introduces a condition to wait in such cases.

Atomically flag pool that consumer is waiting, so that we can sync the
pending pool right away instead of waiting for the
MAX_PENDING_RETURN_PACKETS limit.
---
 src/tmqh-packetpool.c | 17 ++++++++++++++++-
 src/tmqh-packetpool.h |  3 +++
 2 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/src/tmqh-packetpool.c b/src/tmqh-packetpool.c
index 6ef25c7d32..5ea5c426d3 100644
--- a/src/tmqh-packetpool.c
+++ b/src/tmqh-packetpool.c
@@ -146,6 +146,13 @@ void PacketPoolWait(void)
 {
     PktPool *my_pool = GetThreadPacketPool();
 
+    if (PacketPoolIsEmpty(my_pool)) {
+        SCMutexLock(&my_pool->return_stack.mutex);
+        SC_ATOMIC_ADD(my_pool->return_stack.sync_now, 1);
+        SCCondWait(&my_pool->return_stack.cond, &my_pool->return_stack.mutex);
+        SCMutexUnlock(&my_pool->return_stack.mutex);
+    }
+
     while(PacketPoolIsEmpty(my_pool))
         cc_barrier();
 }
@@ -246,12 +253,14 @@ void PacketPoolReturnPacket(Packet *p)
             p->next = my_pool->pending_head;
             my_pool->pending_head = p;
             my_pool->pending_count++;
-            if (my_pool->pending_count > MAX_PENDING_RETURN_PACKETS) {
+            if (SC_ATOMIC_GET(pool->return_stack.sync_now) || my_pool->pending_count > MAX_PENDING_RETURN_PACKETS) {
                 /* Return the entire list of pending packets. */
                 SCMutexLock(&pool->return_stack.mutex);
                 my_pool->pending_tail->next = pool->return_stack.head;
                 pool->return_stack.head = my_pool->pending_head;
+                SC_ATOMIC_RESET(pool->return_stack.sync_now);
                 SCMutexUnlock(&pool->return_stack.mutex);
+                SCCondSignal(&pool->return_stack.cond);
                 /* Clear the list of pending packets to return. */
                 my_pool->pending_pool = NULL;
                 my_pool->pending_head = NULL;
@@ -263,7 +272,9 @@ void PacketPoolReturnPacket(Packet *p)
             SCMutexLock(&pool->return_stack.mutex);
             p->next = pool->return_stack.head;
             pool->return_stack.head = p;
+            SC_ATOMIC_RESET(pool->return_stack.sync_now);
             SCMutexUnlock(&pool->return_stack.mutex);
+            SCCondSignal(&pool->return_stack.cond);
         }
     }
 }
@@ -279,6 +290,8 @@ void PacketPoolInit(void)
     PktPool *my_pool = GetThreadPacketPool();
 
     SCMutexInit(&my_pool->return_stack.mutex, NULL);
+    SCCondInit(&my_pool->return_stack.cond, NULL);
+    SC_ATOMIC_INIT(my_pool->return_stack.sync_now);
 
     /* pre allocate packets */
     SCLogDebug("preallocating packets... packet size %" PRIuMAX "",
@@ -317,6 +330,8 @@ void PacketPoolDestroy(void)
     while ((p = PacketPoolGetPacket()) != NULL) {
         PacketFree(p);
     }
+
+    SC_ATOMIC_DESTROY(my_pool->return_stack.sync_now);
 }
 
 Packet *TmqhInputPacketpool(ThreadVars *tv)
diff --git a/src/tmqh-packetpool.h b/src/tmqh-packetpool.h
index 729883cd31..f7c8b9fc1e 100644
--- a/src/tmqh-packetpool.h
+++ b/src/tmqh-packetpool.h
@@ -26,11 +26,14 @@
 
 #include "decode.h"
 #include "threads.h"
+#include "util-atomic.h"
 
 /* Return stack, onto which other threads free packets. */
 typedef struct PktPoolLockedStack_{
     /* linked list of free packets. */
     SCMutex mutex;
+    SCCondT cond;
+    SC_ATOMIC_DECLARE(int, sync_now);
     Packet *head;
 } __attribute__((aligned(CLS))) PktPoolLockedStack;
 
-- 
2.47.2
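
A minimal sketch of the handoff the patch implements, assuming plain pthreads
and C11 atomics. The struct layout and the pool_wait()/pool_return() helpers
below are invented for illustration and are not Suricata's API; they only show
the pattern from the commit message: a starved consumer atomically raises a
sync_now flag and blocks on a condition variable, and a producer that sees the
flag splices its pending batch onto the shared return stack immediately and
signals, instead of waiting for the batch-size limit.

/* Illustrative sketch only: plain pthreads + C11 atomics, not Suricata code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct packet {
    struct packet *next;
};

/* Shared return stack, analogous to PktPoolLockedStack in the patch. */
struct return_stack {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    atomic_int sync_now;      /* set by a starved consumer: "flush to me now" */
    struct packet *head;
};

#define MAX_PENDING 32        /* stand-in for MAX_PENDING_RETURN_PACKETS */

/* Consumer side: block until another thread returns packets. */
static void pool_wait(struct return_stack *rs)
{
    pthread_mutex_lock(&rs->mutex);
    atomic_fetch_add(&rs->sync_now, 1);       /* ask producers to sync early */
    while (rs->head == NULL)                  /* tolerate spurious wakeups */
        pthread_cond_wait(&rs->cond, &rs->mutex);
    pthread_mutex_unlock(&rs->mutex);
}

/* Producer side: batch returns locally, but flush at once if sync_now is set. */
static void pool_return(struct return_stack *rs, struct packet *p,
                        struct packet **pending_head,
                        struct packet **pending_tail, int *pending_count)
{
    p->next = *pending_head;
    *pending_head = p;
    if (*pending_tail == NULL)
        *pending_tail = p;
    (*pending_count)++;

    if (atomic_load(&rs->sync_now) || *pending_count > MAX_PENDING) {
        pthread_mutex_lock(&rs->mutex);
        (*pending_tail)->next = rs->head;     /* splice batch onto shared stack */
        rs->head = *pending_head;
        atomic_store(&rs->sync_now, 0);       /* request satisfied */
        pthread_mutex_unlock(&rs->mutex);
        pthread_cond_signal(&rs->cond);       /* wake the waiting consumer */

        *pending_head = NULL;
        *pending_tail = NULL;
        *pending_count = 0;
    }
}

One deliberate difference: the sketch re-checks its predicate in a while loop
around pthread_cond_wait, whereas the patch issues a single SCCondWait and
keeps the pre-existing cc_barrier() spin loop after it as the final guard
against spurious or early wakeups.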