TASK_RQ_LOCK,
TASK_WQ_LOCK,
POOL_LOCK,
+ SIGNALS_LOCK,
LOCK_LABELS
};
struct lock_stat {
static inline void show_lock_stats()
{
const char *labels[LOCK_LABELS] = {"THREAD_SYNC", "FDTAB", "FDCACHE", "FD", "POLL",
- "TASK_RQ", "TASK_WQ", "POOL" };
+ "TASK_RQ", "TASK_WQ", "POOL",
+ "SIGNALS" };
int lbl;
for (lbl = 0; lbl < LOCK_LABELS; lbl++) {
#include <signal.h>
#include <common/standard.h>
+#include <common/hathreads.h>
+
#include <types/signal.h>
#include <types/task.h>
extern struct signal_descriptor signal_state[];
extern struct pool_head *pool2_sig_handlers;
+#ifdef USE_THREAD
+extern HA_SPINLOCK_T signals_lock;
+#endif
+
void signal_handler(int sig);
void __signal_process_queue();
int signal_init();
sigset_t blocked_sig;
int signal_pending = 0; /* non-zero if at least one signal remains unprocessed */
+#ifdef USE_THREAD
+HA_SPINLOCK_T signals_lock;
+#endif
+
/* Common signal handler, used by all signals. Received signals are queued.
* Signal number zero has a specific status, as it cannot be delivered by the
* system, any function may call it to perform asynchronous signal delivery.
struct signal_descriptor *desc;
sigset_t old_sig;
+ if (SPIN_TRYLOCK(SIGNALS_LOCK, &signals_lock))
+ return;
+
/* block signal delivery during processing */
sigprocmask(SIG_SETMASK, &blocked_sig, &old_sig);
/* restore signal delivery */
sigprocmask(SIG_SETMASK, &old_sig, NULL);
+ SPIN_UNLOCK(SIGNALS_LOCK, &signals_lock);
}
/* perform minimal initializations, report 0 in case of error, 1 if OK. */
memset(signal_queue, 0, sizeof(signal_queue));
memset(signal_state, 0, sizeof(signal_state));
+ SPIN_INIT(&signals_lock);
+
/* Ensure signals are not blocked. Some shells or service managers may
 * accidentally block all of our signals unfortunately, causing lots of
* zombie processes to remain in the background during reloads.
pool_free2(pool2_sig_handlers, sh);
}
}
+ SPIN_DESTROY(&signals_lock);
}
/* Register a function and an integer argument on a signal. A pointer to the