*/
enum { all_threads_mask = 1UL };
enum { threads_harmless_mask = 0 };
+enum { threads_idle_mask = 0 };
enum { threads_sync_mask = 0 };
enum { threads_want_rdv_mask = 0 };
enum { tid_bit = 1UL };
raise(sig);
}
+static inline void thread_idle_now()
+{
+}
+
+static inline void thread_idle_end()
+{
+}
+
static inline void thread_harmless_now()
{
}
{
}
+static inline void thread_isolate_full()
+{
+}
+
static inline void thread_release()
{
}
void thread_harmless_till_end();
void thread_isolate();
+void thread_isolate_full();
void thread_release();
void thread_sync_release();
void ha_tkill(unsigned int thr, int sig);
extern volatile unsigned long all_threads_mask;
extern volatile unsigned long threads_harmless_mask;
+extern volatile unsigned long threads_idle_mask;
extern volatile unsigned long threads_sync_mask;
extern volatile unsigned long threads_want_rdv_mask;
extern THREAD_LOCAL unsigned long tid_bit; /* The bit corresponding to the thread id */
#endif
}
+/* Marks the thread as idle, which means that not only is it not doing anything
+ * dangerous, but in addition it has not started anything sensitive either.
+ * This essentially means that the thread currently is in the poller, thus
+ * outside of any execution block. Needs to be terminated using
+ * thread_idle_end(). This is needed to release a concurrent call to
+ * thread_isolate_full().
+ */
+static inline void thread_idle_now()
+{
+ HA_ATOMIC_OR(&threads_idle_mask, tid_bit);
+}
+
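As a minimal sketch (not taken from this change), a waiter that needs every
started thread to have reached such an idle point could spin on the new mask,
using the same primitives the patch already relies on:

	/* wait until all started threads are marked idle */
	while ((_HA_ATOMIC_LOAD(&threads_idle_mask) & all_threads_mask) != all_threads_mask)
		ha_thread_relax();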
+/* Ends the idle period started by thread_idle_now(), i.e. the thread is
+ * about to restart engaging in sensitive operations. This must not be done on
+ * a thread marked harmless, as it could cause a deadlock between another
+ * thread waiting for idle again and thread_harmless_end() in this thread.
+ *
+ * The right sequence is thus:
+ *   thread_idle_now();
+ *     thread_harmless_now();
+ *       poll();
+ *     thread_harmless_end();
+ *   thread_idle_end();
+ */
+static inline void thread_idle_end()
+{
+ HA_ATOMIC_AND(&threads_idle_mask, ~tid_bit);
+}
+
+
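As a rough sketch of the sequence above (fds, nfds, wait_time and status stand
for whatever state the poller maintains, and poll() for the actual system call,
e.g. epoll_wait() or kevent()):

	thread_idle_now();        /* nothing sensitive started yet */
	thread_harmless_now();    /* not touching any shared resource */

	status = poll(fds, nfds, wait_time);

	thread_harmless_end();    /* may pause while an isolated thread works */
	thread_idle_end();        /* sensitive operations may resume */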
/* Marks the thread as harmless. Note: this must be true, i.e. the thread must
* not be touching any unprotected shared resource during this period. Usually
* this is called before poll(), but it may also be placed around very slow
_update_fd(fd);
}
+ thread_idle_now();
thread_harmless_now();
/* now let's wait for polled events */
tv_leaving_poll(wait_time, status);
thread_harmless_end();
+ thread_idle_end();
+
if (sleeping_thread_mask & tid_bit)
_HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
_update_fd(fd);
}
+ thread_idle_now();
thread_harmless_now();
/*
tv_leaving_poll(wait_time, nevlist);
thread_harmless_end();
+ thread_idle_end();
+
if (sleeping_thread_mask & tid_bit)
_HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
changes = _update_fd(fd, changes);
}
+ thread_idle_now();
thread_harmless_now();
if (changes) {
tv_leaving_poll(wait_time, status);
thread_harmless_end();
+ thread_idle_end();
+
if (sleeping_thread_mask & tid_bit)
_HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
break;
} while (!_HA_ATOMIC_CAS(&maxfd, &old_maxfd, new_maxfd));
+ thread_idle_now();
thread_harmless_now();
fd_nbupdt = 0;
tv_leaving_poll(wait_time, status);
thread_harmless_end();
+ thread_idle_end();
+
if (sleeping_thread_mask & tid_bit)
_HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
break;
} while (!_HA_ATOMIC_CAS(&maxfd, &old_maxfd, new_maxfd));
+ thread_idle_now();
thread_harmless_now();
fd_nbupdt = 0;
tv_leaving_poll(delta_ms, status);
thread_harmless_end();
+ thread_idle_end();
+
if (sleeping_thread_mask & tid_bit)
_HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
volatile unsigned long threads_want_rdv_mask __read_mostly = 0;
volatile unsigned long threads_harmless_mask = 0;
+volatile unsigned long threads_idle_mask = 0;
volatile unsigned long threads_sync_mask = 0;
volatile unsigned long all_threads_mask __read_mostly = 1; // nbthread 1 assumed by default
THREAD_LOCAL unsigned int tid = 0;
*/
}
+/* Isolates the current thread : request the ability to work while all other
+ * threads are idle, as defined by thread_idle_now(). It only returns once
+ * all of them are both harmless and idle, with the current thread's bit in
+ * threads_harmless_mask and idle_mask cleared. Needs to be completed using
+ * thread_release(). By doing so the thread also engages in being safe against
+ * any actions that other threads might be about to start under the same
+ * conditions. This specifically targets destruction of any internal structure,
+ * which implies that the current thread may not hold references to any object.
+ *
+ * Note that a concurrent thread_isolate() will usually win against
+ * thread_isolate_full() as it doesn't consider the idle_mask, allowing it to
+ * get back to the poller or any other fully idle location, which will
+ * ultimately release this one.
+ */
+void thread_isolate_full()
+{
+ unsigned long old;
+
+ _HA_ATOMIC_OR(&threads_idle_mask, tid_bit);
+ _HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
+ __ha_barrier_atomic_store();
+ _HA_ATOMIC_OR(&threads_want_rdv_mask, tid_bit);
+
+ /* wait for all threads to become harmless */
+ old = threads_harmless_mask;
+ while (1) {
+ unsigned long idle = _HA_ATOMIC_LOAD(&threads_idle_mask);
+
+ if (unlikely((old & all_threads_mask) != all_threads_mask))
+ old = _HA_ATOMIC_LOAD(&threads_harmless_mask);
+ else if ((idle & all_threads_mask) == all_threads_mask &&
+ _HA_ATOMIC_CAS(&threads_harmless_mask, &old, old & ~tid_bit))
+ break;
+
+ ha_thread_relax();
+ }
+
+ /* we're not idle anymore at this point. Other threads waiting on this
+ * condition will need to wait until our next pass to the poller, or
+ * our next call to thread_isolate_full().
+ */
+ _HA_ATOMIC_AND(&threads_idle_mask, ~tid_bit);
+}
+
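As a minimal usage sketch (shared_ptr and free_shared_struct() are hypothetical
placeholders; only thread_isolate_full() and thread_release() come from this
change), tearing down a structure that other threads may still be referencing
could look like:

	thread_isolate_full();            /* all others are harmless AND idle */
	free_shared_struct(shared_ptr);   /* hypothetical: nobody holds a reference */
	shared_ptr = NULL;
	thread_release();                 /* let the other threads resume */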
/* Cancels the effect of thread_isolate() by releasing the current thread's bit
 * in threads_want_rdv_mask. This immediately allows other threads to expect to
 * be executed, though they will first have to wait for this thread to become