--- /dev/null
+/*
+ This file is part of drd, a thread error detector.
+
+ Copyright (C) 1990-2011 Linus Torvalds and other kernel authors.
+ Copyright (C) 2012 Bart Van Assche <bvanassche@acm.org>.
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef _DRD_LIST_H_
+#define _DRD_LIST_H_
+
+/*
+ * Doubly linked lists. See also the Linux kernel headers <linux/types.h>.
+ */
+
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ * Note: relies on the GCC typeof and statement-expression extensions.
+ * NOTE(review): offsetof() is used here and NULL in list_del(), but this
+ * header includes nothing itself — verify that <stddef.h> or an equivalent
+ * definition is in scope wherever this header is included.
+ */
+#define container_of(ptr, type, member) ({ \
+ const typeof( ((type *)0)->member ) *__mptr = (ptr); \
+ (type *)( (char *)__mptr - offsetof(type,member) );})
+
+/* A node of a circular doubly-linked list; embed it in the tracked object. */
+struct list_head {
+ struct list_head *next; /* successor; points at the head in an empty list */
+ struct list_head *prev; /* predecessor; points at the head in an empty list */
+};
+
+/* Static initializer for a list head: both links refer to the head itself. */
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+/* Run-time counterpart of LIST_HEAD_INIT: turn @list into an empty list. */
+static inline void init_list_head(struct list_head *list)
+{
+ list->next = list;
+ list->prev = list;
+}
+
+/*
+ * Insert @new between the two known consecutive entries @prev and @next.
+ * This is only for internal list manipulation where we already know both
+ * neighbours.
+ */
+static inline void __list_add(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ next->prev = new;
+ new->next = next;
+ new->prev = prev;
+ prev->next = new;
+}
+
+/**
+ * list_add - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is useful for implementing stacks. Runs in constant time.
+ */
+static inline void list_add(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head, head->next);
+}
+
+/**
+ * list_add_tail - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues. Runs in constant time.
+ */
+static inline void list_add_tail(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head->prev, head);
+}
+
+/*
+ * Delete a list entry by making the prev/next entries
+ * point to each other.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_del(struct list_head * prev, struct list_head * next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+/**
+ * list_del - deletes entry from list.
+ * @entry: the element to delete from the list.
+ * Note: list_empty() on entry does not return true after this, the entry is
+ * in an undefined state.
+ */
+static inline void list_del(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ /* Clear the links of the removed entry so a stale cursor dereferencing
+ them faults immediately instead of silently walking the old list. */
+ entry->next = NULL;
+ entry->prev = NULL;
+}
+
+/**
+ * list_is_last - tests whether @list is the last entry in list @head
+ * @list: the entry to test
+ * @head: the head of the list
+ *
+ * Constant time; also returns true when @list is the head of an empty list.
+ */
+static inline int list_is_last(const struct list_head *list,
+ const struct list_head *head)
+{
+ return list->next == head;
+}
+
+/**
+ * list_empty - tests whether a list is empty
+ * @head: the list to test.
+ *
+ * @head must be a list head, not an element embedded in a list.
+ */
+static inline int list_empty(const struct list_head *head)
+{
+ return head->next == head;
+}
+
+/**
+ * list_entry - get the struct for this entry
+ * @ptr: the &struct list_head pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_entry(ptr, type, member) \
+ container_of(ptr, type, member)
+
+/**
+ * list_first_entry - get the first element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ *
+ * Note, that list is expected to be not empty.
+ */
+#define list_first_entry(ptr, type, member) \
+ list_entry((ptr)->next, type, member)
+
+/**
+ * list_next_entry - get the element that follows node @ptr
+ * @ptr: a &struct list_head embedded in an element.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ *
+ * NOTE(review): expands identically to list_first_entry(); unlike the Linux
+ * kernel macro of the same name, @ptr is a &struct list_head rather than a
+ * typed element pointer. The result is invalid when @ptr is the last node.
+ */
+#define list_next_entry(ptr, type, member) \
+ list_entry((ptr)->next, type, member)
+
+/**
+ * list_last_entry - get the last element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ *
+ * Note: the list is expected to be non-empty.
+ */
+#define list_last_entry(ptr, type, member) \
+ list_entry((ptr)->prev, type, member)
+
+/**
+ * list_for_each_entry - iterate over list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry(pos, head, member) \
+ for (pos = list_entry((head)->next, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = list_entry(pos->member.next, typeof(*pos), member))
+
+/**
+ * list_for_each_entry_reverse - iterate backwards over list of given type.
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_reverse(pos, head, member) \
+ for (pos = list_entry((head)->prev, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = list_entry(pos->member.prev, typeof(*pos), member))
+
+/**
+ * list_for_each_entry_reverse_continue - continue a backwards iteration
+ * @pos: the type * loop cursor, already positioned by a previous loop.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ *
+ * Resumes iterating backwards at the current value of @pos. Note that @pos
+ * itself is visited first, unlike the Linux kernel's
+ * list_for_each_entry_continue_reverse(), which starts at the predecessor.
+ */
+#define list_for_each_entry_reverse_continue(pos, head, member) \
+ for ( ; \
+ &pos->member != (head); \
+ pos = list_entry(pos->member.prev, typeof(*pos), member))
+
+/**
+ * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ *
+ * Safe only against removal of @pos itself: @n is read ahead before the
+ * loop body runs, so the body must not remove @n.
+ */
+#define list_for_each_entry_safe(pos, n, head, member) \
+ for (pos = list_entry((head)->next, typeof(*pos), member), \
+ n = list_entry(pos->member.next, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = n, n = list_entry(n->member.next, typeof(*n), member))
+
+#endif /* _DRD_LIST_H_ */
s_join_list_vol = jlv;
}
+/**
+ * Initialize the segment list (sg_list) of every thread slot.
+ * NOTE(review): presumably invoked once during DRD start-up, before any
+ * segment is appended — confirm against the tool-initialisation caller.
+ */
+void DRD_(thread_init)(void)
+{
+ int i;
+
+ for (i = 0; i < DRD_N_THREADS; i++)
+ init_list_head(&DRD_(g_threadinfo)[i].sg_list);
+}
+
/**
* Convert Valgrind's ThreadId into a DrdThreadId.
*
DRD_(g_threadinfo)[i].pthread_create_nesting_level = 0;
DRD_(g_threadinfo)[i].synchr_nesting = 0;
DRD_(g_threadinfo)[i].deletion_seq = s_deletion_tail - 1;
- tl_assert(DRD_(g_threadinfo)[i].first == 0);
- tl_assert(DRD_(g_threadinfo)[i].last == 0);
+ tl_assert(list_empty(&DRD_(g_threadinfo)[i].sg_list));
tl_assert(DRD_(IsValidDrdThreadId)(i));
tl_assert(0 <= (int)created && created < DRD_N_THREADS
&& created != DRD_INVALID_THREADID);
- tl_assert(DRD_(g_threadinfo)[created].first == 0);
- tl_assert(DRD_(g_threadinfo)[created].last == 0);
+ tl_assert(list_empty(&DRD_(g_threadinfo)[created].sg_list));
/* Create an initial segment for the newly created thread. */
thread_append_segment(created, DRD_(sg_new)(creator, created));
tl_assert(DRD_(IsValidDrdThreadId)(tid));
tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 0);
- for (sg = DRD_(g_threadinfo)[tid].last; sg; sg = sg_prev)
+ list_for_each_entry_safe(sg, sg_prev, &DRD_(g_threadinfo)[tid].sg_list,
+ thr_list)
{
- sg_prev = sg->prev;
- sg->prev = 0;
- sg->next = 0;
+ list_del(&sg->thr_list);
DRD_(sg_put)(sg);
}
DRD_(g_threadinfo)[tid].valid = False;
DRD_(g_threadinfo)[tid].detached_posix_thread = False;
else
tl_assert(!DRD_(g_threadinfo)[tid].detached_posix_thread);
- DRD_(g_threadinfo)[tid].first = 0;
- DRD_(g_threadinfo)[tid].last = 0;
+ tl_assert(list_empty(&DRD_(g_threadinfo)[tid].sg_list));
tl_assert(! DRD_(IsValidDrdThreadId)(tid));
}
tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
#endif
- sg->prev = DRD_(g_threadinfo)[tid].last;
- sg->next = 0;
- if (DRD_(g_threadinfo)[tid].last)
- DRD_(g_threadinfo)[tid].last->next = sg;
- DRD_(g_threadinfo)[tid].last = sg;
- if (DRD_(g_threadinfo)[tid].first == 0)
- DRD_(g_threadinfo)[tid].first = sg;
+ list_add_tail(&sg->thr_list, &DRD_(g_threadinfo)[tid].sg_list);
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
#endif
- if (sg->prev)
- sg->prev->next = sg->next;
- if (sg->next)
- sg->next->prev = sg->prev;
- if (sg == DRD_(g_threadinfo)[tid].first)
- DRD_(g_threadinfo)[tid].first = sg->next;
- if (sg == DRD_(g_threadinfo)[tid].last)
- DRD_(g_threadinfo)[tid].last = sg->prev;
+ list_del(&sg->thr_list);
DRD_(sg_put)(sg);
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
*/
VectorClock* DRD_(thread_get_vc)(const DrdThreadId tid)
{
+ struct list_head* sg_list;
+
tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
&& tid != DRD_INVALID_THREADID);
- tl_assert(DRD_(g_threadinfo)[tid].last);
- return &DRD_(g_threadinfo)[tid].last->vc;
+ sg_list = &DRD_(g_threadinfo)[tid].sg_list;
+ tl_assert(!list_empty(sg_list));
+ return &list_last_entry(sg_list, Segment, thr_list)->vc;
}
/**
*/
void DRD_(thread_get_latest_segment)(Segment** sg, const DrdThreadId tid)
{
+ struct list_head* sg_list;
+
tl_assert(sg);
tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
&& tid != DRD_INVALID_THREADID);
- tl_assert(DRD_(g_threadinfo)[tid].last);
+ sg_list = &DRD_(g_threadinfo)[tid].sg_list;
+ tl_assert(!list_empty(sg_list));
DRD_(sg_put)(*sg);
- *sg = DRD_(sg_get)(DRD_(g_threadinfo)[tid].last);
+ *sg = DRD_(sg_get)(list_last_entry(sg_list, Segment, thr_list));
}
/**
{
unsigned i;
Bool first;
+ struct list_head* sg_list;
Segment* latest_sg;
first = True;
for (i = 0; i < DRD_N_THREADS; i++)
{
- latest_sg = DRD_(g_threadinfo)[i].last;
- if (latest_sg)
- {
+ sg_list = &DRD_(g_threadinfo)[i].sg_list;
+ if (!list_empty(sg_list)) {
+ latest_sg = list_last_entry(sg_list, Segment, thr_list);
if (first)
DRD_(vc_assign)(vc, &latest_sg->vc);
else
{
unsigned i;
Bool first;
+ struct list_head* sg_list;
Segment* latest_sg;
first = True;
for (i = 0; i < DRD_N_THREADS; i++)
{
- latest_sg = DRD_(g_threadinfo)[i].last;
- if (latest_sg)
- {
+ sg_list = &DRD_(g_threadinfo)[i].sg_list;
+ if (!list_empty(sg_list)) {
+ latest_sg = list_last_entry(sg_list, Segment, thr_list);
if (first)
DRD_(vc_assign)(vc, &latest_sg->vc);
else
{
Segment* sg;
Segment* sg_next;
- for (sg = DRD_(g_threadinfo)[i].first;
- sg && (sg_next = sg->next) && DRD_(vc_lte)(&sg->vc, &thread_vc_min);
- sg = sg_next)
- {
+ struct list_head* sg_list;
+
+ sg_list = &DRD_(g_threadinfo)[i].sg_list;
+ list_for_each_entry_safe(sg, sg_next, sg_list, thr_list) {
+ if (list_is_last(&sg->thr_list, sg_list)
+ || !DRD_(vc_lte)(&sg->vc, &thread_vc_min))
+ break;
thread_discard_segment(i, sg);
}
}
Segment* const sg2)
{
unsigned i;
+ struct list_head* sg_list;
- tl_assert(sg1->next);
- tl_assert(sg2->next);
- tl_assert(sg1->next == sg2);
+ sg_list = &DRD_(g_threadinfo)[tid].sg_list;
+ tl_assert(!list_is_last(&sg1->thr_list, sg_list));
+ tl_assert(!list_is_last(&sg2->thr_list, sg_list));
+ tl_assert(list_next_entry(&sg1->thr_list, Segment, thr_list) == sg2);
tl_assert(DRD_(vc_lte)(&sg1->vc, &sg2->vc));
for (i = 0; i < DRD_N_THREADS; i++)
{
Segment* sg;
- for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
+ sg_list = &DRD_(g_threadinfo)[i].sg_list;
+ list_for_each_entry(sg, sg_list, thr_list)
{
- if (! sg->next || DRD_(sg_get_refcnt)(sg) > 1)
+ if (list_is_last(&sg->thr_list, sg_list)
+ || DRD_(sg_get_refcnt)(sg) > 1)
{
if (DRD_(vc_lte)(&sg2->vc, &sg->vc))
break;
return False;
}
}
- for (sg = DRD_(g_threadinfo)[i].last; sg; sg = sg->prev)
+ list_for_each_entry_reverse(sg, sg_list, thr_list)
{
- if (! sg->next || DRD_(sg_get_refcnt)(sg) > 1)
+ if (list_is_last(&sg->thr_list, sg_list)
+ || DRD_(sg_get_refcnt)(sg) > 1)
{
if (DRD_(vc_lte)(&sg->vc, &sg1->vc))
break;
tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
#endif
- for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
+ struct list_head* sg_list = &DRD_(g_threadinfo)[i].sg_list;
+ list_for_each_entry(sg, sg_list, thr_list)
{
if (DRD_(sg_get_refcnt)(sg) == 1
- && sg->next
- && DRD_(sg_get_refcnt)(sg->next) == 1
- && sg->next->next
- && thread_consistent_segment_ordering(i, sg, sg->next))
- {
- /* Merge sg and sg->next into sg. */
- DRD_(sg_merge)(sg, sg->next);
- thread_discard_segment(i, sg->next);
+ && !list_is_last(&sg->thr_list, sg_list)) {
+ Segment* sg_next = list_next_entry(&sg->thr_list, Segment,
+ thr_list);
+ if (DRD_(sg_get_refcnt)(sg_next) == 1
+ && !list_is_last(&sg_next->thr_list, sg_list)
+ && thread_consistent_segment_ordering(i, sg, sg_next))
+ {
+ /* Merge sg and sg_next into sg. */
+ DRD_(sg_merge)(sg, sg_next);
+ thread_discard_segment(i, sg_next);
+ }
}
}
*/
void DRD_(thread_new_segment)(const DrdThreadId tid)
{
+ struct list_head* sg_list;
Segment* last_sg;
Segment* new_sg;
&& tid != DRD_INVALID_THREADID);
tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
- last_sg = DRD_(g_threadinfo)[tid].last;
+ sg_list = &DRD_(g_threadinfo)[tid].sg_list;
+ last_sg = list_empty(sg_list) ? NULL
+ : list_last_entry(sg_list, Segment, thr_list);
new_sg = DRD_(sg_new)(tid, tid);
thread_append_segment(tid, new_sg);
if (tid == DRD_(g_drd_running_tid) && last_sg)
&& joiner != DRD_INVALID_THREADID);
tl_assert(0 <= (int)joinee && joinee < DRD_N_THREADS
&& joinee != DRD_INVALID_THREADID);
- tl_assert(DRD_(g_threadinfo)[joiner].last);
- tl_assert(DRD_(g_threadinfo)[joinee].last);
+ tl_assert(!list_empty(&DRD_(g_threadinfo)[joiner].sg_list));
+ tl_assert(!list_empty(&DRD_(g_threadinfo)[joinee].sg_list));
if (DRD_(sg_get_trace)())
{
tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
&& tid != DRD_INVALID_THREADID);
- tl_assert(DRD_(g_threadinfo)[tid].last);
+ tl_assert(!list_empty(&DRD_(g_threadinfo)[tid].sg_list));
tl_assert(sg);
tl_assert(vc);
*/
void DRD_(thread_stop_using_mem)(const Addr a1, const Addr a2)
{
- unsigned i;
Segment* p;
- for (i = 0; i < DRD_N_THREADS; i++)
- for (p = DRD_(g_threadinfo)[i].first; p; p = p->next)
- DRD_(bm_clear)(DRD_(sg_bm)(p), a1, a2);
+ list_for_each_entry(p, &DRD_(g_sg_list), g_list)
+ DRD_(bm_clear)(DRD_(sg_bm)(p), a1, a2);
DRD_(bm_clear)(DRD_(g_conflict_set), a1, a2);
}
void DRD_(thread_print_all)(void)
{
unsigned i;
+ struct list_head* sg_list;
Segment* p;
for (i = 0; i < DRD_N_THREADS; i++)
{
- if (DRD_(g_threadinfo)[i].first)
+ sg_list = &DRD_(g_threadinfo)[i].sg_list;
+ if (!list_empty(sg_list))
{
VG_(printf)("**************\n"
"* thread %3d (%d/%d/%d/%d/0x%lx/%d) *\n"
DRD_(g_threadinfo)[i].posix_thread_exists,
DRD_(g_threadinfo)[i].pt_threadid,
DRD_(g_threadinfo)[i].detached_posix_thread);
- for (p = DRD_(g_threadinfo)[i].first; p; p = p->next)
- {
+ list_for_each_entry(p, sg_list, thr_list)
DRD_(sg_print)(p);
- }
}
}
}
if (i != tid)
{
Segment* q;
- for (q = DRD_(g_threadinfo)[i].last; q; q = q->prev)
- {
+ struct list_head *sg_list;
+
+ sg_list = &DRD_(g_threadinfo)[i].sg_list;
+ list_for_each_entry_reverse(q, sg_list, thr_list) {
/*
* Since q iterates over the segments of thread i in order of
* decreasing vector clocks, if q->vc <= p->vc, then
if (DRD_(bm_has_conflict_with)(DRD_(sg_bm)(q), addr, addr + size,
access_type))
{
+ Segment* q_next;
+
tl_assert(q->stacktrace);
if (VG_(clo_xml))
VG_(printf_xml)(" <other_segment_start>\n");
else
VG_(message)(Vg_UserMsg,
"Other segment end (thread %d)\n", i);
- show_call_stack(i, q->next ? q->next->stacktrace : 0);
+ q_next = list_is_last(&q->thr_list, sg_list)
+ ? NULL : list_next_entry(&q->thr_list, Segment, thr_list);
+ show_call_stack(i, q_next ? q_next->stacktrace : 0);
if (VG_(clo_xml))
VG_(printf_xml)(" </other_segment_end>\n");
}
tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
&& tid != DRD_INVALID_THREADID);
- for (p = DRD_(g_threadinfo)[tid].first; p; p = p->next)
- {
+ list_for_each_entry(p, &DRD_(g_threadinfo)[tid].sg_list, thr_list) {
if (DRD_(bm_has)(DRD_(sg_bm)(p), addr, addr + size, access_type))
- {
thread_report_conflicting_segments_segment(tid, addr, size,
access_type, p);
- }
}
}
VG_(free)(str);
}
- p = DRD_(g_threadinfo)[tid].last;
+ p = list_last_entry(&DRD_(g_threadinfo)[tid].sg_list, Segment, thr_list);
{
unsigned j;
if (j != tid && DRD_(IsValidDrdThreadId)(j))
{
Segment* q;
- for (q = DRD_(g_threadinfo)[j].last; q; q = q->prev)
- {
+ list_for_each_entry_reverse(q, &DRD_(g_threadinfo)[j].sg_list,
+ thr_list) {
if (! DRD_(vc_lte)(&q->vc, &p->vc)
&& ! DRD_(vc_lte)(&p->vc, &q->vc))
{
if (j == tid || ! DRD_(IsValidDrdThreadId)(j))
continue;
- for (q = DRD_(g_threadinfo)[j].last;
- q && !DRD_(vc_lte)(&q->vc, new_vc);
- q = q->prev) {
- const Bool included_in_old_conflict_set
- = !DRD_(vc_lte)(old_vc, &q->vc);
- const Bool included_in_new_conflict_set
- = !DRD_(vc_lte)(new_vc, &q->vc);
+ list_for_each_entry_reverse(q, &DRD_(g_threadinfo)[j].sg_list, thr_list) {
+ Bool included_in_old_conflict_set, included_in_new_conflict_set;
+
+ if (DRD_(vc_lte)(&q->vc, new_vc))
+ break;
+
+ included_in_old_conflict_set = !DRD_(vc_lte)(old_vc, &q->vc);
+ included_in_new_conflict_set = !DRD_(vc_lte)(new_vc, &q->vc);
if (UNLIKELY(s_trace_conflict_set)) {
char* str;
DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
}
- for ( ; q && !DRD_(vc_lte)(&q->vc, old_vc); q = q->prev) {
- const Bool included_in_old_conflict_set
- = !DRD_(vc_lte)(old_vc, &q->vc);
- const Bool included_in_new_conflict_set
- = !DRD_(vc_lte)(&q->vc, new_vc)
- && !DRD_(vc_lte)(new_vc, &q->vc);
+ list_for_each_entry_reverse_continue(q, &DRD_(g_threadinfo)[j].sg_list,
+ thr_list) {
+ Bool included_in_old_conflict_set, included_in_new_conflict_set;
+
+ if (DRD_(vc_lte)(&q->vc, old_vc))
+ break;
+
+ included_in_old_conflict_set = !DRD_(vc_lte)(old_vc, &q->vc);
+ included_in_new_conflict_set
+ = !DRD_(vc_lte)(&q->vc, new_vc) && !DRD_(vc_lte)(new_vc, &q->vc);
if (UNLIKELY(s_trace_conflict_set)) {
char* str;
DRD_(bm_clear_marked)(DRD_(g_conflict_set));
- p = DRD_(g_threadinfo)[tid].last;
+ p = list_last_entry(&DRD_(g_threadinfo)[tid].sg_list, Segment, thr_list);
for (j = 0; j < DRD_N_THREADS; j++)
{
if (j != tid && DRD_(IsValidDrdThreadId)(j))
{
Segment* q;
- for (q = DRD_(g_threadinfo)[j].last;
- q && !DRD_(vc_lte)(&q->vc, &p->vc);
- q = q->prev) {
+ list_for_each_entry_reverse(q, &DRD_(g_threadinfo)[j].sg_list,
+ thr_list) {
+ if (DRD_(vc_lte)(&q->vc, &p->vc))
+ break;
if (!DRD_(vc_lte)(&p->vc, &q->vc))
DRD_(bm_merge2_marked)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
}