From 2f8fd1da0e52f99138ed489016c774121a33e942 Mon Sep 17 00:00:00 2001
From: Tom de Vries <tdevries@suse.de>
Date: Fri, 15 Jul 2022 17:45:53 +0200
Subject: [PATCH] [gdb] Fix data race in bcache::insert

Data race between:
...
  Read of size 8 at 0x7bb4000000d0 by thread T4:
    #0 gdb::bcache::insert(void const*, int, bool*) gdb/bcache.c:155
    #1 objfile_per_bfd_storage::intern(char const*) gdb/objfiles.h:250
    #2 objfile::intern(char const*) gdb/objfiles.h:475
    #3 dwarf2_canonicalize_name gdb/dwarf2/read.c:21904
    #4 dwarf2_name gdb/dwarf2/read.c:21999
    #5 read_base_type gdb/dwarf2/read.c:17092
    #6 read_type_die_1 gdb/dwarf2/read.c:21529
    #7 read_type_die gdb/dwarf2/read.c:21464
    #8 process_die gdb/dwarf2/read.c:8674
    #9 read_file_scope gdb/dwarf2/read.c:9610
    #10 process_die gdb/dwarf2/read.c:8614
    #11 process_full_comp_unit gdb/dwarf2/read.c:8383
    #12 process_queue_item gdb/dwarf2/read.c:7592
...
and:
...
  Previous write of size 8 at 0x7bb4000000d0 by main thread:
    #0 gdb::bcache::insert(void const*, int, bool*) gdb/bcache.c:167
    #1 objfile_per_bfd_storage::intern(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) gdb/objfiles.h:257
    #2 objfile::intern(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&)
    #3 dwarf2_compute_name gdb/dwarf2/read.c:9050
    #4 dwarf2_full_name gdb/dwarf2/read.c:9070
    #5 read_structure_type gdb/dwarf2/read.c:14558
    #6 process_structure_scope gdb/dwarf2/read.c:14847
    #7 process_die gdb/dwarf2/read.c:8643
    #8 read_file_scope gdb/dwarf2/read.c:9610
    #9 process_die gdb/dwarf2/read.c:8614
    #10 process_full_comp_unit gdb/dwarf2/read.c:8383
    #11 process_queue_item gdb/dwarf2/read.c:7592
...

Fix this by serializing calls to bcache::insert with a global mutex, when
gdb is built with CXX_STD_THREAD.
---
 gdb/bcache.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/gdb/bcache.c b/gdb/bcache.c
index 80ba1f6b6ec..a2b7ba11369 100644
--- a/gdb/bcache.c
+++ b/gdb/bcache.c
@@ -24,6 +24,11 @@
 #include "bcache.h"
 
 #include <algorithm>
+#include <mutex>
+
+#if CXX_STD_THREAD
+static std::mutex bcache_lock;
+#endif
 
 namespace gdb {
 
@@ -63,6 +68,9 @@ struct bstring
 void
 bcache::expand_hash_table ()
 {
+#if CXX_STD_THREAD
+  //std::lock_guard<std::mutex> guard (bcache_lock);
+#endif
   /* A table of good hash table sizes.  Whenever we grow, we pick the
      next larger size from this table.  sizes[i] is close to 1 << (i+10),
      so we roughly double the table size each time.  After we fall off
@@ -142,6 +150,9 @@ bcache::expand_hash_table ()
 const void *
 bcache::insert (const void *addr, int length, bool *added)
 {
+#if CXX_STD_THREAD
+  std::lock_guard<std::mutex> guard (bcache_lock);
+#endif
   unsigned long full_hash;
   unsigned short half_hash;
   int hash_index;
-- 
2.47.2