From: Maxim Kuvyrkov
Date: Mon, 23 Dec 2013 20:44:50 +0000 (+1300)
Subject: Fix race in free() of fastbin chunk: BZ #15073
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=8b43a2274a593ce91e673db1cfac6e808134bc84;p=thirdparty%2Fglibc.git

Fix race in free() of fastbin chunk: BZ #15073

Perform sanity check only if we have_lock.  Due to the lockless nature
of fastbins we need to be careful dereferencing pointers to fastbin
entries (chunksize(old) in this case) in multithreaded environments.
The fix is to add have_lock to the if-condition checks.  The rest of
the patch only makes the code more readable.

	* malloc/malloc.c (_int_free): Perform sanity check only if we
	have_lock.

Conflicts:
	ChangeLog
	NEWS
---

diff --git a/ChangeLog b/ChangeLog
index 1e5efa74c03..74d433076e2 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,10 @@
+2014-01-04  Maxim Kuvyrkov
+	    Ondřej Bílka
+
+	[BZ #15073]
+	* malloc/malloc.c (_int_free): Perform sanity check only if we
+	have_lock.
+
 2013-11-11  David S. Miller
 
 	[BZ #16150]
diff --git a/NEWS b/NEWS
index 6c7aaa7c213..29856cc046f 100644
--- a/NEWS
+++ b/NEWS
@@ -9,7 +9,7 @@ Version 2.18.1
 
 * The following bugs are resolved with this release:
 
-  15128, 15909, 15996, 16150.
+  15073, 15128, 15909, 15996, 16150.
 
 Version 2.18
 
diff --git a/malloc/malloc.c b/malloc/malloc.c
index be472b2ba38..8a0eb854e26 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -3809,25 +3809,29 @@ _int_free(mstate av, mchunkptr p, int have_lock)
     unsigned int idx = fastbin_index(size);
     fb = &fastbin (av, idx);
 
-    mchunkptr fd;
-    mchunkptr old = *fb;
+    /* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
+    mchunkptr old = *fb, old2;
     unsigned int old_idx = ~0u;
     do
       {
-	/* Another simple check: make sure the top of the bin is not the
-	   record we are going to add (i.e., double free).  */
+	/* Check that the top of the bin is not the record we are going to add
+	   (i.e., double free).  */
 	if (__builtin_expect (old == p, 0))
 	  {
 	    errstr = "double free or corruption (fasttop)";
 	    goto errout;
 	  }
-	if (old != NULL)
+	/* Check that size of fastbin chunk at the top is the same as
+	   size of the chunk that we are adding.  We can dereference OLD
+	   only if we have the lock, otherwise it might have already been
+	   deallocated.  See use of OLD_IDX below for the actual check.  */
+	if (have_lock && old != NULL)
 	  old_idx = fastbin_index(chunksize(old));
-	p->fd = fd = old;
+	p->fd = old2 = old;
       }
-    while ((old = catomic_compare_and_exchange_val_rel (fb, p, fd)) != fd);
+    while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);
 
-    if (fd != NULL && __builtin_expect (old_idx != idx, 0))
+    if (have_lock && old != NULL && __builtin_expect (old_idx != idx, 0))
       {
 	errstr = "invalid fastbin entry (free)";
 	goto errout;
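
For readers unfamiliar with the fastbin code, the stand-alone sketch below
illustrates the lockless push pattern the patch guards.  It is not glibc
code: bin_head, chunk, fastbin_push and safe_to_deref are made-up names,
and C11 atomics stand in for glibc's catomic_compare_and_exchange_val_rel.
The point is the race being fixed: between reading the old bin head and the
compare-and-swap, a concurrent malloc() may pop that chunk and hand it out,
so its size field may only be dereferenced while the arena lock is held,
which is exactly what the have_lock condition enforces.

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdlib.h>

    struct chunk { size_t size; struct chunk *fd; };

    /* One fastbin: a lock-free LIFO list of same-sized free chunks.  */
    static _Atomic(struct chunk *) bin_head;

    /* Push C onto the bin.  SAFE_TO_DEREF plays the role of have_lock:
       only then is it safe to look inside the chunk currently at the top,
       because without the lock another thread may already have popped it
       and reused it, so reading old->size would race with that reuse.  */
    static void fastbin_push (struct chunk *c, int safe_to_deref)
    {
      struct chunk *old = atomic_load (&bin_head);
      size_t old_size = 0;
      do
        {
          if (old == c)
            abort ();                 /* double free of the top chunk */
          if (safe_to_deref && old != NULL)
            old_size = old->size;     /* only valid while the lock is held */
          c->fd = old;                /* link C in front of the old top */
        }
      /* Retry the compare-and-swap until no other thread raced us;
         on failure OLD is refreshed with the current head.  */
      while (!atomic_compare_exchange_weak (&bin_head, &old, c));

      /* Deferred sanity check, mirroring "invalid fastbin entry (free)".  */
      if (safe_to_deref && old != NULL && old_size != c->size)
        abort ();
    }

    int main (void)
    {
      struct chunk a = { 32, NULL }, b = { 32, NULL };
      fastbin_push (&a, 1);   /* caller "holds the lock" */
      fastbin_push (&b, 1);
      return 0;
    }

In the real _int_free the deferred check compares fastbin indices
(old_idx != idx) rather than raw sizes, but the shape is the same: record
the value inside the loop, and report corruption only after the CAS has
succeeded and only when have_lock made the dereference safe.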