From: Greg Kroah-Hartman Date: Thu, 18 Oct 2012 20:07:06 +0000 (-0700) Subject: 3.0-stable patches X-Git-Tag: v3.0.47~6 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=d77040b21df23479cd2003a4ac87238d094dfc52;p=thirdparty%2Fkernel%2Fstable-queue.git 3.0-stable patches added patches: slab-fix-the-deadlock-issue-on-l3-alien-lock.patch --- diff --git a/queue-3.0/series b/queue-3.0/series index 9051129cada..790fb0f44d3 100644 --- a/queue-3.0/series +++ b/queue-3.0/series @@ -33,3 +33,4 @@ x86-random-architectural-inlines-to-get-random-integers-with-rdrand.patch x86-random-verify-rdrand-functionality-and-allow-it-to-be-disabled.patch tpm-propagate-error-from-tpm_transmit-to-fix-a-timeout-hang.patch udf-fix-retun-value-on-error-path-in-udf_load_logicalvol.patch +slab-fix-the-deadlock-issue-on-l3-alien-lock.patch diff --git a/queue-3.0/slab-fix-the-deadlock-issue-on-l3-alien-lock.patch b/queue-3.0/slab-fix-the-deadlock-issue-on-l3-alien-lock.patch new file mode 100644 index 00000000000..d24698bb48f --- /dev/null +++ b/queue-3.0/slab-fix-the-deadlock-issue-on-l3-alien-lock.patch @@ -0,0 +1,100 @@ +From 947ca1856a7e60aa6d20536785e6a42dff25aa6e Mon Sep 17 00:00:00 2001 +From: Michael Wang +Date: Wed, 5 Sep 2012 10:33:18 +0800 +Subject: slab: fix the DEADLOCK issue on l3 alien lock + +From: Michael Wang + +commit 947ca1856a7e60aa6d20536785e6a42dff25aa6e upstream. + +DEADLOCK will be reported while running a kernel with NUMA and LOCKDEP enabled, +the process of this fake report is: + + kmem_cache_free() //free obj in cachep + -> cache_free_alien() //acquire cachep's l3 alien lock + -> __drain_alien_cache() + -> free_block() + -> slab_destroy() + -> kmem_cache_free() //free slab in cachep->slabp_cache + -> cache_free_alien() //acquire cachep->slabp_cache's l3 alien lock + +Since the cachep and cachep->slabp_cache's l3 alien are in the same lock class, +a fake report is generated. 
+ +This should not happen since we already have init_lock_keys() which will +reassign the lock class for both l3 list and l3 alien. + +However, init_lock_keys() was invoked at a wrong position which is before we +invoke enable_cpucache() on each cache. + +Until we set slab_state to FULL, we won't invoke enable_cpucache() +on caches to build their l3 alien while creating them, so although we invoked +init_lock_keys(), the l3 alien lock class won't change since we don't have +them until enable_cpucache() is invoked later. + +This patch will invoke init_lock_keys() after we have done enable_cpucache() +instead of before to avoid the fake DEADLOCK report. + +Michael traced the problem back to a commit in release 3.0.0: + +commit 30765b92ada267c5395fc788623cb15233276f5c +Author: Peter Zijlstra +Date: Thu Jul 28 23:22:56 2011 +0200 + + slab, lockdep: Annotate the locks before using them + + Fernando found we hit the regular OFF_SLAB 'recursion' before we + annotate the locks, cure this. + + The relevant portion of the stack-trace: + + > [ 0.000000] [] rt_spin_lock+0x50/0x56 + > [ 0.000000] [] __cache_free+0x43/0xc3 + > [ 0.000000] [] kmem_cache_free+0x6c/0xdc + > [ 0.000000] [] slab_destroy+0x4f/0x53 + > [ 0.000000] [] free_block+0x94/0xc1 + > [ 0.000000] [] do_tune_cpucache+0x10b/0x2bb + > [ 0.000000] [] enable_cpucache+0x7b/0xa7 + > [ 0.000000] [] kmem_cache_init_late+0x1f/0x61 + > [ 0.000000] [] start_kernel+0x24c/0x363 + > [ 0.000000] [] i386_start_kernel+0xa9/0xaf + + Reported-by: Fernando Lopez-Lezcano + Acked-by: Pekka Enberg + Signed-off-by: Peter Zijlstra + Link: http://lkml.kernel.org/r/1311888176.2617.379.camel@laptop + Signed-off-by: Ingo Molnar + +The commit moved init_lock_keys() before we build up the alien, so we +failed to reclass it. + +Acked-by: Christoph Lameter +Tested-by: Paul E. 
McKenney +Signed-off-by: Michael Wang +Signed-off-by: Pekka Enberg +Signed-off-by: Greg Kroah-Hartman + +diff --git a/mm/slab.c b/mm/slab.c +index 3b4587b..cd5a926 100644 +--- a/mm/slab.c ++++ b/mm/slab.c +@@ -1774,9 +1774,6 @@ void __init kmem_cache_init_late(void) + + slab_state = UP; + +- /* Annotate slab for lockdep -- annotate the malloc caches */ +- init_lock_keys(); +- + /* 6) resize the head arrays to their final sizes */ + mutex_lock(&slab_mutex); + list_for_each_entry(cachep, &slab_caches, list) +@@ -1784,6 +1781,9 @@ void __init kmem_cache_init_late(void) + BUG(); + mutex_unlock(&slab_mutex); + ++ /* Annotate slab for lockdep -- annotate the malloc caches */ ++ init_lock_keys(); ++ + /* Done! */ + slab_state = FULL; +