]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/blob - releases/3.4.25/lib-atomic64-initialize-locks-statically-to-fix-early-users.patch
3.4-stable patches
[thirdparty/kernel/stable-queue.git] / releases / 3.4.25 / lib-atomic64-initialize-locks-statically-to-fix-early-users.patch
1 From fcc16882ac4532aaa644bff444f0c5d6228ba71e Mon Sep 17 00:00:00 2001
2 From: Stephen Boyd <sboyd@codeaurora.org>
3 Date: Wed, 19 Dec 2012 23:39:48 -0800
4 Subject: lib: atomic64: Initialize locks statically to fix early users
5
6 From: Stephen Boyd <sboyd@codeaurora.org>
7
8 commit fcc16882ac4532aaa644bff444f0c5d6228ba71e upstream.
9
10 The atomic64 library uses a handful of static spin locks to implement
11 atomic 64-bit operations on architectures without support for atomic
12 64-bit instructions.
13
14 Unfortunately, the spinlocks are initialized in a pure initcall and that
15 is too late for the vfs namespace code which wants to use atomic64
16 operations before the initcall is run.
17
18 This became a problem as of commit 8823c079ba71: "vfs: Add setns support
19 for the mount namespace".
20
21 This leads to BUG messages such as:
22
23 BUG: spinlock bad magic on CPU#0, swapper/0/0
24 lock: atomic64_lock+0x240/0x400, .magic: 00000000, .owner: <none>/-1, .owner_cpu: 0
25 do_raw_spin_lock+0x158/0x198
26 _raw_spin_lock_irqsave+0x4c/0x58
27 atomic64_add_return+0x30/0x5c
28 alloc_mnt_ns.clone.14+0x44/0xac
29 create_mnt_ns+0xc/0x54
30 mnt_init+0x120/0x1d4
31 vfs_caches_init+0xe0/0x10c
32 start_kernel+0x29c/0x300
33
34 coming out early on during boot when spinlock debugging is enabled.
35
36 Fix this by initializing the spinlocks statically at compile time.
37
38 Reported-and-tested-by: Vaibhav Bedia <vaibhav.bedia@ti.com>
39 Tested-by: Tony Lindgren <tony@atomide.com>
40 Cc: Eric W. Biederman <ebiederm@xmission.com>
41 Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
42 Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
43 Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
44 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
45
46 ---
47 lib/atomic64.c | 17 +++++------------
48 1 file changed, 5 insertions(+), 12 deletions(-)
49
50 --- a/lib/atomic64.c
51 +++ b/lib/atomic64.c
52 @@ -31,7 +31,11 @@
53 static union {
54 raw_spinlock_t lock;
55 char pad[L1_CACHE_BYTES];
56 -} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
57 +} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
58 + [0 ... (NR_LOCKS - 1)] = {
59 + .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
60 + },
61 +};
62
63 static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
64 {
65 @@ -173,14 +177,3 @@ int atomic64_add_unless(atomic64_t *v, l
66 return ret;
67 }
68 EXPORT_SYMBOL(atomic64_add_unless);
69 -
70 -static int init_atomic64_lock(void)
71 -{
72 - int i;
73 -
74 - for (i = 0; i < NR_LOCKS; ++i)
75 - raw_spin_lock_init(&atomic64_lock[i].lock);
76 - return 0;
77 -}
78 -
79 -pure_initcall(init_atomic64_lock);