Commit: 2cb7cef9
1 | From: Russ Anderson <rja@sgi.com> |
2 | Subject: ia64: cpe_migrate.ko causes deadlock. | |
3 | References: bnc#464676 | |
4 | ||
5 | schedule_on_each_cpu() deadlocks when called from an event thread. | |
6 | Change cpe_migrate to use a kthread to avoid the problem. | |
7 | ||
8 | Signed-off-by: Russ Anderson <rja@sgi.com> | |
9 | Acked-by: Raymund Will <rw@suse.de> | |
10 | ||
11 | --- | |
12 | arch/ia64/kernel/cpe_migrate.c | 72 +++++++++++++++++++++++++++++++---------- | |
13 | 1 file changed, 56 insertions(+), 16 deletions(-) | |
14 | ||
15 | Index: linux/arch/ia64/kernel/cpe_migrate.c | |
16 | =================================================================== | |
17 | --- linux.orig/arch/ia64/kernel/cpe_migrate.c 2009-01-09 11:37:47.130269369 -0600 | |
18 | +++ linux/arch/ia64/kernel/cpe_migrate.c 2009-01-09 11:44:43.658280930 -0600 | |
19 | @@ -22,6 +22,7 @@ | |
20 | #include <linux/page-isolation.h> | |
21 | #include <linux/memcontrol.h> | |
22 | #include <linux/kobject.h> | |
23 | +#include <linux/kthread.h> | |
24 | ||
25 | #include <asm/page.h> | |
26 | #include <asm/system.h> | |
27 | @@ -40,12 +41,15 @@ static struct cpe_info cpe[CE_HISTORY_LE | |
28 | static int cpe_polling_enabled = 1; | |
29 | static int cpe_head; | |
30 | static int cpe_tail; | |
31 | -static int work_scheduled; | |
32 | static int mstat_cannot_isolate; | |
33 | static int mstat_failed_to_discard; | |
34 | static int mstat_already_marked; | |
35 | static int mstat_already_on_list; | |
36 | ||
37 | +/* IRQ handler notifies this wait queue on receipt of an IRQ */ | |
38 | +DECLARE_WAIT_QUEUE_HEAD(cpe_activate_IRQ_wq); | |
39 | +static DECLARE_COMPLETION(kthread_cpe_migrated_exited); | |
40 | +int cpe_active; | |
41 | DEFINE_SPINLOCK(cpe_migrate_lock); | |
42 | ||
43 | static void | |
44 | @@ -159,12 +163,12 @@ ia64_mca_cpe_move_page(u64 paddr, u32 no | |
45 | } | |
46 | ||
47 | /* | |
48 | - * ia64_mca_cpe_migrate | |
49 | - * The worker that does the actual migration. It pulls a | |
50 | - * physical address off the list and calls the migration code. | |
51 | + * cpe_process_queue | |
52 | + * Pulls the physical address off the list and calls the migration code. | |
53 | + * Will process all the addresses on the list. | |
54 | */ | |
55 | -static void | |
56 | -ia64_mca_cpe_migrate(struct work_struct *unused) | |
57 | +void | |
58 | +cpe_process_queue(void) | |
59 | { | |
60 | int ret; | |
61 | u64 paddr; | |
62 | @@ -192,10 +196,36 @@ ia64_mca_cpe_migrate(struct work_struct | |
63 | cpe_tail = 0; | |
64 | ||
65 | } while (cpe_tail != cpe_head); | |
66 | - work_scheduled = 0; | |
67 | + return; | |
68 | +} | |
69 | + | |
70 | +inline int | |
71 | +cpe_list_empty(void) | |
72 | +{ | |
73 | + return (cpe_head == cpe_tail) && (!cpe[cpe_head].paddr); | |
74 | +} | |
75 | + | |
76 | +/* | |
77 | + * kthread_cpe_migrate | |
78 | + * kthread_cpe_migrate is created at module load time and lives | |
79 | + * until the module is removed. When not active, it will sleep. | |
80 | + */ | |
81 | +static int | |
82 | +kthread_cpe_migrate(void *ignore) | |
83 | +{ | |
84 | + while (cpe_active) { | |
85 | + /* | |
86 | + * wait for work | |
87 | + */ | |
88 | + (void)wait_event_interruptible(cpe_activate_IRQ_wq, | |
89 | + (!cpe_list_empty() || | |
90 | + !cpe_active)); | |
91 | + cpe_process_queue(); /* process work */ | |
92 | + } | |
93 | + complete(&kthread_cpe_migrated_exited); | |
94 | + return 0; | |
95 | } | |
96 | ||
97 | -static DECLARE_WORK(cpe_enable_work, ia64_mca_cpe_migrate); | |
98 | DEFINE_SPINLOCK(cpe_list_lock); | |
99 | ||
100 | /* | |
101 | @@ -227,10 +257,7 @@ cpe_setup_migrate(void *rec) | |
102 | if (ret < 0) | |
103 | return -EINVAL; | |
104 | ||
105 | - if ((cpe_head != cpe_tail) || (cpe[cpe_head].paddr != 0)) | |
106 | - /* | |
107 | - * List not empty | |
108 | - */ | |
109 | + if (!cpe_list_empty()) | |
110 | for (i = 0; i < CE_HISTORY_LENGTH; i++) { | |
111 | if (PAGE_ALIGN(cpe[i].paddr) == PAGE_ALIGN(paddr)) { | |
112 | mstat_already_on_list++; | |
113 | @@ -255,10 +282,7 @@ cpe_setup_migrate(void *rec) | |
114 | } | |
115 | spin_unlock(&cpe_list_lock); | |
116 | ||
117 | - if (!work_scheduled) { | |
118 | - work_scheduled = 1; | |
119 | - schedule_work(&cpe_enable_work); | |
120 | - } | |
121 | + wake_up_interruptible(&cpe_activate_IRQ_wq); | |
122 | ||
123 | return 1; | |
124 | } | |
125 | @@ -395,12 +419,23 @@ static int __init | |
126 | cpe_migrate_external_handler_init(void) | |
127 | { | |
128 | int error; | |
129 | + struct task_struct *kthread; | |
130 | ||
131 | error = sysfs_create_file(kernel_kobj, &badram_attr.attr); | |
132 | if (error) | |
133 | return -EINVAL; | |
134 | ||
135 | /* | |
136 | + * set up the kthread | |
137 | + */ | |
138 | + cpe_active = 1; | |
139 | + kthread = kthread_run(kthread_cpe_migrate, NULL, "cpe_migrate"); | |
140 | + if (IS_ERR(kthread)) { | |
141 | + complete(&kthread_cpe_migrated_exited); | |
142 | + return -EFAULT; | |
143 | + } | |
144 | + | |
145 | + /* | |
146 | * register external ce handler | |
147 | */ | |
148 | if (ia64_reg_CE_extension(cpe_setup_migrate)) { | |
149 | @@ -418,6 +453,11 @@ cpe_migrate_external_handler_exit(void) | |
150 | { | |
151 | /* unregister external mca handlers */ | |
152 | ia64_unreg_CE_extension(); | |
153 | + | |
154 | + /* Stop kthread */ | |
155 | + cpe_active = 0; /* tell kthread_cpe_migrate to exit */ | |
156 | + wake_up_interruptible(&cpe_activate_IRQ_wq); | |
157 | + wait_for_completion(&kthread_cpe_migrated_exited); | |
158 | ||
159 | sysfs_remove_file(kernel_kobj, &badram_attr.attr); | |
160 | } | |
161 |