1 From: Russ Anderson <rja@sgi.com>
2 Subject: ia64: cpe_migrate.ko causes deadlock.
5 schedule_on_each_cpu() deadlocks when called from an event thread.
6 Change cpe_migrate to use a dedicated kthread to avoid the deadlock.
8 Signed-off-by: Russ Anderson <rja@sgi.com>
9 Acked-by: Raymund Will <rw@suse.de>
12 arch/ia64/kernel/cpe_migrate.c | 72 +++++++++++++++++++++++++++++++----------
13 1 file changed, 56 insertions(+), 16 deletions(-)
15 Index: linux/arch/ia64/kernel/cpe_migrate.c
16 ===================================================================
17 --- linux.orig/arch/ia64/kernel/cpe_migrate.c 2009-01-09 11:37:47.130269369 -0600
18 +++ linux/arch/ia64/kernel/cpe_migrate.c 2009-01-09 11:44:43.658280930 -0600
20 #include <linux/page-isolation.h>
21 #include <linux/memcontrol.h>
22 #include <linux/kobject.h>
23 +#include <linux/kthread.h>
26 #include <asm/system.h>
27 @@ -40,12 +41,15 @@ static struct cpe_info cpe[CE_HISTORY_LE
28 static int cpe_polling_enabled = 1;
31 -static int work_scheduled;
32 static int mstat_cannot_isolate;
33 static int mstat_failed_to_discard;
34 static int mstat_already_marked;
35 static int mstat_already_on_list;
37 +/* IRQ handler notifies this wait queue on receipt of an IRQ */
38 +DECLARE_WAIT_QUEUE_HEAD(cpe_activate_IRQ_wq);
39 +static DECLARE_COMPLETION(kthread_cpe_migrated_exited);
41 DEFINE_SPINLOCK(cpe_migrate_lock);
44 @@ -159,12 +163,12 @@ ia64_mca_cpe_move_page(u64 paddr, u32 no
48 - * ia64_mca_cpe_migrate
49 - * The worker that does the actual migration. It pulls a
50 - * physical address off the list and calls the migration code.
52 + * Pulls the physical address off the list and calls the migration code.
53 + * Will process all the addresses on the list.
56 -ia64_mca_cpe_migrate(struct work_struct *unused)
58 +cpe_process_queue(void)
62 @@ -192,10 +196,36 @@ ia64_mca_cpe_migrate(struct work_struct
65 } while (cpe_tail != cpe_head);
73 + return (cpe_head == cpe_tail) && (!cpe[cpe_head].paddr);
77 + * kthread_cpe_migrate
78 + * kthread_cpe_migrate is created at module load time and lives
79 + * until the module is removed. When not active, it will sleep.
82 +kthread_cpe_migrate(void *ignore)
84 + while (cpe_active) {
88 + (void)wait_event_interruptible(cpe_activate_IRQ_wq,
89 + (!cpe_list_empty() ||
91 + cpe_process_queue(); /* process work */
93 + complete(&kthread_cpe_migrated_exited);
97 -static DECLARE_WORK(cpe_enable_work, ia64_mca_cpe_migrate);
98 DEFINE_SPINLOCK(cpe_list_lock);
101 @@ -227,10 +257,7 @@ cpe_setup_migrate(void *rec)
105 - if ((cpe_head != cpe_tail) || (cpe[cpe_head].paddr != 0))
109 + if (!cpe_list_empty())
110 for (i = 0; i < CE_HISTORY_LENGTH; i++) {
111 if (PAGE_ALIGN(cpe[i].paddr) == PAGE_ALIGN(paddr)) {
112 mstat_already_on_list++;
113 @@ -255,10 +282,7 @@ cpe_setup_migrate(void *rec)
115 spin_unlock(&cpe_list_lock);
117 - if (!work_scheduled) {
118 - work_scheduled = 1;
119 - schedule_work(&cpe_enable_work);
121 + wake_up_interruptible(&cpe_activate_IRQ_wq);
125 @@ -395,12 +419,23 @@ static int __init
126 cpe_migrate_external_handler_init(void)
129 + struct task_struct *kthread;
131 error = sysfs_create_file(kernel_kobj, &badram_attr.attr);
136 + * set up the kthread
139 + kthread = kthread_run(kthread_cpe_migrate, NULL, "cpe_migrate");
140 + if (IS_ERR(kthread)) {
141 + complete(&kthread_cpe_migrated_exited);
146 * register external ce handler
148 if (ia64_reg_CE_extension(cpe_setup_migrate)) {
149 @@ -418,6 +453,11 @@ cpe_migrate_external_handler_exit(void)
151 /* unregister external mca handlers */
152 ia64_unreg_CE_extension();
155 + cpe_active = 0; /* tell kthread_cpe_migrate to exit */
156 + wake_up_interruptible(&cpe_activate_IRQ_wq);
157 + wait_for_completion(&kthread_cpe_migrated_exited);
159 sysfs_remove_file(kernel_kobj, &badram_attr.attr);