1 From: Gerald Schaefer <geraldsc@de.ibm.com>
2 Subject: kernel: Fix user copy functions (pagetable walk) with KERNEL_DS.
5 Symptom: Kernel oops / system hang on user program core dump on pre-z9 hardware
7 Problem: Passing incorrect addresses to user copy functions is not handled
8 correctly when address spaces are switched and KERNEL_DS is set.
9 Solution: Verify addresses by handling the KERNEL_DS case in the same way as
10 USER_DS (pagetable walk). Also disable switch_amode by default,
11 because the pagetable walk has a negative performance impact.
13 Acked-by: John Jolly <jjolly@suse.de>
15 arch/s390/kernel/setup.c | 4 ----
16 arch/s390/lib/uaccess_pt.c | 32 +++++++-------------------------
17 arch/s390/mm/pgtable.c | 4 ++++
18 3 files changed, 11 insertions(+), 29 deletions(-)
20 Index: linux-sles11/arch/s390/kernel/setup.c
21 ===================================================================
22 --- linux-sles11.orig/arch/s390/kernel/setup.c
23 +++ linux-sles11/arch/s390/kernel/setup.c
24 @@ -285,11 +285,7 @@ static int __init early_parse_mem(char *
25 early_param("mem", early_parse_mem);
27 #ifdef CONFIG_S390_SWITCH_AMODE
29 -unsigned int switch_amode = 1;
31 unsigned int switch_amode = 0;
33 EXPORT_SYMBOL_GPL(switch_amode);
35 static int set_amode_and_uaccess(unsigned long user_amode,
36 Index: linux-sles11/arch/s390/mm/pgtable.c
37 ===================================================================
38 --- linux-sles11.orig/arch/s390/mm/pgtable.c
39 +++ linux-sles11/arch/s390/mm/pgtable.c
40 @@ -256,6 +256,10 @@ int s390_enable_sie(void)
41 struct task_struct *tsk = current;
42 struct mm_struct *mm, *old_mm;
44 + /* Do we have switched amode? If no, we cannot do sie */
48 /* Do we have pgstes? if yes, we are done */
49 if (tsk->mm->context.pgstes)
51 Index: linux-sles11/arch/s390/lib/uaccess_pt.c
52 ===================================================================
53 --- linux-sles11.orig/arch/s390/lib/uaccess_pt.c
54 +++ linux-sles11/arch/s390/lib/uaccess_pt.c
55 @@ -43,8 +43,9 @@ static int __handle_fault(struct mm_stru
60 + if (in_atomic() || segment_eq(get_fs(), KERNEL_DS))
63 down_read(&mm->mmap_sem);
64 vma = find_vma(mm, address);
66 @@ -109,6 +110,8 @@ static size_t __user_copy_pt(unsigned lo
70 + if (segment_eq(get_fs(), KERNEL_DS))
74 spin_lock(&mm->page_table_lock);
75 @@ -182,10 +185,6 @@ size_t copy_from_user_pt(size_t n, const
79 - if (segment_eq(get_fs(), KERNEL_DS)) {
80 - memcpy(to, (void __kernel __force *) from, n);
83 rc = __user_copy_pt((unsigned long) from, to, n, 0);
85 memset(to + n - rc, 0, rc);
86 @@ -194,10 +193,6 @@ size_t copy_from_user_pt(size_t n, const
88 size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
90 - if (segment_eq(get_fs(), KERNEL_DS)) {
91 - memcpy((void __kernel __force *) to, from, n);
94 return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
97 @@ -205,10 +200,6 @@ static size_t clear_user_pt(size_t n, vo
101 - if (segment_eq(get_fs(), KERNEL_DS)) {
102 - memset((void __kernel __force *) to, 0, n);
107 if (n - done > PAGE_SIZE)
108 @@ -234,7 +225,7 @@ static size_t strnlen_user_pt(size_t cou
111 if (segment_eq(get_fs(), KERNEL_DS))
112 - return strnlen((const char __kernel __force *) src, count) + 1;
116 spin_lock(&mm->page_table_lock);
117 @@ -276,13 +267,6 @@ static size_t strncpy_from_user_pt(size_
121 - if (segment_eq(get_fs(), KERNEL_DS)) {
122 - memcpy(dst, (const char __kernel __force *) src, n);
123 - if (dst[n-1] == '\0')
128 if (__user_copy_pt((unsigned long) src, dst, n, 0))
130 if (dst[n-1] == '\0')
131 @@ -302,10 +286,8 @@ static size_t copy_in_user_pt(size_t n,
132 pte_t *pte_from, *pte_to;
135 - if (segment_eq(get_fs(), KERNEL_DS)) {
136 - memcpy((void __force *) to, (void __force *) from, n);
139 + if (segment_eq(get_fs(), KERNEL_DS))
143 spin_lock(&mm->page_table_lock);