]> git.ipfire.org Git - people/pmueller/ipfire-2.x.git/blob - src/patches/suse-2.6.27.31/patches.arch/s390-11-01-kernel_ds_pgtable_walk.patch
Add a patch to fix Intel E100 wake-on-lan problems.
[people/pmueller/ipfire-2.x.git] / src / patches / suse-2.6.27.31 / patches.arch / s390-11-01-kernel_ds_pgtable_walk.patch
1 From: Gerald Schaefer <geraldsc@de.ibm.com>
2 Subject: kernel: don't use pagetable walk for KERNEL_DS
3 References: bnc#484767,LTC#52176
4
5 Symptom: Access to KERNEL_DS via usercopy functions is very slow on
6 pre z9 hardware.
7 Problem: The usercopy functions do a pagetable walk even if KERNEL_DS
8 is set on pre z9 hardware. This was a workaroud for a common
9           is set on pre z9 hardware. This was a workaround for a common
10 Solution: Don't use pagetable walk for KERNEL_DS as the common code bug
11 has been fixed in SLES 11 GMC.
12
13 Acked-by: John Jolly <jjolly@suse.de>
14 ---
15 arch/s390/lib/uaccess_pt.c | 32 +++++++++++++++++++++++++-------
16 1 file changed, 25 insertions(+), 7 deletions(-)
17
18 Index: linux-sles11/arch/s390/lib/uaccess_pt.c
19 ===================================================================
20 --- linux-sles11.orig/arch/s390/lib/uaccess_pt.c
21 +++ linux-sles11/arch/s390/lib/uaccess_pt.c
22 @@ -43,9 +43,8 @@ static int __handle_fault(struct mm_stru
23 int ret = -EFAULT;
24 int fault;
25
26 - if (in_atomic() || segment_eq(get_fs(), KERNEL_DS))
27 + if (in_atomic())
28 return ret;
29 -
30 down_read(&mm->mmap_sem);
31 vma = find_vma(mm, address);
32 if (unlikely(!vma))
33 @@ -110,8 +109,6 @@ static size_t __user_copy_pt(unsigned lo
34 pte_t *pte;
35 void *from, *to;
36
37 - if (segment_eq(get_fs(), KERNEL_DS))
38 - mm = &init_mm;
39 done = 0;
40 retry:
41 spin_lock(&mm->page_table_lock);
42 @@ -185,6 +182,10 @@ size_t copy_from_user_pt(size_t n, const
43 {
44 size_t rc;
45
46 + if (segment_eq(get_fs(), KERNEL_DS)) {
47 + memcpy(to, (void __kernel __force *) from, n);
48 + return 0;
49 + }
50 rc = __user_copy_pt((unsigned long) from, to, n, 0);
51 if (unlikely(rc))
52 memset(to + n - rc, 0, rc);
53 @@ -193,6 +194,10 @@ size_t copy_from_user_pt(size_t n, const
54
55 size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
56 {
57 + if (segment_eq(get_fs(), KERNEL_DS)) {
58 + memcpy((void __kernel __force *) to, from, n);
59 + return 0;
60 + }
61 return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
62 }
63
64 @@ -200,6 +205,10 @@ static size_t clear_user_pt(size_t n, vo
65 {
66 long done, size, ret;
67
68 + if (segment_eq(get_fs(), KERNEL_DS)) {
69 + memset((void __kernel __force *) to, 0, n);
70 + return 0;
71 + }
72 done = 0;
73 do {
74 if (n - done > PAGE_SIZE)
75 @@ -225,7 +234,7 @@ static size_t strnlen_user_pt(size_t cou
76 size_t len_str;
77
78 if (segment_eq(get_fs(), KERNEL_DS))
79 - mm = &init_mm;
80 + return strnlen((const char __kernel __force *) src, count) + 1;
81 done = 0;
82 retry:
83 spin_lock(&mm->page_table_lock);
84 @@ -267,6 +276,13 @@ static size_t strncpy_from_user_pt(size_
85 return -EFAULT;
86 if (n > count)
87 n = count;
88 + if (segment_eq(get_fs(), KERNEL_DS)) {
89 + memcpy(dst, (const char __kernel __force *) src, n);
90 + if (dst[n-1] == '\0')
91 + return n-1;
92 + else
93 + return n;
94 + }
95 if (__user_copy_pt((unsigned long) src, dst, n, 0))
96 return -EFAULT;
97 if (dst[n-1] == '\0')
98 @@ -286,8 +302,10 @@ static size_t copy_in_user_pt(size_t n,
99 pte_t *pte_from, *pte_to;
100 int write_user;
101
102 - if (segment_eq(get_fs(), KERNEL_DS))
103 - mm = &init_mm;
104 + if (segment_eq(get_fs(), KERNEL_DS)) {
105 + memcpy((void __force *) to, (void __force *) from, n);
106 + return 0;
107 + }
108 done = 0;
109 retry:
110 spin_lock(&mm->page_table_lock);