From: Gerald Schaefer <geraldsc@de.ibm.com>
Subject: kernel: fix dynamic TASK_SIZE handling.
References: bnc#484767,LTC#52259

Symptom:     System crash (memory overwrite) on access to /proc/<pid>/pagemap.
Problem:     pagemap_read() is using TASK_SIZE_OF to determine the address
             range for the generic page table walker. With dynamic page table
             upgrades on s390, this does not reflect the true task size, but
             the maximum task size, with the maximum level of page tables. If
             a process has not yet mmapped enough memory, the page tables will
             not be completely upgraded and the generic page table walker will
             access (and write) beyond the page tables.
Solution:    Change TASK_SIZE/TASK_SIZE_OF to reflect the current size of the
             address space.
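
For illustration only (not part of this patch): a minimal user-space sketch,
under stated assumptions, of the access pattern described above. It reads
/proc/self/pagemap at an offset derived from a virtual address that is assumed
to lie above the task's current page table coverage; the 8-byte-per-page entry
size follows the pagemap format, while the chosen address and the behaviour on
an unfixed kernel are assumptions, not a guaranteed reproducer.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	/* Illustrative address, assumed to lie above the current page table limit. */
	uint64_t vaddr = 1ULL << 43;
	/* /proc/<pid>/pagemap holds one 8-byte entry per virtual page. */
	off_t offset = (off_t)(vaddr / (uint64_t) page_size) * 8;
	uint64_t entry;
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* On an unfixed kernel the page table walk behind this read can run
	 * beyond the not-yet-upgraded page tables. */
	if (pread(fd, &entry, sizeof(entry), offset) == (ssize_t) sizeof(entry))
		printf("pagemap entry for 0x%llx: 0x%llx\n",
		       (unsigned long long) vaddr, (unsigned long long) entry);
	close(fd);
	return 0;
}
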
Acked-by: John Jolly <jjolly@suse.de>
---
 arch/s390/include/asm/mman.h      |    5 +++
 arch/s390/include/asm/processor.h |    5 +--
 arch/s390/mm/mmap.c               |   48 +++++++++++++++++++++++++-------------
 arch/s390/mm/pgtable.c            |    2 +
 4 files changed, 41 insertions(+), 19 deletions(-)

Index: linux-2.6.27/arch/s390/include/asm/mman.h
===================================================================
--- linux-2.6.27.orig/arch/s390/include/asm/mman.h
+++ linux-2.6.27/arch/s390/include/asm/mman.h
@@ -22,4 +22,9 @@
 #define MCL_CURRENT 1 /* lock all current mappings */
 #define MCL_FUTURE 2 /* lock all future mappings */

+#if defined(__KERNEL__) && !defined(__ASSEMBLY__) && defined(CONFIG_64BIT)
+int s390_mmap_check(unsigned long addr, unsigned long len);
+#define arch_mmap_check(addr,len,flags) s390_mmap_check(addr,len)
+#endif
+
 #endif /* __S390_MMAN_H__ */
Index: linux-2.6.27/arch/s390/include/asm/processor.h
===================================================================
--- linux-2.6.27.orig/arch/s390/include/asm/processor.h
+++ linux-2.6.27/arch/s390/include/asm/processor.h
@@ -60,7 +60,7 @@ extern void print_cpu_info(struct cpuinf
 extern int get_cpu_capability(unsigned int *);

 /*
- * User space process size: 2GB for 31 bit, 4TB for 64 bit.
+ * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
 */
 #ifndef __s390x__

@@ -69,8 +69,7 @@ extern int get_cpu_capability(unsigned i

 #else /* __s390x__ */

-#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk,TIF_31BIT) ? \
- (1UL << 31) : (1UL << 53))
+#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit)
 #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \
 (1UL << 30) : (1UL << 41))
 #define TASK_SIZE TASK_SIZE_OF(current)
Index: linux-2.6.27/arch/s390/mm/mmap.c
===================================================================
--- linux-2.6.27.orig/arch/s390/mm/mmap.c
+++ linux-2.6.27/arch/s390/mm/mmap.c
@@ -35,7 +35,7 @@
 * Leave an at least ~128 MB hole.
 */
 #define MIN_GAP (128*1024*1024)
-#define MAX_GAP (TASK_SIZE/6*5)
+#define MAX_GAP (STACK_TOP/6*5)

 static inline unsigned long mmap_base(void)
 {
@@ -46,7 +46,7 @@ static inline unsigned long mmap_base(vo
 else if (gap > MAX_GAP)
 gap = MAX_GAP;

- return TASK_SIZE - (gap & PAGE_MASK);
+ return STACK_TOP - (gap & PAGE_MASK);
 }

 static inline int mmap_is_legacy(void)
@@ -89,42 +89,58 @@ EXPORT_SYMBOL_GPL(arch_pick_mmap_layout)

 #else

+int s390_mmap_check(unsigned long addr, unsigned long len)
+{
+ if (!test_thread_flag(TIF_31BIT) &&
+ len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
+ return crst_table_upgrade(current->mm, 1UL << 53);
+ return 0;
+}
+
 static unsigned long
 s390_get_unmapped_area(struct file *filp, unsigned long addr,
 unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 struct mm_struct *mm = current->mm;
+ unsigned long area;
 int rc;

- addr = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
- if (addr & ~PAGE_MASK)
- return addr;
- if (unlikely(mm->context.asce_limit < addr + len)) {
- rc = crst_table_upgrade(mm, addr + len);
+ area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
+ if (!(area & ~PAGE_MASK))
+ return area;
+ if (area == -ENOMEM &&
+ !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) {
+ /* Upgrade the page table to 4 levels and retry. */
+ rc = crst_table_upgrade(mm, 1UL << 53);
 if (rc)
 return (unsigned long) rc;
+ area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
 }
- return addr;
+ return area;
 }

 static unsigned long
-s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
 const unsigned long len, const unsigned long pgoff,
 const unsigned long flags)
 {
 struct mm_struct *mm = current->mm;
- unsigned long addr = addr0;
+ unsigned long area;
 int rc;

- addr = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
- if (addr & ~PAGE_MASK)
- return addr;
- if (unlikely(mm->context.asce_limit < addr + len)) {
- rc = crst_table_upgrade(mm, addr + len);
+ area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
+ if (!(area & ~PAGE_MASK))
+ return area;
+ if (area == -ENOMEM &&
+ !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) {
+ /* Upgrade the page table to 4 levels and retry. */
+ rc = crst_table_upgrade(mm, 1UL << 53);
 if (rc)
 return (unsigned long) rc;
+ area = arch_get_unmapped_area_topdown(filp, addr, len,
+ pgoff, flags);
 }
- return addr;
+ return area;
 }
 /*
 * This function, called very early during the creation of a new
Index: linux-2.6.27/arch/s390/mm/pgtable.c
===================================================================
--- linux-2.6.27.orig/arch/s390/mm/pgtable.c
+++ linux-2.6.27/arch/s390/mm/pgtable.c
@@ -117,6 +117,7 @@ repeat:
 crst_table_init(table, entry);
 pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
 mm->pgd = (pgd_t *) table;
+ mm->task_size = mm->context.asce_limit;
 table = NULL;
 }
 spin_unlock(&mm->page_table_lock);
@@ -154,6 +155,7 @@ void crst_table_downgrade(struct mm_stru
 BUG();
 }
 mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
+ mm->task_size = mm->context.asce_limit;
 crst_table_free(mm, (unsigned long *) pgd);
 }
 update_mm(mm, current);