From: Nick Piggin <npiggin@novell.com>
Subject: Add mark_rodata_rw() to un-protect read-only kernel code pages
References: bnc#439348

CONFIG_RODATA presents a problem for antivirus vendors who do not have a
clean user-space interface for getting virus scanning triggered, and
currently resort to patching the kernel code instead (presumably the
system call table). With CONFIG_RODATA enabled, the kernel rejects such
write accesses.

Add a new mark_rodata_rw() function to un-protect the read-only kernel code
pages for now, and export mark_rodata_ro() and mark_rodata_rw() to modules.

This is not meant as a permanent workaround, and will be removed again in the
next release!

Acked-by: Andreas Gruenbacher <agruen@suse.de>

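For illustration, a minimal sketch of the intended call pattern (the module
below is hypothetical and not part of this patch; it assumes a
CONFIG_DEBUG_RODATA kernel so that both exported symbols are available):

/*
 * Hypothetical example module: bracket kernel-text patching between the
 * two exported calls so the read-only protection is restored afterwards.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/cacheflush.h>

static int __init rodata_rw_example_init(void)
{
	mark_rodata_rw();	/* kernel text/rodata become writable */

	/* ... patch kernel code here, e.g. install a scanning hook ... */

	mark_rodata_ro();	/* re-establish the read-only protection */
	return 0;
}

static void __exit rodata_rw_example_exit(void)
{
}

module_init(rodata_rw_example_init);
module_exit(rodata_rw_example_exit);
MODULE_LICENSE("GPL");
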
---
 arch/x86/mm/init_32.c        |   22 ++++++++++++++++++++++
 arch/x86/mm/init_64.c        |   17 +++++++++++++++++
 arch/x86/mm/pageattr.c       |   30 ++++++++++++++++++++++++++++--
 include/asm-x86/cacheflush.h |    3 +++
 4 files changed, 70 insertions(+), 2 deletions(-)

--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -1073,6 +1073,28 @@ void mark_rodata_ro(void)
 	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
 #endif
 }
+EXPORT_SYMBOL(mark_rodata_ro);
+
+void mark_rodata_rw(void)
+{
+	unsigned long start = PFN_ALIGN(_text);
+	unsigned long size = PFN_ALIGN(_etext) - start;
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+	/* Dynamic tracing modifies the kernel text section */
+	set_pages_rw_force(virt_to_page(start), size >> PAGE_SHIFT);
+	printk(KERN_INFO "Write enabling the kernel text: %luk\n",
+		size >> 10);
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+	start += size;
+	size = (unsigned long)__end_rodata - start;
+	set_pages_rw_force(virt_to_page(start), size >> PAGE_SHIFT);
+	printk(KERN_INFO "Write enabling the kernel read-only data: %luk\n",
+		size >> 10);
+}
+EXPORT_SYMBOL(mark_rodata_rw);
 #endif
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -896,7 +896,24 @@ void mark_rodata_ro(void)
 	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
 #endif
 }
+EXPORT_SYMBOL(mark_rodata_ro);
 
+void mark_rodata_rw(void)
+{
+	unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata);
+	unsigned long rodata_start =
+		((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+	/* Dynamic tracing modifies the kernel text section */
+	start = rodata_start;
+#endif
+
+	printk(KERN_INFO "Write enabling the kernel read-only data: %luk\n",
+		(end - start) >> 10);
+	set_memory_rw_force(start, (end - start) >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL(mark_rodata_rw);
 #endif
 
 #ifdef CONFIG_BLK_DEV_INITRD
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -190,6 +190,8 @@ static void cpa_flush_range(unsigned lon
 	}
 }
 
+static int static_protections_allow_rodata __read_mostly;
+
 /*
  * Certain areas of memory on x86 require very specific protection flags,
  * for example the BIOS area or kernel text. Callers don't always get this
@@ -221,8 +223,10 @@ static inline pgprot_t static_protection
 	 * catches all aliases.
 	 */
 	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
-		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
-		pgprot_val(forbidden) |= _PAGE_RW;
+		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT)) {
+		if (!static_protections_allow_rodata)
+			pgprot_val(forbidden) |= _PAGE_RW;
+	}
 
 	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
 
@@ -956,6 +960,21 @@ int set_memory_rw(unsigned long addr, in
 	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
 }
 
+/* hack: bypass kernel rodata section static_protections check. */
+int set_memory_rw_force(unsigned long addr, int numpages)
+{
+	static DEFINE_MUTEX(lock);
+	int ret;
+
+	mutex_lock(&lock);
+	static_protections_allow_rodata = 1;
+	ret = change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
+	static_protections_allow_rodata = 0;
+	mutex_unlock(&lock);
+
+	return ret;
+}
+
 int set_memory_np(unsigned long addr, int numpages)
 {
 	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
@@ -1013,6 +1032,13 @@ int set_pages_rw(struct page *page, int 
 	return set_memory_rw(addr, numpages);
 }
 
+int set_pages_rw_force(struct page *page, int numpages)
+{
+	unsigned long addr = (unsigned long)page_address(page);
+
+	return set_memory_rw_force(addr, numpages);
+}
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
 
 static int __set_pages_p(struct page *page, int numpages)
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -63,6 +63,7 @@ int set_memory_x(unsigned long addr, int
 int set_memory_nx(unsigned long addr, int numpages);
 int set_memory_ro(unsigned long addr, int numpages);
 int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_rw_force(unsigned long addr, int numpages);
 int set_memory_np(unsigned long addr, int numpages);
 int set_memory_4k(unsigned long addr, int numpages);
 
@@ -92,6 +93,7 @@ int set_pages_x(struct page *page, int n
 int set_pages_nx(struct page *page, int numpages);
 int set_pages_ro(struct page *page, int numpages);
 int set_pages_rw(struct page *page, int numpages);
+int set_pages_rw_force(struct page *page, int numpages);
 
 
 void clflush_cache_range(void *addr, unsigned int size);
@@ -100,6 +102,7 @@ void cpa_init(void);
 
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
+void mark_rodata_rw(void);
 extern const int rodata_test_data;
 #endif
 
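
A usage note on the _force variants added above (the helper below is a
hypothetical sketch, not part of this patch): set_memory_rw() on a rodata
page is silently filtered by static_protections(), which masks _PAGE_RW out
of the requested protections, whereas set_memory_rw_force() temporarily
lifts that filter under a mutex so the RW bit actually reaches the page
tables.

#include <linux/kernel.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>

/* Hypothetical demonstration of the _force semantics. */
static void __maybe_unused rodata_force_example(void)
{
	unsigned long addr = (unsigned long)__start_rodata;

	/* Filtered by static_protections(): the page stays read-only. */
	set_memory_rw(addr, 1);

	/* Bypasses the rodata check: the page really becomes writable. */
	set_memory_rw_force(addr, 1);

	/* Put the read-only protection back. */
	set_memory_ro(addr, 1);
}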