From: Nick Piggin <npiggin@novell.com>
Subject: Add mark_rodata_rw() to un-protect read-only kernel code pages
References: bnc#439348

CONFIG_RODATA presents a problem for antivirus vendors who do not have a
clean user-space interface for getting virus scanning triggered, and
currently resort to patching the kernel code instead (presumably the
system call table). With CONFIG_RODATA enabled, the kernel rejects such
write accesses.

Add a new mark_rodata_rw() function to un-protect the read-only kernel code
pages for now, and export mark_rodata_ro() and mark_rodata_rw() to modules.

This is not meant as a permanent workaround, and will be removed again in the
next release!

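With both symbols exported, a module that needs to modify kernel text can
bracket its modification with these calls. A minimal sketch of such a module
follows (the module, its name and its hook are illustrative only; the extern
prototypes are declared locally for the sketch):

	#include <linux/module.h>

	/* prototypes for the symbols exported by this patch
	 * (declared here for the sketch) */
	extern void mark_rodata_ro(void);
	extern void mark_rodata_rw(void);

	static int __init scanner_hook_init(void)
	{
		mark_rodata_rw();	/* write-enable kernel text/rodata */
		/* ... install the scanning hook in kernel text here ... */
		mark_rodata_ro();	/* restore the read-only protection */
		return 0;
	}
	module_init(scanner_hook_init);

	MODULE_LICENSE("GPL");
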
Acked-by: Andreas Gruenbacher <agruen@suse.de>

Automatically created from "patches.suse/x86-mark_rodata_rw.patch" by xen-port-patches.py

--- sle11-2009-03-16.orig/arch/x86/mm/init_32-xen.c	2009-03-16 16:38:33.000000000 +0100
+++ sle11-2009-03-16/arch/x86/mm/init_32-xen.c	2009-03-16 16:39:50.000000000 +0100
@@ -1125,6 +1125,28 @@ void mark_rodata_ro(void)
 	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
 #endif
 }
+EXPORT_SYMBOL(mark_rodata_ro);
+
+void mark_rodata_rw(void)
+{
+	unsigned long start = PFN_ALIGN(_text);
+	unsigned long size = PFN_ALIGN(_etext) - start;
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+	/* Dynamic tracing modifies the kernel text section */
+	set_pages_rw_force(virt_to_page(start), size >> PAGE_SHIFT);
+	printk(KERN_INFO "Write enabling the kernel text: %luk\n",
+		size >> 10);
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+	start += size;
+	size = (unsigned long)__end_rodata - start;
+	set_pages_rw_force(virt_to_page(start), size >> PAGE_SHIFT);
+	printk(KERN_INFO "Write enabling the kernel read-only data: %luk\n",
+		size >> 10);
+}
+EXPORT_SYMBOL(mark_rodata_rw);
 #endif
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
--- sle11-2009-03-16.orig/arch/x86/mm/init_64-xen.c	2009-03-16 16:39:48.000000000 +0100
+++ sle11-2009-03-16/arch/x86/mm/init_64-xen.c	2009-03-16 16:39:50.000000000 +0100
@@ -1202,7 +1202,24 @@ void mark_rodata_ro(void)
 	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
 #endif
 }
+EXPORT_SYMBOL(mark_rodata_ro);
 
+void mark_rodata_rw(void)
+{
+	unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata);
+#ifdef CONFIG_DYNAMIC_FTRACE
+	unsigned long rodata_start =
+		((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
+
+	/* Dynamic tracing modifies the kernel text section */
+	start = rodata_start;
+#endif
+
+	printk(KERN_INFO "Write enabling the kernel read-only data: %luk\n",
+		(end - start) >> 10);
+	set_memory_rw_force(start, (end - start) >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL(mark_rodata_rw);
 #endif
 
 #ifdef CONFIG_BLK_DEV_INITRD
--- sle11-2009-03-16.orig/arch/x86/mm/pageattr-xen.c	2009-03-16 16:38:38.000000000 +0100
+++ sle11-2009-03-16/arch/x86/mm/pageattr-xen.c	2009-03-16 16:39:50.000000000 +0100
@@ -190,6 +190,8 @@ static void cpa_flush_range(unsigned lon
 	}
 }
 
+static int static_protections_allow_rodata __read_mostly;
+
 /*
  * Certain areas of memory on x86 require very specific protection flags,
  * for example the BIOS area or kernel text. Callers don't always get this
@@ -223,8 +225,10 @@ static inline pgprot_t static_protection
 	 * catches all aliases.
 	 */
 	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
-		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
-		pgprot_val(forbidden) |= _PAGE_RW;
+		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT)) {
+		if (!static_protections_allow_rodata)
+			pgprot_val(forbidden) |= _PAGE_RW;
+	}
 
 	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
 
@@ -1014,6 +1018,21 @@ int set_memory_rw(unsigned long addr, in
 	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
 }
 
+/* hack: bypass kernel rodata section static_protections check. */
+int set_memory_rw_force(unsigned long addr, int numpages)
+{
+	static DEFINE_MUTEX(lock);
+	int ret;
+
+	mutex_lock(&lock);
+	static_protections_allow_rodata = 1;
+	ret = change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
+	static_protections_allow_rodata = 0;
+	mutex_unlock(&lock);
+
+	return ret;
+}
+
 int set_memory_np(unsigned long addr, int numpages)
 {
 	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
@@ -1071,6 +1090,13 @@ int set_pages_rw(struct page *page, int
 	return set_memory_rw(addr, numpages);
 }
 
+int set_pages_rw_force(struct page *page, int numpages)
+{
+	unsigned long addr = (unsigned long)page_address(page);
+
+	return set_memory_rw_force(addr, numpages);
+}
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
 
 static int __set_pages_p(struct page *page, int numpages)