1 From: Nick Piggin <npiggin@novell.com>
2 Subject: Add mark_rodata_rw() to un-protect read-only kernel code pages
5 CONFIG_DEBUG_RODATA presents a problem for antivirus vendors who do not have a
6 clean user-space interface for getting virus scanning triggered, and
7 currently resort to patching the kernel code instead (presumably the
8 system call table). With CONFIG_DEBUG_RODATA enabled, the kernel rejects such
11 Add a new mark_rodata_rw() function to un-protect the read-only kernel code
12 pages for now, and export mark_rodata_ro() and mark_rodata_rw() to modules.
14 This is not meant as a permanent workaround, and will be removed again in the
17 Acked-by: Andreas Gruenbacher <agruen@suse.de>
20 arch/x86/mm/init_32.c | 22 ++++++++++++++++++++++
21 arch/x86/mm/init_64.c | 17 +++++++++++++++++
22 arch/x86/mm/pageattr.c | 30 ++++++++++++++++++++++++++++--
23 include/asm-x86/cacheflush.h | 3 +++
24 4 files changed, 70 insertions(+), 2 deletions(-)
26 --- a/arch/x86/mm/init_32.c
27 +++ b/arch/x86/mm/init_32.c
28 @@ -1073,6 +1073,28 @@ void mark_rodata_ro(void)
29 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
32 +EXPORT_SYMBOL(mark_rodata_ro);
34 +void mark_rodata_rw(void)
36 + unsigned long start = PFN_ALIGN(_text);
37 + unsigned long size = PFN_ALIGN(_etext) - start;
39 +#ifndef CONFIG_DYNAMIC_FTRACE
40 + /* Dynamic tracing modifies the kernel text section */
41 + set_pages_rw_force(virt_to_page(start), size >> PAGE_SHIFT);
42 + printk(KERN_INFO "Write enabling the kernel text: %luk\n",
45 +#endif /* CONFIG_DYNAMIC_FTRACE */
48 + size = (unsigned long)__end_rodata - start;
49 + set_pages_rw_force(virt_to_page(start), size >> PAGE_SHIFT);
50 + printk(KERN_INFO "Write enabling the kernel read-only data: %luk\n",
53 +EXPORT_SYMBOL(mark_rodata_rw);
56 void free_init_pages(char *what, unsigned long begin, unsigned long end)
57 --- a/arch/x86/mm/init_64.c
58 +++ b/arch/x86/mm/init_64.c
59 @@ -896,7 +896,24 @@ void mark_rodata_ro(void)
60 set_memory_ro(start, (end-start) >> PAGE_SHIFT);
63 +EXPORT_SYMBOL(mark_rodata_ro);
65 +void mark_rodata_rw(void)
67 + unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata);
68 + unsigned long rodata_start =
69 + ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
71 +#ifdef CONFIG_DYNAMIC_FTRACE
72 + /* Dynamic tracing modifies the kernel text section */
73 + start = rodata_start;
76 + printk(KERN_INFO "Write enabling the kernel read-only data: %luk\n",
77 + (end - start) >> 10);
78 + set_memory_rw_force(start, (end - start) >> PAGE_SHIFT);
80 +EXPORT_SYMBOL(mark_rodata_rw);
83 #ifdef CONFIG_BLK_DEV_INITRD
84 --- a/arch/x86/mm/pageattr.c
85 +++ b/arch/x86/mm/pageattr.c
86 @@ -190,6 +190,8 @@ static void cpa_flush_range(unsigned lon
90 +static int static_protections_allow_rodata __read_mostly;
93 * Certain areas of memory on x86 require very specific protection flags,
94 * for example the BIOS area or kernel text. Callers don't always get this
95 @@ -221,8 +223,10 @@ static inline pgprot_t static_protection
96 * catches all aliases.
98 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
99 - __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
100 - pgprot_val(forbidden) |= _PAGE_RW;
101 + __pa((unsigned long)__end_rodata) >> PAGE_SHIFT)) {
102 + if (!static_protections_allow_rodata)
103 + pgprot_val(forbidden) |= _PAGE_RW;
106 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
108 @@ -956,6 +960,21 @@ int set_memory_rw(unsigned long addr, in
109 return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
112 +/* hack: bypass kernel rodata section static_protections check. */
113 +int set_memory_rw_force(unsigned long addr, int numpages)
115 + static DEFINE_MUTEX(lock);
119 + static_protections_allow_rodata = 1;
120 + ret = change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
121 + static_protections_allow_rodata = 0;
122 + mutex_unlock(&lock);
127 int set_memory_np(unsigned long addr, int numpages)
129 return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
130 @@ -1013,6 +1032,13 @@ int set_pages_rw(struct page *page, int
131 return set_memory_rw(addr, numpages);
134 +int set_pages_rw_force(struct page *page, int numpages)
136 + unsigned long addr = (unsigned long)page_address(page);
138 + return set_memory_rw_force(addr, numpages);
141 #ifdef CONFIG_DEBUG_PAGEALLOC
143 static int __set_pages_p(struct page *page, int numpages)
144 --- a/include/asm-x86/cacheflush.h
145 +++ b/include/asm-x86/cacheflush.h
146 @@ -63,6 +63,7 @@ int set_memory_x(unsigned long addr, int
147 int set_memory_nx(unsigned long addr, int numpages);
148 int set_memory_ro(unsigned long addr, int numpages);
149 int set_memory_rw(unsigned long addr, int numpages);
150 +int set_memory_rw_force(unsigned long addr, int numpages);
151 int set_memory_np(unsigned long addr, int numpages);
152 int set_memory_4k(unsigned long addr, int numpages);
154 @@ -92,6 +93,7 @@ int set_pages_x(struct page *page, int n
155 int set_pages_nx(struct page *page, int numpages);
156 int set_pages_ro(struct page *page, int numpages);
157 int set_pages_rw(struct page *page, int numpages);
158 +int set_pages_rw_force(struct page *page, int numpages);
161 void clflush_cache_range(void *addr, unsigned int size);
162 @@ -100,6 +102,7 @@ void cpa_init(void);
164 #ifdef CONFIG_DEBUG_RODATA
165 void mark_rodata_ro(void);
166 +void mark_rodata_rw(void);
167 extern const int rodata_test_data;