Commit | Line | Data |
---|---|---|
6883c528 PK |
1 | diff --git a/Documentation/dontdiff b/Documentation/dontdiff |
2 | index 9de9813..1462492 100644 | |
3 | --- a/Documentation/dontdiff | |
4 | +++ b/Documentation/dontdiff | |
5 | @@ -3,9 +3,11 @@ | |
6 | *.bc | |
7 | *.bin | |
8 | *.bz2 | |
9 | +*.c.[012]*.* | |
10 | *.cis | |
11 | *.cpio | |
12 | *.csp | |
13 | +*.dbg | |
14 | *.dsp | |
15 | *.dvi | |
16 | *.elf | |
17 | @@ -15,6 +17,7 @@ | |
18 | *.gcov | |
19 | *.gen.S | |
20 | *.gif | |
21 | +*.gmo | |
22 | *.grep | |
23 | *.grp | |
24 | *.gz | |
25 | @@ -51,14 +54,17 @@ | |
26 | *.tab.h | |
27 | *.tex | |
28 | *.ver | |
29 | +*.vim | |
30 | *.xml | |
31 | *.xz | |
32 | *_MODULES | |
33 | +*_reg_safe.h | |
34 | *_vga16.c | |
35 | *~ | |
36 | \#*# | |
37 | *.9 | |
38 | -.* | |
39 | +.[^g]* | |
40 | +.gen* | |
41 | .*.d | |
42 | .mm | |
43 | 53c700_d.h | |
44 | @@ -72,9 +78,11 @@ Image | |
45 | Module.markers | |
46 | Module.symvers | |
47 | PENDING | |
48 | +PERF* | |
49 | SCCS | |
50 | System.map* | |
51 | TAGS | |
52 | +TRACEEVENT-CFLAGS | |
53 | aconf | |
54 | af_names.h | |
55 | aic7*reg.h* | |
56 | @@ -83,6 +91,7 @@ aic7*seq.h* | |
57 | aicasm | |
58 | aicdb.h* | |
59 | altivec*.c | |
60 | +ashldi3.S | |
61 | asm-offsets.h | |
62 | asm_offsets.h | |
63 | autoconf.h* | |
64 | @@ -95,32 +104,40 @@ bounds.h | |
65 | bsetup | |
66 | btfixupprep | |
67 | build | |
68 | +builtin-policy.h | |
69 | bvmlinux | |
70 | bzImage* | |
71 | capability_names.h | |
72 | capflags.c | |
73 | classlist.h* | |
74 | +clut_vga16.c | |
75 | +common-cmds.h | |
76 | comp*.log | |
77 | compile.h* | |
78 | conf | |
79 | config | |
80 | config-* | |
81 | config_data.h* | |
82 | +config.c | |
83 | config.mak | |
84 | config.mak.autogen | |
85 | +config.tmp | |
86 | conmakehash | |
87 | consolemap_deftbl.c* | |
88 | cpustr.h | |
89 | crc32table.h* | |
90 | cscope.* | |
91 | defkeymap.c | |
92 | +devicetable-offsets.h | |
93 | devlist.h* | |
94 | dnotify_test | |
95 | docproc | |
96 | dslm | |
97 | +dtc-lexer.lex.c | |
98 | elf2ecoff | |
99 | elfconfig.h* | |
100 | evergreen_reg_safe.h | |
101 | +exception_policy.conf | |
102 | fixdep | |
103 | flask.h | |
104 | fore200e_mkfirm | |
105 | @@ -128,12 +145,15 @@ fore200e_pca_fw.c* | |
106 | gconf | |
107 | gconf.glade.h | |
108 | gen-devlist | |
109 | +gen-kdb_cmds.c | |
110 | gen_crc32table | |
111 | gen_init_cpio | |
112 | generated | |
113 | genheaders | |
114 | genksyms | |
115 | *_gray256.c | |
116 | +hash | |
117 | +hid-example | |
118 | hpet_example | |
119 | hugepage-mmap | |
120 | hugepage-shm | |
121 | @@ -148,14 +168,14 @@ int32.c | |
122 | int4.c | |
123 | int8.c | |
124 | kallsyms | |
125 | -kconfig | |
126 | +kern_constants.h | |
127 | keywords.c | |
128 | ksym.c* | |
129 | ksym.h* | |
130 | kxgettext | |
131 | lex.c | |
132 | lex.*.c | |
133 | -linux | |
134 | +lib1funcs.S | |
135 | logo_*.c | |
136 | logo_*_clut224.c | |
137 | logo_*_mono.c | |
138 | @@ -165,14 +185,15 @@ mach-types.h | |
139 | machtypes.h | |
140 | map | |
141 | map_hugetlb | |
142 | -media | |
143 | mconf | |
144 | +mdp | |
145 | miboot* | |
146 | mk_elfconfig | |
147 | mkboot | |
148 | mkbugboot | |
149 | mkcpustr | |
150 | mkdep | |
151 | +mkpiggy | |
152 | mkprep | |
153 | mkregtable | |
154 | mktables | |
155 | @@ -188,6 +209,8 @@ oui.c* | |
156 | page-types | |
157 | parse.c | |
158 | parse.h | |
159 | +parse-events* | |
160 | +pasyms.h | |
161 | patches* | |
162 | pca200e.bin | |
163 | pca200e_ecd.bin2 | |
164 | @@ -197,6 +220,7 @@ perf-archive | |
165 | piggyback | |
166 | piggy.gzip | |
167 | piggy.S | |
168 | +pmu-* | |
169 | pnmtologo | |
170 | ppc_defs.h* | |
171 | pss_boot.h | |
172 | @@ -206,7 +230,12 @@ r200_reg_safe.h | |
173 | r300_reg_safe.h | |
174 | r420_reg_safe.h | |
175 | r600_reg_safe.h | |
176 | +randomize_layout_hash.h | |
177 | +randomize_layout_seed.h | |
178 | +realmode.lds | |
179 | +realmode.relocs | |
180 | recordmcount | |
181 | +regdb.c | |
182 | relocs | |
183 | rlim_names.h | |
184 | rn50_reg_safe.h | |
185 | @@ -216,8 +245,12 @@ series | |
186 | setup | |
187 | setup.bin | |
188 | setup.elf | |
189 | +signing_key* | |
190 | +size_overflow_hash.h | |
191 | sImage | |
192 | +slabinfo | |
193 | sm_tbl* | |
194 | +sortextable | |
195 | split-include | |
196 | syscalltab.h | |
197 | tables.c | |
198 | @@ -227,6 +260,7 @@ tftpboot.img | |
199 | timeconst.h | |
200 | times.h* | |
201 | trix_boot.h | |
202 | +user_constants.h | |
203 | utsrelease.h* | |
204 | vdso-syms.lds | |
205 | vdso.lds | |
206 | @@ -238,13 +272,17 @@ vdso32.lds | |
207 | vdso32.so.dbg | |
208 | vdso64.lds | |
209 | vdso64.so.dbg | |
210 | +vdsox32.lds | |
211 | +vdsox32-syms.lds | |
212 | version.h* | |
213 | vmImage | |
214 | vmlinux | |
215 | vmlinux-* | |
216 | vmlinux.aout | |
217 | vmlinux.bin.all | |
218 | +vmlinux.bin.bz2 | |
219 | vmlinux.lds | |
220 | +vmlinux.relocs | |
221 | vmlinuz | |
222 | voffset.h | |
223 | vsyscall.lds | |
224 | @@ -252,9 +290,12 @@ vsyscall_32.lds | |
225 | wanxlfw.inc | |
226 | uImage | |
227 | unifdef | |
228 | +utsrelease.h | |
229 | wakeup.bin | |
230 | wakeup.elf | |
231 | wakeup.lds | |
232 | +x509* | |
233 | zImage* | |
234 | zconf.hash.c | |
235 | +zconf.lex.c | |
236 | zoffset.h | |
237 | diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt | |
238 | index a311db8..415b28c 100644 | |
239 | --- a/Documentation/kbuild/makefiles.txt | |
240 | +++ b/Documentation/kbuild/makefiles.txt | |
241 | @@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles. | |
242 | === 4 Host Program support | |
243 | --- 4.1 Simple Host Program | |
244 | --- 4.2 Composite Host Programs | |
245 | - --- 4.3 Using C++ for host programs | |
246 | - --- 4.4 Controlling compiler options for host programs | |
247 | - --- 4.5 When host programs are actually built | |
248 | - --- 4.6 Using hostprogs-$(CONFIG_FOO) | |
249 | + --- 4.3 Defining shared libraries | |
250 | + --- 4.4 Using C++ for host programs | |
251 | + --- 4.5 Controlling compiler options for host programs | |
252 | + --- 4.6 When host programs are actually built | |
253 | + --- 4.7 Using hostprogs-$(CONFIG_FOO) | |
254 | ||
255 | === 5 Kbuild clean infrastructure | |
256 | ||
257 | @@ -642,7 +643,29 @@ Both possibilities are described in the following. | |
258 | Finally, the two .o files are linked to the executable, lxdialog. | |
259 | Note: The syntax <executable>-y is not permitted for host-programs. | |
260 | ||
261 | ---- 4.3 Using C++ for host programs | |
262 | +--- 4.3 Defining shared libraries | |
263 | + | |
264 | + Objects with extension .so are considered shared libraries, and | |
265 | + will be compiled as position independent objects. | |
266 | + Kbuild provides support for shared libraries, but the usage | |
267 | + shall be restricted. | |
268 | + In the following example the libkconfig.so shared library is used | |
269 | + to link the executable conf. | |
270 | + | |
271 | + Example: | |
272 | + #scripts/kconfig/Makefile | |
273 | + hostprogs-y := conf | |
274 | + conf-objs := conf.o libkconfig.so | |
275 | + libkconfig-objs := expr.o type.o | |
276 | + | |
277 | + Shared libraries always require a corresponding -objs line, and | |
278 | + in the example above the shared library libkconfig is composed by | |
279 | + the two objects expr.o and type.o. | |
280 | + expr.o and type.o will be built as position independent code and | |
281 | + linked as a shared library libkconfig.so. C++ is not supported for | |
282 | + shared libraries. | |
283 | + | |
284 | +--- 4.4 Using C++ for host programs | |
285 | ||
286 | kbuild offers support for host programs written in C++. This was | |
287 | introduced solely to support kconfig, and is not recommended | |
288 | @@ -665,7 +688,7 @@ Both possibilities are described in the following. | |
289 | qconf-cxxobjs := qconf.o | |
290 | qconf-objs := check.o | |
291 | ||
292 | ---- 4.4 Controlling compiler options for host programs | |
293 | +--- 4.5 Controlling compiler options for host programs | |
294 | ||
295 | When compiling host programs, it is possible to set specific flags. | |
296 | The programs will always be compiled utilising $(HOSTCC) passed | |
297 | @@ -693,7 +716,7 @@ Both possibilities are described in the following. | |
298 | When linking qconf, it will be passed the extra option | |
299 | "-L$(QTDIR)/lib". | |
300 | ||
301 | ---- 4.5 When host programs are actually built | |
302 | +--- 4.6 When host programs are actually built | |
303 | ||
304 | Kbuild will only build host-programs when they are referenced | |
305 | as a prerequisite. | |
306 | @@ -724,7 +747,7 @@ Both possibilities are described in the following. | |
307 | This will tell kbuild to build lxdialog even if not referenced in | |
308 | any rule. | |
309 | ||
310 | ---- 4.6 Using hostprogs-$(CONFIG_FOO) | |
311 | +--- 4.7 Using hostprogs-$(CONFIG_FOO) | |
312 | ||
313 | A typical pattern in a Kbuild file looks like this: | |
314 | ||
315 | diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt | |
316 | index f4c71d4..66811b1 100644 | |
317 | --- a/Documentation/kernel-parameters.txt | |
318 | +++ b/Documentation/kernel-parameters.txt | |
319 | @@ -1182,6 +1182,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |
320 | Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0. | |
321 | Default: 1024 | |
322 | ||
323 | + grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to | |
324 | + ignore grsecurity's /proc restrictions | |
325 | + | |
326 | + | |
327 | hashdist= [KNL,NUMA] Large hashes allocated during boot | |
328 | are distributed across NUMA nodes. Defaults on | |
329 | for 64-bit NUMA, off otherwise. | |
330 | @@ -2260,6 +2264,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |
331 | noexec=on: enable non-executable mappings (default) | |
332 | noexec=off: disable non-executable mappings | |
333 | ||
334 | + nopcid [X86-64] | |
335 | + Disable PCID (Process-Context IDentifier) even if it | |
336 | + is supported by the processor. | |
337 | + | |
338 | nosmap [X86] | |
339 | Disable SMAP (Supervisor Mode Access Prevention) | |
340 | even if it is supported by processor. | |
341 | @@ -2552,6 +2560,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |
342 | the specified number of seconds. This is to be used if | |
343 | your oopses keep scrolling off the screen. | |
344 | ||
345 | + pax_nouderef [X86] disables UDEREF. Most likely needed under certain | |
346 | + virtualization environments that don't cope well with the | |
347 | + expand down segment used by UDEREF on X86-32 or the frequent | |
348 | + page table updates on X86-64. | |
349 | + | |
350 | + pax_sanitize_slab= | |
351 | + Format: { 0 | 1 | off | fast | full } | |
352 | + Options '0' and '1' are only provided for backward | |
353 | + compatibility, 'off' or 'fast' should be used instead. | |
354 | + 0|off : disable slab object sanitization | |
355 | + 1|fast: enable slab object sanitization excluding | |
356 | + whitelisted slabs (default) | |
357 | + full : sanitize all slabs, even the whitelisted ones | |
358 | + | |
359 | + pax_softmode= 0/1 to disable/enable PaX softmode on boot already. | |
360 | + | |
361 | + pax_extra_latent_entropy | |
362 | + Enable a very simple form of latent entropy extraction | |
363 | + from the first 4GB of memory as the bootmem allocator | |
364 | + passes the memory pages to the buddy allocator. | |
365 | + | |
366 | + pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF | |
367 | + when the processor supports PCID. | |
368 | + | |
369 | pcbit= [HW,ISDN] | |
370 | ||
371 | pcd. [PARIDE] | |
372 | diff --git a/Makefile b/Makefile | |
373 | index 6276fca..e21ed81 100644 | |
374 | --- a/Makefile | |
375 | +++ b/Makefile | |
376 | @@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ | |
377 | HOSTCC = gcc | |
378 | HOSTCXX = g++ | |
379 | HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89 | |
380 | -HOSTCXXFLAGS = -O2 | |
381 | +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -std=gnu89 -fno-delete-null-pointer-checks | |
382 | +HOSTCFLAGS += $(call cc-option, -Wno-empty-body) | |
383 | +HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds | |
384 | ||
385 | ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1) | |
386 | HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \ | |
387 | @@ -445,8 +447,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \ | |
388 | # Rules shared between *config targets and build targets | |
389 | ||
390 | # Basic helpers built in scripts/ | |
391 | -PHONY += scripts_basic | |
392 | -scripts_basic: | |
393 | +PHONY += scripts_basic gcc-plugins | |
394 | +scripts_basic: gcc-plugins | |
395 | $(Q)$(MAKE) $(build)=scripts/basic | |
396 | $(Q)rm -f .tmp_quiet_recordmcount | |
397 | ||
398 | @@ -620,6 +622,72 @@ endif | |
399 | # Tell gcc to never replace conditional load with a non-conditional one | |
400 | KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) | |
401 | ||
402 | +ifndef DISABLE_PAX_PLUGINS | |
403 | +ifeq ($(call cc-ifversion, -ge, 0408, y), y) | |
404 | +PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)") | |
405 | +else | |
406 | +PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)") | |
407 | +endif | |
408 | +ifneq ($(PLUGINCC),) | |
409 | +ifdef CONFIG_PAX_CONSTIFY_PLUGIN | |
410 | +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN | |
411 | +endif | |
412 | +ifdef CONFIG_PAX_MEMORY_STACKLEAK | |
413 | +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN | |
414 | +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100 | |
415 | +endif | |
416 | +ifdef CONFIG_KALLOCSTAT_PLUGIN | |
417 | +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so | |
418 | +endif | |
419 | +ifdef CONFIG_PAX_KERNEXEC_PLUGIN | |
420 | +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so | |
421 | +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN | |
422 | +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN | |
423 | +endif | |
424 | +ifdef CONFIG_GRKERNSEC_RANDSTRUCT | |
425 | +RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN | |
426 | +ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE | |
427 | +RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode | |
428 | +endif | |
429 | +endif | |
430 | +ifdef CONFIG_CHECKER_PLUGIN | |
431 | +ifeq ($(call cc-ifversion, -ge, 0406, y), y) | |
432 | +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN | |
433 | +endif | |
434 | +endif | |
435 | +COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so | |
436 | +ifdef CONFIG_PAX_SIZE_OVERFLOW | |
437 | +SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN | |
438 | +endif | |
439 | +ifdef CONFIG_PAX_LATENT_ENTROPY | |
440 | +LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN | |
441 | +endif | |
442 | +ifdef CONFIG_PAX_MEMORY_STRUCTLEAK | |
443 | +STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN | |
444 | +endif | |
445 | +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS) | |
446 | +GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS) | |
447 | +GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS) | |
448 | +GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS) | |
449 | +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS) | |
450 | +export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS | |
451 | +ifeq ($(KBUILD_EXTMOD),) | |
452 | +gcc-plugins: | |
453 | + $(Q)$(MAKE) $(build)=tools/gcc | |
454 | +else | |
455 | +gcc-plugins: ; | |
456 | +endif | |
457 | +else | |
458 | +gcc-plugins: | |
459 | +ifeq ($(call cc-ifversion, -ge, 0405, y), y) | |
460 | + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)) | |
461 | +else | |
462 | + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least" | |
463 | +endif | |
464 | + $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active." | |
465 | +endif | |
466 | +endif | |
467 | + | |
468 | ifdef CONFIG_READABLE_ASM | |
469 | # Disable optimizations that make assembler listings hard to read. | |
470 | # reorder blocks reorders the control in the function | |
471 | @@ -712,7 +780,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g) | |
472 | else | |
473 | KBUILD_CFLAGS += -g | |
474 | endif | |
475 | -KBUILD_AFLAGS += -Wa,-gdwarf-2 | |
476 | +KBUILD_AFLAGS += -Wa,--gdwarf-2 | |
477 | endif | |
478 | ifdef CONFIG_DEBUG_INFO_DWARF4 | |
479 | KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,) | |
480 | @@ -877,7 +945,7 @@ export mod_sign_cmd | |
481 | ||
482 | ||
483 | ifeq ($(KBUILD_EXTMOD),) | |
484 | -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ | |
485 | +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/ | |
486 | ||
487 | vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \ | |
488 | $(core-y) $(core-m) $(drivers-y) $(drivers-m) \ | |
489 | @@ -924,6 +992,8 @@ endif | |
490 | ||
491 | # The actual objects are generated when descending, | |
492 | # make sure no implicit rule kicks in | |
493 | +$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) | |
494 | +$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) | |
495 | $(sort $(vmlinux-deps)): $(vmlinux-dirs) ; | |
496 | ||
497 | # Handle descending into subdirectories listed in $(vmlinux-dirs) | |
498 | @@ -933,7 +1003,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ; | |
499 | # Error messages still appears in the original language | |
500 | ||
501 | PHONY += $(vmlinux-dirs) | |
502 | -$(vmlinux-dirs): prepare scripts | |
503 | +$(vmlinux-dirs): gcc-plugins prepare scripts | |
504 | $(Q)$(MAKE) $(build)=$@ | |
505 | ||
506 | define filechk_kernel.release | |
507 | @@ -976,10 +1046,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \ | |
508 | ||
509 | archprepare: archheaders archscripts prepare1 scripts_basic | |
510 | ||
511 | +prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) | |
512 | +prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) | |
513 | prepare0: archprepare FORCE | |
514 | $(Q)$(MAKE) $(build)=. | |
515 | ||
516 | # All the preparing.. | |
517 | +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS)) | |
518 | prepare: prepare0 | |
519 | ||
520 | # Generate some files | |
521 | @@ -1094,6 +1167,8 @@ all: modules | |
522 | # using awk while concatenating to the final file. | |
523 | ||
524 | PHONY += modules | |
525 | +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) | |
526 | +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) | |
527 | modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin | |
528 | $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order | |
529 | @$(kecho) ' Building modules, stage 2.'; | |
530 | @@ -1109,7 +1184,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin) | |
531 | ||
532 | # Target to prepare building external modules | |
533 | PHONY += modules_prepare | |
534 | -modules_prepare: prepare scripts | |
535 | +modules_prepare: gcc-plugins prepare scripts | |
536 | ||
537 | # Target to install modules | |
538 | PHONY += modules_install | |
539 | @@ -1175,7 +1250,10 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \ | |
540 | Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \ | |
541 | signing_key.priv signing_key.x509 x509.genkey \ | |
542 | extra_certificates signing_key.x509.keyid \ | |
543 | - signing_key.x509.signer include/linux/version.h | |
544 | + signing_key.x509.signer include/linux/version.h \ | |
545 | + tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \ | |
546 | + tools/gcc/size_overflow_plugin/size_overflow_hash.h \ | |
547 | + tools/gcc/randomize_layout_seed.h | |
548 | ||
549 | # clean - Delete most, but leave enough to build external modules | |
550 | # | |
551 | @@ -1214,7 +1292,7 @@ distclean: mrproper | |
552 | @find $(srctree) $(RCS_FIND_IGNORE) \ | |
553 | \( -name '*.orig' -o -name '*.rej' -o -name '*~' \ | |
554 | -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \ | |
555 | - -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \ | |
556 | + -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \ | |
557 | -type f -print | xargs rm -f | |
558 | ||
559 | ||
560 | @@ -1380,6 +1458,8 @@ PHONY += $(module-dirs) modules | |
561 | $(module-dirs): crmodverdir $(objtree)/Module.symvers | |
562 | $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@) | |
563 | ||
564 | +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) | |
565 | +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) | |
566 | modules: $(module-dirs) | |
567 | @$(kecho) ' Building modules, stage 2.'; | |
568 | $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost | |
569 | @@ -1520,17 +1600,21 @@ else | |
570 | target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@)) | |
571 | endif | |
572 | ||
573 | -%.s: %.c prepare scripts FORCE | |
574 | +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) | |
575 | +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) | |
576 | +%.s: %.c gcc-plugins prepare scripts FORCE | |
577 | $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) | |
578 | %.i: %.c prepare scripts FORCE | |
579 | $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) | |
580 | -%.o: %.c prepare scripts FORCE | |
581 | +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) | |
582 | +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) | |
583 | +%.o: %.c gcc-plugins prepare scripts FORCE | |
584 | $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) | |
585 | %.lst: %.c prepare scripts FORCE | |
586 | $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) | |
587 | -%.s: %.S prepare scripts FORCE | |
588 | +%.s: %.S gcc-plugins prepare scripts FORCE | |
589 | $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) | |
590 | -%.o: %.S prepare scripts FORCE | |
591 | +%.o: %.S gcc-plugins prepare scripts FORCE | |
592 | $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) | |
593 | %.symtypes: %.c prepare scripts FORCE | |
594 | $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) | |
595 | @@ -1542,11 +1626,15 @@ endif | |
596 | $(build)=$(build-dir) | |
597 | # Make sure the latest headers are built for Documentation | |
598 | Documentation/: headers_install | |
599 | -%/: prepare scripts FORCE | |
600 | +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) | |
601 | +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) | |
602 | +%/: gcc-plugins prepare scripts FORCE | |
603 | $(cmd_crmodverdir) | |
604 | $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ | |
605 | $(build)=$(build-dir) | |
606 | -%.ko: prepare scripts FORCE | |
607 | +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) | |
608 | +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) | |
609 | +%.ko: gcc-plugins prepare scripts FORCE | |
610 | $(cmd_crmodverdir) | |
611 | $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ | |
612 | $(build)=$(build-dir) $(@:.ko=.o) | |
613 | diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h | |
614 | index 8f8eafb..3405f46 100644 | |
615 | --- a/arch/alpha/include/asm/atomic.h | |
616 | +++ b/arch/alpha/include/asm/atomic.h | |
617 | @@ -239,4 +239,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) | |
618 | #define atomic_dec(v) atomic_sub(1,(v)) | |
619 | #define atomic64_dec(v) atomic64_sub(1,(v)) | |
620 | ||
621 | +#define atomic64_read_unchecked(v) atomic64_read(v) | |
622 | +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) | |
623 | +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) | |
624 | +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) | |
625 | +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) | |
626 | +#define atomic64_inc_unchecked(v) atomic64_inc(v) | |
627 | +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) | |
628 | +#define atomic64_dec_unchecked(v) atomic64_dec(v) | |
629 | +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) | |
630 | + | |
631 | #endif /* _ALPHA_ATOMIC_H */ | |
632 | diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h | |
633 | index ad368a9..fbe0f25 100644 | |
634 | --- a/arch/alpha/include/asm/cache.h | |
635 | +++ b/arch/alpha/include/asm/cache.h | |
636 | @@ -4,19 +4,19 @@ | |
637 | #ifndef __ARCH_ALPHA_CACHE_H | |
638 | #define __ARCH_ALPHA_CACHE_H | |
639 | ||
640 | +#include <linux/const.h> | |
641 | ||
642 | /* Bytes per L1 (data) cache line. */ | |
643 | #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6) | |
644 | -# define L1_CACHE_BYTES 64 | |
645 | # define L1_CACHE_SHIFT 6 | |
646 | #else | |
647 | /* Both EV4 and EV5 are write-through, read-allocate, | |
648 | direct-mapped, physical. | |
649 | */ | |
650 | -# define L1_CACHE_BYTES 32 | |
651 | # define L1_CACHE_SHIFT 5 | |
652 | #endif | |
653 | ||
654 | +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) | |
655 | #define SMP_CACHE_BYTES L1_CACHE_BYTES | |
656 | ||
657 | #endif | |
658 | diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h | |
659 | index 968d999..d36b2df 100644 | |
660 | --- a/arch/alpha/include/asm/elf.h | |
661 | +++ b/arch/alpha/include/asm/elf.h | |
662 | @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; | |
663 | ||
664 | #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000) | |
665 | ||
666 | +#ifdef CONFIG_PAX_ASLR | |
667 | +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL) | |
668 | + | |
669 | +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28) | |
670 | +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19) | |
671 | +#endif | |
672 | + | |
673 | /* $0 is set by ld.so to a pointer to a function which might be | |
674 | registered using atexit. This provides a mean for the dynamic | |
675 | linker to call DT_FINI functions for shared libraries that have | |
676 | diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h | |
677 | index aab14a0..b4fa3e7 100644 | |
678 | --- a/arch/alpha/include/asm/pgalloc.h | |
679 | +++ b/arch/alpha/include/asm/pgalloc.h | |
680 | @@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) | |
681 | pgd_set(pgd, pmd); | |
682 | } | |
683 | ||
684 | +static inline void | |
685 | +pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) | |
686 | +{ | |
687 | + pgd_populate(mm, pgd, pmd); | |
688 | +} | |
689 | + | |
690 | extern pgd_t *pgd_alloc(struct mm_struct *mm); | |
691 | ||
692 | static inline void | |
693 | diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h | |
694 | index d8f9b7e..f6222fa 100644 | |
695 | --- a/arch/alpha/include/asm/pgtable.h | |
696 | +++ b/arch/alpha/include/asm/pgtable.h | |
697 | @@ -102,6 +102,17 @@ struct vm_area_struct; | |
698 | #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS) | |
699 | #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) | |
700 | #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) | |
701 | + | |
702 | +#ifdef CONFIG_PAX_PAGEEXEC | |
703 | +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE) | |
704 | +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE) | |
705 | +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE) | |
706 | +#else | |
707 | +# define PAGE_SHARED_NOEXEC PAGE_SHARED | |
708 | +# define PAGE_COPY_NOEXEC PAGE_COPY | |
709 | +# define PAGE_READONLY_NOEXEC PAGE_READONLY | |
710 | +#endif | |
711 | + | |
712 | #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE) | |
713 | ||
714 | #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x)) | |
715 | diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c | |
716 | index 2fd00b7..cfd5069 100644 | |
717 | --- a/arch/alpha/kernel/module.c | |
718 | +++ b/arch/alpha/kernel/module.c | |
719 | @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, | |
720 | ||
721 | /* The small sections were sorted to the end of the segment. | |
722 | The following should definitely cover them. */ | |
723 | - gp = (u64)me->module_core + me->core_size - 0x8000; | |
724 | + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000; | |
725 | got = sechdrs[me->arch.gotsecindex].sh_addr; | |
726 | ||
727 | for (i = 0; i < n; i++) { | |
728 | diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c | |
729 | index f9c732e..78fbb0f 100644 | |
730 | --- a/arch/alpha/kernel/osf_sys.c | |
731 | +++ b/arch/alpha/kernel/osf_sys.c | |
732 | @@ -1295,10 +1295,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p) | |
733 | generic version except that we know how to honor ADDR_LIMIT_32BIT. */ | |
734 | ||
735 | static unsigned long | |
736 | -arch_get_unmapped_area_1(unsigned long addr, unsigned long len, | |
737 | - unsigned long limit) | |
738 | +arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len, | |
739 | + unsigned long limit, unsigned long flags) | |
740 | { | |
741 | struct vm_unmapped_area_info info; | |
742 | + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags); | |
743 | ||
744 | info.flags = 0; | |
745 | info.length = len; | |
746 | @@ -1306,6 +1307,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len, | |
747 | info.high_limit = limit; | |
748 | info.align_mask = 0; | |
749 | info.align_offset = 0; | |
750 | + info.threadstack_offset = offset; | |
751 | return vm_unmapped_area(&info); | |
752 | } | |
753 | ||
754 | @@ -1338,20 +1340,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |
755 | merely specific addresses, but regions of memory -- perhaps | |
756 | this feature should be incorporated into all ports? */ | |
757 | ||
758 | +#ifdef CONFIG_PAX_RANDMMAP | |
759 | + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP)) | |
760 | +#endif | |
761 | + | |
762 | if (addr) { | |
763 | - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit); | |
764 | + addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags); | |
765 | if (addr != (unsigned long) -ENOMEM) | |
766 | return addr; | |
767 | } | |
768 | ||
769 | /* Next, try allocating at TASK_UNMAPPED_BASE. */ | |
770 | - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE), | |
771 | - len, limit); | |
772 | + addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags); | |
773 | + | |
774 | if (addr != (unsigned long) -ENOMEM) | |
775 | return addr; | |
776 | ||
777 | /* Finally, try allocating in low memory. */ | |
778 | - addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit); | |
779 | + addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags); | |
780 | ||
781 | return addr; | |
782 | } | |
783 | diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c | |
784 | index 98838a0..b304fb4 100644 | |
785 | --- a/arch/alpha/mm/fault.c | |
786 | +++ b/arch/alpha/mm/fault.c | |
787 | @@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm) | |
788 | __reload_thread(pcb); | |
789 | } | |
790 | ||
791 | +#ifdef CONFIG_PAX_PAGEEXEC | |
792 | +/* | |
793 | + * PaX: decide what to do with offenders (regs->pc = fault address) | |
794 | + * | |
795 | + * returns 1 when task should be killed | |
796 | + * 2 when patched PLT trampoline was detected | |
797 | + * 3 when unpatched PLT trampoline was detected | |
798 | + */ | |
799 | +static int pax_handle_fetch_fault(struct pt_regs *regs) | |
800 | +{ | |
801 | + | |
802 | +#ifdef CONFIG_PAX_EMUPLT | |
803 | + int err; | |
804 | + | |
805 | + do { /* PaX: patched PLT emulation #1 */ | |
806 | + unsigned int ldah, ldq, jmp; | |
807 | + | |
808 | + err = get_user(ldah, (unsigned int *)regs->pc); | |
809 | + err |= get_user(ldq, (unsigned int *)(regs->pc+4)); | |
810 | + err |= get_user(jmp, (unsigned int *)(regs->pc+8)); | |
811 | + | |
812 | + if (err) | |
813 | + break; | |
814 | + | |
815 | + if ((ldah & 0xFFFF0000U) == 0x277B0000U && | |
816 | + (ldq & 0xFFFF0000U) == 0xA77B0000U && | |
817 | + jmp == 0x6BFB0000U) | |
818 | + { | |
819 | + unsigned long r27, addr; | |
820 | + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16; | |
821 | + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL; | |
822 | + | |
823 | + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL); | |
824 | + err = get_user(r27, (unsigned long *)addr); | |
825 | + if (err) | |
826 | + break; | |
827 | + | |
828 | + regs->r27 = r27; | |
829 | + regs->pc = r27; | |
830 | + return 2; | |
831 | + } | |
832 | + } while (0); | |
833 | + | |
834 | + do { /* PaX: patched PLT emulation #2 */ | |
835 | + unsigned int ldah, lda, br; | |
836 | + | |
837 | + err = get_user(ldah, (unsigned int *)regs->pc); | |
838 | + err |= get_user(lda, (unsigned int *)(regs->pc+4)); | |
839 | + err |= get_user(br, (unsigned int *)(regs->pc+8)); | |
840 | + | |
841 | + if (err) | |
842 | + break; | |
843 | + | |
844 | + if ((ldah & 0xFFFF0000U) == 0x277B0000U && | |
845 | + (lda & 0xFFFF0000U) == 0xA77B0000U && | |
846 | + (br & 0xFFE00000U) == 0xC3E00000U) | |
847 | + { | |
848 | + unsigned long addr = br | 0xFFFFFFFFFFE00000UL; | |
849 | + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16; | |
850 | + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL; | |
851 | + | |
852 | + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL); | |
853 | + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2); | |
854 | + return 2; | |
855 | + } | |
856 | + } while (0); | |
857 | + | |
858 | + do { /* PaX: unpatched PLT emulation */ | |
859 | + unsigned int br; | |
860 | + | |
861 | + err = get_user(br, (unsigned int *)regs->pc); | |
862 | + | |
863 | + if (!err && (br & 0xFFE00000U) == 0xC3800000U) { | |
864 | + unsigned int br2, ldq, nop, jmp; | |
865 | + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver; | |
866 | + | |
867 | + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2); | |
868 | + err = get_user(br2, (unsigned int *)addr); | |
869 | + err |= get_user(ldq, (unsigned int *)(addr+4)); | |
870 | + err |= get_user(nop, (unsigned int *)(addr+8)); | |
871 | + err |= get_user(jmp, (unsigned int *)(addr+12)); | |
872 | + err |= get_user(resolver, (unsigned long *)(addr+16)); | |
873 | + | |
874 | + if (err) | |
875 | + break; | |
876 | + | |
877 | + if (br2 == 0xC3600000U && | |
878 | + ldq == 0xA77B000CU && | |
879 | + nop == 0x47FF041FU && | |
880 | + jmp == 0x6B7B0000U) | |
881 | + { | |
882 | + regs->r28 = regs->pc+4; | |
883 | + regs->r27 = addr+16; | |
884 | + regs->pc = resolver; | |
885 | + return 3; | |
886 | + } | |
887 | + } | |
888 | + } while (0); | |
889 | +#endif | |
890 | + | |
891 | + return 1; | |
892 | +} | |
893 | + | |
894 | +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) | |
895 | +{ | |
896 | + unsigned long i; | |
897 | + | |
898 | + printk(KERN_ERR "PAX: bytes at PC: "); | |
899 | + for (i = 0; i < 5; i++) { | |
900 | + unsigned int c; | |
901 | + if (get_user(c, (unsigned int *)pc+i)) | |
902 | + printk(KERN_CONT "???????? "); | |
903 | + else | |
904 | + printk(KERN_CONT "%08x ", c); | |
905 | + } | |
906 | + printk("\n"); | |
907 | +} | |
908 | +#endif | |
909 | ||
910 | /* | |
911 | * This routine handles page faults. It determines the address, | |
912 | @@ -133,8 +251,29 @@ retry: | |
913 | good_area: | |
914 | si_code = SEGV_ACCERR; | |
915 | if (cause < 0) { | |
916 | - if (!(vma->vm_flags & VM_EXEC)) | |
917 | + if (!(vma->vm_flags & VM_EXEC)) { | |
918 | + | |
919 | +#ifdef CONFIG_PAX_PAGEEXEC | |
920 | + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc) | |
921 | + goto bad_area; | |
922 | + | |
923 | + up_read(&mm->mmap_sem); | |
924 | + switch (pax_handle_fetch_fault(regs)) { | |
925 | + | |
926 | +#ifdef CONFIG_PAX_EMUPLT | |
927 | + case 2: | |
928 | + case 3: | |
929 | + return; | |
930 | +#endif | |
931 | + | |
932 | + } | |
933 | + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp()); | |
934 | + do_group_exit(SIGKILL); | |
935 | +#else | |
936 | goto bad_area; | |
937 | +#endif | |
938 | + | |
939 | + } | |
940 | } else if (!cause) { | |
941 | /* Allow reads even for write-only mappings */ | |
942 | if (!(vma->vm_flags & (VM_READ | VM_WRITE))) | |
943 | diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig | |
944 | index 89c4b5c..847a7be 100644 | |
945 | --- a/arch/arm/Kconfig | |
946 | +++ b/arch/arm/Kconfig | |
947 | @@ -1740,7 +1740,7 @@ config ALIGNMENT_TRAP | |
948 | ||
949 | config UACCESS_WITH_MEMCPY | |
950 | bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()" | |
951 | - depends on MMU | |
952 | + depends on MMU && !PAX_MEMORY_UDEREF | |
953 | default y if CPU_FEROCEON | |
954 | help | |
955 | Implement faster copy_to_user and clear_user methods for CPU | |
956 | @@ -2004,6 +2004,7 @@ config XIP_PHYS_ADDR | |
957 | config KEXEC | |
958 | bool "Kexec system call (EXPERIMENTAL)" | |
959 | depends on (!SMP || PM_SLEEP_SMP) | |
960 | + depends on !GRKERNSEC_KMEM | |
961 | help | |
962 | kexec is a system call that implements the ability to shutdown your | |
963 | current kernel, and to start another kernel. It is like a reboot | |
964 | diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h | |
965 | index e22c119..eaa807d 100644 | |
966 | --- a/arch/arm/include/asm/atomic.h | |
967 | +++ b/arch/arm/include/asm/atomic.h | |
968 | @@ -18,17 +18,41 @@ | |
969 | #include <asm/barrier.h> | |
970 | #include <asm/cmpxchg.h> | |
971 | ||
972 | +#ifdef CONFIG_GENERIC_ATOMIC64 | |
973 | +#include <asm-generic/atomic64.h> | |
974 | +#endif | |
975 | + | |
976 | #define ATOMIC_INIT(i) { (i) } | |
977 | ||
978 | #ifdef __KERNEL__ | |
979 | ||
980 | +#ifdef CONFIG_THUMB2_KERNEL | |
981 | +#define REFCOUNT_TRAP_INSN "bkpt 0xf1" | |
982 | +#else | |
983 | +#define REFCOUNT_TRAP_INSN "bkpt 0xf103" | |
984 | +#endif | |
985 | + | |
986 | +#define _ASM_EXTABLE(from, to) \ | |
987 | +" .pushsection __ex_table,\"a\"\n"\ | |
988 | +" .align 3\n" \ | |
989 | +" .long " #from ", " #to"\n" \ | |
990 | +" .popsection" | |
991 | + | |
992 | /* | |
993 | * On ARM, ordinary assignment (str instruction) doesn't clear the local | |
994 | * strex/ldrex monitor on some implementations. The reason we can use it for | |
995 | * atomic_set() is the clrex or dummy strex done on every exception return. | |
996 | */ | |
997 | #define atomic_read(v) ACCESS_ONCE((v)->counter) | |
998 | +static inline int atomic_read_unchecked(const atomic_unchecked_t *v) | |
999 | +{ | |
1000 | + return ACCESS_ONCE(v->counter); | |
1001 | +} | |
1002 | #define atomic_set(v,i) (((v)->counter) = (i)) | |
1003 | +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) | |
1004 | +{ | |
1005 | + v->counter = i; | |
1006 | +} | |
1007 | ||
1008 | #if __LINUX_ARM_ARCH__ >= 6 | |
1009 | ||
1010 | @@ -38,26 +62,50 @@ | |
1011 | * to ensure that the update happens. | |
1012 | */ | |
1013 | ||
1014 | -#define ATOMIC_OP(op, c_op, asm_op) \ | |
1015 | -static inline void atomic_##op(int i, atomic_t *v) \ | |
1016 | +#ifdef CONFIG_PAX_REFCOUNT | |
1017 | +#define __OVERFLOW_POST \ | |
1018 | + " bvc 3f\n" \ | |
1019 | + "2: " REFCOUNT_TRAP_INSN "\n"\ | |
1020 | + "3:\n" | |
1021 | +#define __OVERFLOW_POST_RETURN \ | |
1022 | + " bvc 3f\n" \ | |
1023 | +" mov %0, %1\n" \ | |
1024 | + "2: " REFCOUNT_TRAP_INSN "\n"\ | |
1025 | + "3:\n" | |
1026 | +#define __OVERFLOW_EXTABLE \ | |
1027 | + "4:\n" \ | |
1028 | + _ASM_EXTABLE(2b, 4b) | |
1029 | +#else | |
1030 | +#define __OVERFLOW_POST | |
1031 | +#define __OVERFLOW_POST_RETURN | |
1032 | +#define __OVERFLOW_EXTABLE | |
1033 | +#endif | |
1034 | + | |
1035 | +#define __ATOMIC_OP(op, suffix, c_op, asm_op, post_op, extable) \ | |
1036 | +static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \ | |
1037 | { \ | |
1038 | unsigned long tmp; \ | |
1039 | int result; \ | |
1040 | \ | |
1041 | prefetchw(&v->counter); \ | |
1042 | - __asm__ __volatile__("@ atomic_" #op "\n" \ | |
1043 | + __asm__ __volatile__("@ atomic_" #op #suffix "\n" \ | |
1044 | "1: ldrex %0, [%3]\n" \ | |
1045 | " " #asm_op " %0, %0, %4\n" \ | |
1046 | + post_op \ | |
1047 | " strex %1, %0, [%3]\n" \ | |
1048 | " teq %1, #0\n" \ | |
1049 | -" bne 1b" \ | |
1050 | +" bne 1b\n" \ | |
1051 | + extable \ | |
1052 | : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \ | |
1053 | : "r" (&v->counter), "Ir" (i) \ | |
1054 | : "cc"); \ | |
1055 | } \ | |
1056 | ||
1057 | -#define ATOMIC_OP_RETURN(op, c_op, asm_op) \ | |
1058 | -static inline int atomic_##op##_return(int i, atomic_t *v) \ | |
1059 | +#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op, , )\ | |
1060 | + __ATOMIC_OP(op, _unchecked, c_op, asm_op##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE) | |
1061 | + | |
1062 | +#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op, post_op, extable) \ | |
1063 | +static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\ | |
1064 | { \ | |
1065 | unsigned long tmp; \ | |
1066 | int result; \ | |
1067 | @@ -65,12 +113,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ | |
1068 | smp_mb(); \ | |
1069 | prefetchw(&v->counter); \ | |
1070 | \ | |
1071 | - __asm__ __volatile__("@ atomic_" #op "_return\n" \ | |
1072 | + __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n" \ | |
1073 | "1: ldrex %0, [%3]\n" \ | |
1074 | " " #asm_op " %0, %0, %4\n" \ | |
1075 | + post_op \ | |
1076 | " strex %1, %0, [%3]\n" \ | |
1077 | " teq %1, #0\n" \ | |
1078 | -" bne 1b" \ | |
1079 | +" bne 1b\n" \ | |
1080 | + extable \ | |
1081 | : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \ | |
1082 | : "r" (&v->counter), "Ir" (i) \ | |
1083 | : "cc"); \ | |
1084 | @@ -80,6 +130,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ | |
1085 | return result; \ | |
1086 | } | |
1087 | ||
1088 | +#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op, , )\ | |
1089 | + __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE) | |
1090 | + | |
1091 | static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) | |
1092 | { | |
1093 | int oldval; | |
1094 | @@ -115,12 +168,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) | |
1095 | __asm__ __volatile__ ("@ atomic_add_unless\n" | |
1096 | "1: ldrex %0, [%4]\n" | |
1097 | " teq %0, %5\n" | |
1098 | -" beq 2f\n" | |
1099 | -" add %1, %0, %6\n" | |
1100 | +" beq 4f\n" | |
1101 | +" adds %1, %0, %6\n" | |
1102 | + | |
1103 | +#ifdef CONFIG_PAX_REFCOUNT | |
1104 | +" bvc 3f\n" | |
1105 | +"2: " REFCOUNT_TRAP_INSN "\n" | |
1106 | +"3:\n" | |
1107 | +#endif | |
1108 | + | |
1109 | " strex %2, %1, [%4]\n" | |
1110 | " teq %2, #0\n" | |
1111 | " bne 1b\n" | |
1112 | -"2:" | |
1113 | +"4:" | |
1114 | + | |
1115 | +#ifdef CONFIG_PAX_REFCOUNT | |
1116 | + _ASM_EXTABLE(2b, 4b) | |
1117 | +#endif | |
1118 | + | |
1119 | : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter) | |
1120 | : "r" (&v->counter), "r" (u), "r" (a) | |
1121 | : "cc"); | |
1122 | @@ -131,14 +196,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) | |
1123 | return oldval; | |
1124 | } | |
1125 | ||
1126 | +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new) | |
1127 | +{ | |
1128 | + unsigned long oldval, res; | |
1129 | + | |
1130 | + smp_mb(); | |
1131 | + | |
1132 | + do { | |
1133 | + __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n" | |
1134 | + "ldrex %1, [%3]\n" | |
1135 | + "mov %0, #0\n" | |
1136 | + "teq %1, %4\n" | |
1137 | + "strexeq %0, %5, [%3]\n" | |
1138 | + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter) | |
1139 | + : "r" (&ptr->counter), "Ir" (old), "r" (new) | |
1140 | + : "cc"); | |
1141 | + } while (res); | |
1142 | + | |
1143 | + smp_mb(); | |
1144 | + | |
1145 | + return oldval; | |
1146 | +} | |
1147 | + | |
1148 | #else /* ARM_ARCH_6 */ | |
1149 | ||
1150 | #ifdef CONFIG_SMP | |
1151 | #error SMP not supported on pre-ARMv6 CPUs | |
1152 | #endif | |
1153 | ||
1154 | -#define ATOMIC_OP(op, c_op, asm_op) \ | |
1155 | -static inline void atomic_##op(int i, atomic_t *v) \ | |
1156 | +#define __ATOMIC_OP(op, suffix, c_op, asm_op) \ | |
1157 | +static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \ | |
1158 | { \ | |
1159 | unsigned long flags; \ | |
1160 | \ | |
1161 | @@ -147,8 +234,11 @@ static inline void atomic_##op(int i, atomic_t *v) \ | |
1162 | raw_local_irq_restore(flags); \ | |
1163 | } \ | |
1164 | ||
1165 | -#define ATOMIC_OP_RETURN(op, c_op, asm_op) \ | |
1166 | -static inline int atomic_##op##_return(int i, atomic_t *v) \ | |
1167 | +#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op) \ | |
1168 | + __ATOMIC_OP(op, _unchecked, c_op, asm_op) | |
1169 | + | |
1170 | +#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \ | |
1171 | +static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\ | |
1172 | { \ | |
1173 | unsigned long flags; \ | |
1174 | int val; \ | |
1175 | @@ -161,6 +251,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ | |
1176 | return val; \ | |
1177 | } | |
1178 | ||
1179 | +#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\ | |
1180 | + __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op) | |
1181 | + | |
1182 | static inline int atomic_cmpxchg(atomic_t *v, int old, int new) | |
1183 | { | |
1184 | int ret; | |
1185 | @@ -175,6 +268,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) | |
1186 | return ret; | |
1187 | } | |
1188 | ||
1189 | +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new) | |
1190 | +{ | |
1191 | + return atomic_cmpxchg((atomic_t *)v, old, new); | |
1192 | +} | |
1193 | + | |
1194 | static inline int __atomic_add_unless(atomic_t *v, int a, int u) | |
1195 | { | |
1196 | int c, old; | |
1197 | @@ -196,16 +294,38 @@ ATOMIC_OPS(sub, -=, sub) | |
1198 | ||
1199 | #undef ATOMIC_OPS | |
1200 | #undef ATOMIC_OP_RETURN | |
1201 | +#undef __ATOMIC_OP_RETURN | |
1202 | #undef ATOMIC_OP | |
1203 | +#undef __ATOMIC_OP | |
1204 | ||
1205 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) | |
1206 | +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new) | |
1207 | +{ | |
1208 | + return xchg(&v->counter, new); | |
1209 | +} | |
1210 | ||
1211 | #define atomic_inc(v) atomic_add(1, v) | |
1212 | +static inline void atomic_inc_unchecked(atomic_unchecked_t *v) | |
1213 | +{ | |
1214 | + atomic_add_unchecked(1, v); | |
1215 | +} | |
1216 | #define atomic_dec(v) atomic_sub(1, v) | |
1217 | +static inline void atomic_dec_unchecked(atomic_unchecked_t *v) | |
1218 | +{ | |
1219 | + atomic_sub_unchecked(1, v); | |
1220 | +} | |
1221 | ||
1222 | #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0) | |
1223 | +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v) | |
1224 | +{ | |
1225 | + return atomic_add_return_unchecked(1, v) == 0; | |
1226 | +} | |
1227 | #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0) | |
1228 | #define atomic_inc_return(v) (atomic_add_return(1, v)) | |
1229 | +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v) | |
1230 | +{ | |
1231 | + return atomic_add_return_unchecked(1, v); | |
1232 | +} | |
1233 | #define atomic_dec_return(v) (atomic_sub_return(1, v)) | |
1234 | #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) | |
1235 | ||
1236 | @@ -216,6 +336,14 @@ typedef struct { | |
1237 | long long counter; | |
1238 | } atomic64_t; | |
1239 | ||
1240 | +#ifdef CONFIG_PAX_REFCOUNT | |
1241 | +typedef struct { | |
1242 | + long long counter; | |
1243 | +} atomic64_unchecked_t; | |
1244 | +#else | |
1245 | +typedef atomic64_t atomic64_unchecked_t; | |
1246 | +#endif | |
1247 | + | |
1248 | #define ATOMIC64_INIT(i) { (i) } | |
1249 | ||
1250 | #ifdef CONFIG_ARM_LPAE | |
1251 | @@ -232,6 +360,19 @@ static inline long long atomic64_read(const atomic64_t *v) | |
1252 | return result; | |
1253 | } | |
1254 | ||
1255 | +static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v) | |
1256 | +{ | |
1257 | + long long result; | |
1258 | + | |
1259 | + __asm__ __volatile__("@ atomic64_read_unchecked\n" | |
1260 | +" ldrd %0, %H0, [%1]" | |
1261 | + : "=&r" (result) | |
1262 | + : "r" (&v->counter), "Qo" (v->counter) | |
1263 | + ); | |
1264 | + | |
1265 | + return result; | |
1266 | +} | |
1267 | + | |
1268 | static inline void atomic64_set(atomic64_t *v, long long i) | |
1269 | { | |
1270 | __asm__ __volatile__("@ atomic64_set\n" | |
1271 | @@ -240,6 +381,15 @@ static inline void atomic64_set(atomic64_t *v, long long i) | |
1272 | : "r" (&v->counter), "r" (i) | |
1273 | ); | |
1274 | } | |
1275 | + | |
1276 | +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i) | |
1277 | +{ | |
1278 | + __asm__ __volatile__("@ atomic64_set_unchecked\n" | |
1279 | +" strd %2, %H2, [%1]" | |
1280 | + : "=Qo" (v->counter) | |
1281 | + : "r" (&v->counter), "r" (i) | |
1282 | + ); | |
1283 | +} | |
1284 | #else | |
1285 | static inline long long atomic64_read(const atomic64_t *v) | |
1286 | { | |
1287 | @@ -254,6 +404,19 @@ static inline long long atomic64_read(const atomic64_t *v) | |
1288 | return result; | |
1289 | } | |
1290 | ||
1291 | +static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v) | |
1292 | +{ | |
1293 | + long long result; | |
1294 | + | |
1295 | + __asm__ __volatile__("@ atomic64_read_unchecked\n" | |
1296 | +" ldrexd %0, %H0, [%1]" | |
1297 | + : "=&r" (result) | |
1298 | + : "r" (&v->counter), "Qo" (v->counter) | |
1299 | + ); | |
1300 | + | |
1301 | + return result; | |
1302 | +} | |
1303 | + | |
1304 | static inline void atomic64_set(atomic64_t *v, long long i) | |
1305 | { | |
1306 | long long tmp; | |
1307 | @@ -268,29 +431,57 @@ static inline void atomic64_set(atomic64_t *v, long long i) | |
1308 | : "r" (&v->counter), "r" (i) | |
1309 | : "cc"); | |
1310 | } | |
1311 | + | |
1312 | +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i) | |
1313 | +{ | |
1314 | + long long tmp; | |
1315 | + | |
1316 | + prefetchw(&v->counter); | |
1317 | + __asm__ __volatile__("@ atomic64_set_unchecked\n" | |
1318 | +"1: ldrexd %0, %H0, [%2]\n" | |
1319 | +" strexd %0, %3, %H3, [%2]\n" | |
1320 | +" teq %0, #0\n" | |
1321 | +" bne 1b" | |
1322 | + : "=&r" (tmp), "=Qo" (v->counter) | |
1323 | + : "r" (&v->counter), "r" (i) | |
1324 | + : "cc"); | |
1325 | +} | |
1326 | #endif | |
1327 | ||
1328 | -#define ATOMIC64_OP(op, op1, op2) \ | |
1329 | -static inline void atomic64_##op(long long i, atomic64_t *v) \ | |
1330 | +#undef __OVERFLOW_POST_RETURN | |
1331 | +#define __OVERFLOW_POST_RETURN \ | |
1332 | + " bvc 3f\n" \ | |
1333 | +" mov %0, %1\n" \ | |
1334 | +" mov %H0, %H1\n" \ | |
1335 | + "2: " REFCOUNT_TRAP_INSN "\n"\ | |
1336 | + "3:\n" | |
1337 | + | |
1338 | +#define __ATOMIC64_OP(op, suffix, op1, op2, post_op, extable) \ | |
1339 | +static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\ | |
1340 | { \ | |
1341 | long long result; \ | |
1342 | unsigned long tmp; \ | |
1343 | \ | |
1344 | prefetchw(&v->counter); \ | |
1345 | - __asm__ __volatile__("@ atomic64_" #op "\n" \ | |
1346 | + __asm__ __volatile__("@ atomic64_" #op #suffix "\n" \ | |
1347 | "1: ldrexd %0, %H0, [%3]\n" \ | |
1348 | " " #op1 " %Q0, %Q0, %Q4\n" \ | |
1349 | " " #op2 " %R0, %R0, %R4\n" \ | |
1350 | + post_op \ | |
1351 | " strexd %1, %0, %H0, [%3]\n" \ | |
1352 | " teq %1, #0\n" \ | |
1353 | -" bne 1b" \ | |
1354 | +" bne 1b\n" \ | |
1355 | + extable \ | |
1356 | : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \ | |
1357 | : "r" (&v->counter), "r" (i) \ | |
1358 | : "cc"); \ | |
1359 | } \ | |
1360 | ||
1361 | -#define ATOMIC64_OP_RETURN(op, op1, op2) \ | |
1362 | -static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \ | |
1363 | +#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, , op1, op2, , ) \ | |
1364 | + __ATOMIC64_OP(op, _unchecked, op1, op2##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE) | |
1365 | + | |
1366 | +#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2, post_op, extable) \ | |
1367 | +static inline long long atomic64_##op##_return##suffix(long long i, atomic64##suffix##_t *v) \ | |
1368 | { \ | |
1369 | long long result; \ | |
1370 | unsigned long tmp; \ | |
1371 | @@ -298,13 +489,15 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \ | |
1372 | smp_mb(); \ | |
1373 | prefetchw(&v->counter); \ | |
1374 | \ | |
1375 | - __asm__ __volatile__("@ atomic64_" #op "_return\n" \ | |
1376 | + __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n" \ | |
1377 | "1: ldrexd %0, %H0, [%3]\n" \ | |
1378 | " " #op1 " %Q0, %Q0, %Q4\n" \ | |
1379 | " " #op2 " %R0, %R0, %R4\n" \ | |
1380 | + post_op \ | |
1381 | " strexd %1, %0, %H0, [%3]\n" \ | |
1382 | " teq %1, #0\n" \ | |
1383 | -" bne 1b" \ | |
1384 | +" bne 1b\n" \ | |
1385 | + extable \ | |
1386 | : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \ | |
1387 | : "r" (&v->counter), "r" (i) \ | |
1388 | : "cc"); \ | |
1389 | @@ -314,6 +507,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \ | |
1390 | return result; \ | |
1391 | } | |
1392 | ||
1393 | +#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, , op1, op2, , ) \ | |
1394 | + __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE) | |
1395 | + | |
1396 | #define ATOMIC64_OPS(op, op1, op2) \ | |
1397 | ATOMIC64_OP(op, op1, op2) \ | |
1398 | ATOMIC64_OP_RETURN(op, op1, op2) | |
1399 | @@ -323,7 +519,12 @@ ATOMIC64_OPS(sub, subs, sbc) | |
1400 | ||
1401 | #undef ATOMIC64_OPS | |
1402 | #undef ATOMIC64_OP_RETURN | |
1403 | +#undef __ATOMIC64_OP_RETURN | |
1404 | #undef ATOMIC64_OP | |
1405 | +#undef __ATOMIC64_OP | |
1406 | +#undef __OVERFLOW_EXTABLE | |
1407 | +#undef __OVERFLOW_POST_RETURN | |
1408 | +#undef __OVERFLOW_POST | |
1409 | ||
1410 | static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old, | |
1411 | long long new) | |
1412 | @@ -351,6 +552,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old, | |
1413 | return oldval; | |
1414 | } | |
1415 | ||
1416 | +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old, | |
1417 | + long long new) | |
1418 | +{ | |
1419 | + long long oldval; | |
1420 | + unsigned long res; | |
1421 | + | |
1422 | + smp_mb(); | |
1423 | + | |
1424 | + do { | |
1425 | + __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n" | |
1426 | + "ldrexd %1, %H1, [%3]\n" | |
1427 | + "mov %0, #0\n" | |
1428 | + "teq %1, %4\n" | |
1429 | + "teqeq %H1, %H4\n" | |
1430 | + "strexdeq %0, %5, %H5, [%3]" | |
1431 | + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter) | |
1432 | + : "r" (&ptr->counter), "r" (old), "r" (new) | |
1433 | + : "cc"); | |
1434 | + } while (res); | |
1435 | + | |
1436 | + smp_mb(); | |
1437 | + | |
1438 | + return oldval; | |
1439 | +} | |
1440 | + | |
1441 | static inline long long atomic64_xchg(atomic64_t *ptr, long long new) | |
1442 | { | |
1443 | long long result; | |
1444 | @@ -376,21 +602,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new) | |
1445 | static inline long long atomic64_dec_if_positive(atomic64_t *v) | |
1446 | { | |
1447 | long long result; | |
1448 | - unsigned long tmp; | |
1449 | + u64 tmp; | |
1450 | ||
1451 | smp_mb(); | |
1452 | prefetchw(&v->counter); | |
1453 | ||
1454 | __asm__ __volatile__("@ atomic64_dec_if_positive\n" | |
1455 | -"1: ldrexd %0, %H0, [%3]\n" | |
1456 | -" subs %Q0, %Q0, #1\n" | |
1457 | -" sbc %R0, %R0, #0\n" | |
1458 | +"1: ldrexd %1, %H1, [%3]\n" | |
1459 | +" subs %Q0, %Q1, #1\n" | |
1460 | +" sbcs %R0, %R1, #0\n" | |
1461 | + | |
1462 | +#ifdef CONFIG_PAX_REFCOUNT | |
1463 | +" bvc 3f\n" | |
1464 | +" mov %Q0, %Q1\n" | |
1465 | +" mov %R0, %R1\n" | |
1466 | +"2: " REFCOUNT_TRAP_INSN "\n" | |
1467 | +"3:\n" | |
1468 | +#endif | |
1469 | + | |
1470 | " teq %R0, #0\n" | |
1471 | -" bmi 2f\n" | |
1472 | +" bmi 4f\n" | |
1473 | " strexd %1, %0, %H0, [%3]\n" | |
1474 | " teq %1, #0\n" | |
1475 | " bne 1b\n" | |
1476 | -"2:" | |
1477 | +"4:\n" | |
1478 | + | |
1479 | +#ifdef CONFIG_PAX_REFCOUNT | |
1480 | + _ASM_EXTABLE(2b, 4b) | |
1481 | +#endif | |
1482 | + | |
1483 | : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) | |
1484 | : "r" (&v->counter) | |
1485 | : "cc"); | |
1486 | @@ -414,13 +654,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) | |
1487 | " teq %0, %5\n" | |
1488 | " teqeq %H0, %H5\n" | |
1489 | " moveq %1, #0\n" | |
1490 | -" beq 2f\n" | |
1491 | +" beq 4f\n" | |
1492 | " adds %Q0, %Q0, %Q6\n" | |
1493 | -" adc %R0, %R0, %R6\n" | |
1494 | +" adcs %R0, %R0, %R6\n" | |
1495 | + | |
1496 | +#ifdef CONFIG_PAX_REFCOUNT | |
1497 | +" bvc 3f\n" | |
1498 | +"2: " REFCOUNT_TRAP_INSN "\n" | |
1499 | +"3:\n" | |
1500 | +#endif | |
1501 | + | |
1502 | " strexd %2, %0, %H0, [%4]\n" | |
1503 | " teq %2, #0\n" | |
1504 | " bne 1b\n" | |
1505 | -"2:" | |
1506 | +"4:\n" | |
1507 | + | |
1508 | +#ifdef CONFIG_PAX_REFCOUNT | |
1509 | + _ASM_EXTABLE(2b, 4b) | |
1510 | +#endif | |
1511 | + | |
1512 | : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter) | |
1513 | : "r" (&v->counter), "r" (u), "r" (a) | |
1514 | : "cc"); | |
1515 | @@ -433,10 +685,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) | |
1516 | ||
1517 | #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) | |
1518 | #define atomic64_inc(v) atomic64_add(1LL, (v)) | |
1519 | +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v)) | |
1520 | #define atomic64_inc_return(v) atomic64_add_return(1LL, (v)) | |
1521 | +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v)) | |
1522 | #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) | |
1523 | #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) | |
1524 | #define atomic64_dec(v) atomic64_sub(1LL, (v)) | |
1525 | +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v)) | |
1526 | #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v)) | |
1527 | #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) | |
1528 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) | |
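The atomic64 hunks above follow the same pattern as the 32-bit ones: each operation is generated twice, a checked variant whose adds/adcs + bvc sequence traps on signed overflow (REFCOUNT_TRAP_INSN), and an _unchecked variant that wraps silently for counters that are allowed to overflow. A minimal userspace sketch of that split, using only standard C and the GCC/Clang overflow builtin (names here are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* "checked" flavour: trap on signed overflow, as the bvc path does */
static int64_t checked_add64(int64_t a, int64_t b)
{
        int64_t res;

        if (__builtin_add_overflow(a, b, &res)) {
                fprintf(stderr, "refcount overflow detected\n");
                abort();        /* the kernel raises REFCOUNT_TRAP_INSN instead */
        }
        return res;
}

/* "_unchecked" flavour: plain two's-complement wrap, no trap */
static int64_t unchecked_add64(int64_t a, int64_t b)
{
        return (int64_t)((uint64_t)a + (uint64_t)b);
}

int main(void)
{
        printf("%lld\n", (long long)unchecked_add64(INT64_MAX, 1));    /* wraps */
        printf("%lld\n", (long long)checked_add64(1, 2));              /* 3 */
        return 0;
}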
1529 | diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h | |
1530 | index c6a3e73..35cca85 100644 | |
1531 | --- a/arch/arm/include/asm/barrier.h | |
1532 | +++ b/arch/arm/include/asm/barrier.h | |
1533 | @@ -63,7 +63,7 @@ | |
1534 | do { \ | |
1535 | compiletime_assert_atomic_type(*p); \ | |
1536 | smp_mb(); \ | |
1537 | - ACCESS_ONCE(*p) = (v); \ | |
1538 | + ACCESS_ONCE_RW(*p) = (v); \ | |
1539 | } while (0) | |
1540 | ||
1541 | #define smp_load_acquire(p) \ | |
1542 | diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h | |
1543 | index 75fe66b..ba3dee4 100644 | |
1544 | --- a/arch/arm/include/asm/cache.h | |
1545 | +++ b/arch/arm/include/asm/cache.h | |
1546 | @@ -4,8 +4,10 @@ | |
1547 | #ifndef __ASMARM_CACHE_H | |
1548 | #define __ASMARM_CACHE_H | |
1549 | ||
1550 | +#include <linux/const.h> | |
1551 | + | |
1552 | #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT | |
1553 | -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) | |
1554 | +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) | |
1555 | ||
1556 | /* | |
1557 | * Memory returned by kmalloc() may be used for DMA, so we must make | |
1558 | @@ -24,5 +26,6 @@ | |
1559 | #endif | |
1560 | ||
1561 | #define __read_mostly __attribute__((__section__(".data..read_mostly"))) | |
1562 | +#define __read_only __attribute__ ((__section__(".data..read_only"))) | |
1563 | ||
1564 | #endif | |
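The cache.h change is about the type of the constant: <linux/const.h> defines _AC() so that _AC(1,UL) expands to 1UL in C but to a bare 1 in assembly, letting the same L1_CACHE_BYTES definition serve both. A small sketch of the C-side difference (values are illustrative):

#include <stdio.h>

#define SHIFT 6                        /* e.g. a 64-byte cache line */
#define BYTES_PLAIN (1 << SHIFT)       /* old form: an int */
#define BYTES_UL    (1UL << SHIFT)     /* what _AC(1,UL) yields in C */

int main(void)
{
        /* 4 vs 8 on an LP64 host: the constant now has full word width */
        printf("%zu %zu\n", sizeof(BYTES_PLAIN), sizeof(BYTES_UL));
        return 0;
}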
1565 | diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h | |
1566 | index 10e78d0..dc8505d 100644 | |
1567 | --- a/arch/arm/include/asm/cacheflush.h | |
1568 | +++ b/arch/arm/include/asm/cacheflush.h | |
1569 | @@ -116,7 +116,7 @@ struct cpu_cache_fns { | |
1570 | void (*dma_unmap_area)(const void *, size_t, int); | |
1571 | ||
1572 | void (*dma_flush_range)(const void *, const void *); | |
1573 | -}; | |
1574 | +} __no_const; | |
1575 | ||
1576 | /* | |
1577 | * Select the calling method | |
1578 | diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h | |
1579 | index 5233151..87a71fa 100644 | |
1580 | --- a/arch/arm/include/asm/checksum.h | |
1581 | +++ b/arch/arm/include/asm/checksum.h | |
1582 | @@ -37,7 +37,19 @@ __wsum | |
1583 | csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum); | |
1584 | ||
1585 | __wsum | |
1586 | -csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr); | |
1587 | +__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr); | |
1588 | + | |
1589 | +static inline __wsum | |
1590 | +csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr) | |
1591 | +{ | |
1592 | + __wsum ret; | |
1593 | + pax_open_userland(); | |
1594 | + ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr); | |
1595 | + pax_close_userland(); | |
1596 | + return ret; | |
1597 | +} | |
1598 | + | |
1599 | + | |
1600 | ||
1601 | /* | |
1602 | * Fold a partial checksum without adding pseudo headers | |
1603 | diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h | |
1604 | index abb2c37..96db950 100644 | |
1605 | --- a/arch/arm/include/asm/cmpxchg.h | |
1606 | +++ b/arch/arm/include/asm/cmpxchg.h | |
1607 | @@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size | |
1608 | ||
1609 | #define xchg(ptr,x) \ | |
1610 | ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) | |
1611 | +#define xchg_unchecked(ptr,x) \ | |
1612 | + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) | |
1613 | ||
1614 | #include <asm-generic/cmpxchg-local.h> | |
1615 | ||
1616 | diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h | |
1617 | index 6ddbe44..b5e38b1 100644 | |
1618 | --- a/arch/arm/include/asm/domain.h | |
1619 | +++ b/arch/arm/include/asm/domain.h | |
1620 | @@ -48,18 +48,37 @@ | |
1621 | * Domain types | |
1622 | */ | |
1623 | #define DOMAIN_NOACCESS 0 | |
1624 | -#define DOMAIN_CLIENT 1 | |
1625 | #ifdef CONFIG_CPU_USE_DOMAINS | |
1626 | +#define DOMAIN_USERCLIENT 1 | |
1627 | +#define DOMAIN_KERNELCLIENT 1 | |
1628 | #define DOMAIN_MANAGER 3 | |
1629 | +#define DOMAIN_VECTORS DOMAIN_USER | |
1630 | #else | |
1631 | + | |
1632 | +#ifdef CONFIG_PAX_KERNEXEC | |
1633 | #define DOMAIN_MANAGER 1 | |
1634 | +#define DOMAIN_KERNEXEC 3 | |
1635 | +#else | |
1636 | +#define DOMAIN_MANAGER 1 | |
1637 | +#endif | |
1638 | + | |
1639 | +#ifdef CONFIG_PAX_MEMORY_UDEREF | |
1640 | +#define DOMAIN_USERCLIENT 0 | |
1641 | +#define DOMAIN_UDEREF 1 | |
1642 | +#define DOMAIN_VECTORS DOMAIN_KERNEL | |
1643 | +#else | |
1644 | +#define DOMAIN_USERCLIENT 1 | |
1645 | +#define DOMAIN_VECTORS DOMAIN_USER | |
1646 | +#endif | |
1647 | +#define DOMAIN_KERNELCLIENT 1 | |
1648 | + | |
1649 | #endif | |
1650 | ||
1651 | #define domain_val(dom,type) ((type) << (2*(dom))) | |
1652 | ||
1653 | #ifndef __ASSEMBLY__ | |
1654 | ||
1655 | -#ifdef CONFIG_CPU_USE_DOMAINS | |
1656 | +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) | |
1657 | static inline void set_domain(unsigned val) | |
1658 | { | |
1659 | asm volatile( | |
1660 | @@ -68,15 +87,7 @@ static inline void set_domain(unsigned val) | |
1661 | isb(); | |
1662 | } | |
1663 | ||
1664 | -#define modify_domain(dom,type) \ | |
1665 | - do { \ | |
1666 | - struct thread_info *thread = current_thread_info(); \ | |
1667 | - unsigned int domain = thread->cpu_domain; \ | |
1668 | - domain &= ~domain_val(dom, DOMAIN_MANAGER); \ | |
1669 | - thread->cpu_domain = domain | domain_val(dom, type); \ | |
1670 | - set_domain(thread->cpu_domain); \ | |
1671 | - } while (0) | |
1672 | - | |
1673 | +extern void modify_domain(unsigned int dom, unsigned int type); | |
1674 | #else | |
1675 | static inline void set_domain(unsigned val) { } | |
1676 | static inline void modify_domain(unsigned dom, unsigned type) { } | |
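domain_val(dom, type) simply places a 2-bit access-control field at position 2*dom of the Domain Access Control Register, so the new KERNELCLIENT/USERCLIENT/UDEREF names are just different policies written into those same fields. A toy calculation of a DACR image (domain numbers and values below are illustrative, not the kernel's exact layout):

#include <stdio.h>

#define domain_val(dom, type)  ((unsigned)(type) << (2 * (dom)))

enum { DOMAIN_KERNEL = 0, DOMAIN_USER = 1, DOMAIN_IO = 2 };
enum { NOACCESS = 0, CLIENT = 1, MANAGER = 3 };

int main(void)
{
        unsigned dacr = domain_val(DOMAIN_KERNEL, CLIENT) |
                        domain_val(DOMAIN_USER, NOACCESS) |
                        domain_val(DOMAIN_IO, CLIENT);

        /* 0x11: kernel and IO as client, userland cut off entirely */
        printf("DACR = %#x\n", dacr);
        return 0;
}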
1677 | diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h | |
1678 | index afb9caf..9a0bac0 100644 | |
1679 | --- a/arch/arm/include/asm/elf.h | |
1680 | +++ b/arch/arm/include/asm/elf.h | |
1681 | @@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); | |
1682 | the loader. We need to make sure that it is out of the way of the program | |
1683 | that it will "exec", and that there is sufficient room for the brk. */ | |
1684 | ||
1685 | -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) | |
1686 | +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) | |
1687 | + | |
1688 | +#ifdef CONFIG_PAX_ASLR | |
1689 | +#define PAX_ELF_ET_DYN_BASE 0x00008000UL | |
1690 | + | |
1691 | +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10) | |
1692 | +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10) | |
1693 | +#endif | |
1694 | ||
1695 | /* When the program starts, a1 contains a pointer to a function to be | |
1696 | registered with atexit, as per the SVR4 ABI. A value of 0 means we | |
1697 | @@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); | |
1698 | extern void elf_set_personality(const struct elf32_hdr *); | |
1699 | #define SET_PERSONALITY(ex) elf_set_personality(&(ex)) | |
1700 | ||
1701 | -struct mm_struct; | |
1702 | -extern unsigned long arch_randomize_brk(struct mm_struct *mm); | |
1703 | -#define arch_randomize_brk arch_randomize_brk | |
1704 | - | |
1705 | #ifdef CONFIG_MMU | |
1706 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 | |
1707 | struct linux_binprm; | |
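PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN are bit counts: the randomisation is applied at page granularity, so a 32-bit personality gets 16 random bits and everything else gets 10. Rough sizes of the resulting jitter, assuming 4 KiB pages (illustrative arithmetic only):

#include <stdio.h>

int main(void)
{
        unsigned long page = 4096;
        unsigned bits32 = 16, bits_other = 10;

        printf("PER_LINUX_32BIT: %lu MiB of mmap jitter\n",
               (page << bits32) >> 20);        /* 256 MiB */
        printf("other personalities: %lu MiB of mmap jitter\n",
               (page << bits_other) >> 20);    /* 4 MiB */
        return 0;
}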
1708 | diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h | |
1709 | index de53547..52b9a28 100644 | |
1710 | --- a/arch/arm/include/asm/fncpy.h | |
1711 | +++ b/arch/arm/include/asm/fncpy.h | |
1712 | @@ -81,7 +81,9 @@ | |
1713 | BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \ | |
1714 | (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \ | |
1715 | \ | |
1716 | + pax_open_kernel(); \ | |
1717 | memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \ | |
1718 | + pax_close_kernel(); \ | |
1719 | flush_icache_range((unsigned long)(dest_buf), \ | |
1720 | (unsigned long)(dest_buf) + (size)); \ | |
1721 | \ | |
1722 | diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h | |
1723 | index 53e69da..3fdc896 100644 | |
1724 | --- a/arch/arm/include/asm/futex.h | |
1725 | +++ b/arch/arm/include/asm/futex.h | |
1726 | @@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |
1727 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | |
1728 | return -EFAULT; | |
1729 | ||
1730 | + pax_open_userland(); | |
1731 | + | |
1732 | smp_mb(); | |
1733 | /* Prefetching cannot fault */ | |
1734 | prefetchw(uaddr); | |
1735 | @@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |
1736 | : "cc", "memory"); | |
1737 | smp_mb(); | |
1738 | ||
1739 | + pax_close_userland(); | |
1740 | + | |
1741 | *uval = val; | |
1742 | return ret; | |
1743 | } | |
1744 | @@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |
1745 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | |
1746 | return -EFAULT; | |
1747 | ||
1748 | + pax_open_userland(); | |
1749 | + | |
1750 | __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" | |
1751 | "1: " TUSER(ldr) " %1, [%4]\n" | |
1752 | " teq %1, %2\n" | |
1753 | @@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |
1754 | : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) | |
1755 | : "cc", "memory"); | |
1756 | ||
1757 | + pax_close_userland(); | |
1758 | + | |
1759 | *uval = val; | |
1760 | return ret; | |
1761 | } | |
1762 | @@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) | |
1763 | return -EFAULT; | |
1764 | ||
1765 | pagefault_disable(); /* implies preempt_disable() */ | |
1766 | + pax_open_userland(); | |
1767 | ||
1768 | switch (op) { | |
1769 | case FUTEX_OP_SET: | |
1770 | @@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) | |
1771 | ret = -ENOSYS; | |
1772 | } | |
1773 | ||
1774 | + pax_close_userland(); | |
1775 | pagefault_enable(); /* subsumes preempt_enable() */ | |
1776 | ||
1777 | if (!ret) { | |
1778 | diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h | |
1779 | index 83eb2f7..ed77159 100644 | |
1780 | --- a/arch/arm/include/asm/kmap_types.h | |
1781 | +++ b/arch/arm/include/asm/kmap_types.h | |
1782 | @@ -4,6 +4,6 @@ | |
1783 | /* | |
1784 | * This is the "bare minimum". AIO seems to require this. | |
1785 | */ | |
1786 | -#define KM_TYPE_NR 16 | |
1787 | +#define KM_TYPE_NR 17 | |
1788 | ||
1789 | #endif | |
1790 | diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h | |
1791 | index 9e614a1..3302cca 100644 | |
1792 | --- a/arch/arm/include/asm/mach/dma.h | |
1793 | +++ b/arch/arm/include/asm/mach/dma.h | |
1794 | @@ -22,7 +22,7 @@ struct dma_ops { | |
1795 | int (*residue)(unsigned int, dma_t *); /* optional */ | |
1796 | int (*setspeed)(unsigned int, dma_t *, int); /* optional */ | |
1797 | const char *type; | |
1798 | -}; | |
1799 | +} __do_const; | |
1800 | ||
1801 | struct dma_struct { | |
1802 | void *addr; /* single DMA address */ | |
1803 | diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h | |
1804 | index f98c7f3..e5c626d 100644 | |
1805 | --- a/arch/arm/include/asm/mach/map.h | |
1806 | +++ b/arch/arm/include/asm/mach/map.h | |
1807 | @@ -23,17 +23,19 @@ struct map_desc { | |
1808 | ||
1809 | /* types 0-3 are defined in asm/io.h */ | |
1810 | enum { | |
1811 | - MT_UNCACHED = 4, | |
1812 | - MT_CACHECLEAN, | |
1813 | - MT_MINICLEAN, | |
1814 | + MT_UNCACHED_RW = 4, | |
1815 | + MT_CACHECLEAN_RO, | |
1816 | + MT_MINICLEAN_RO, | |
1817 | MT_LOW_VECTORS, | |
1818 | MT_HIGH_VECTORS, | |
1819 | - MT_MEMORY_RWX, | |
1820 | + __MT_MEMORY_RWX, | |
1821 | MT_MEMORY_RW, | |
1822 | - MT_ROM, | |
1823 | - MT_MEMORY_RWX_NONCACHED, | |
1824 | + MT_MEMORY_RX, | |
1825 | + MT_ROM_RX, | |
1826 | + MT_MEMORY_RW_NONCACHED, | |
1827 | + MT_MEMORY_RX_NONCACHED, | |
1828 | MT_MEMORY_RW_DTCM, | |
1829 | - MT_MEMORY_RWX_ITCM, | |
1830 | + MT_MEMORY_RX_ITCM, | |
1831 | MT_MEMORY_RW_SO, | |
1832 | MT_MEMORY_DMA_READY, | |
1833 | }; | |
1834 | diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h | |
1835 | index 891a56b..48f337e 100644 | |
1836 | --- a/arch/arm/include/asm/outercache.h | |
1837 | +++ b/arch/arm/include/asm/outercache.h | |
1838 | @@ -36,7 +36,7 @@ struct outer_cache_fns { | |
1839 | ||
1840 | /* This is an ARM L2C thing */ | |
1841 | void (*write_sec)(unsigned long, unsigned); | |
1842 | -}; | |
1843 | +} __no_const; | |
1844 | ||
1845 | extern struct outer_cache_fns outer_cache; | |
1846 | ||
1847 | diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h | |
1848 | index 4355f0e..cd9168e 100644 | |
1849 | --- a/arch/arm/include/asm/page.h | |
1850 | +++ b/arch/arm/include/asm/page.h | |
1851 | @@ -23,6 +23,7 @@ | |
1852 | ||
1853 | #else | |
1854 | ||
1855 | +#include <linux/compiler.h> | |
1856 | #include <asm/glue.h> | |
1857 | ||
1858 | /* | |
1859 | @@ -114,7 +115,7 @@ struct cpu_user_fns { | |
1860 | void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr); | |
1861 | void (*cpu_copy_user_highpage)(struct page *to, struct page *from, | |
1862 | unsigned long vaddr, struct vm_area_struct *vma); | |
1863 | -}; | |
1864 | +} __no_const; | |
1865 | ||
1866 | #ifdef MULTI_USER | |
1867 | extern struct cpu_user_fns cpu_user; | |
1868 | diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h | |
1869 | index 78a7793..e3dc06c 100644 | |
1870 | --- a/arch/arm/include/asm/pgalloc.h | |
1871 | +++ b/arch/arm/include/asm/pgalloc.h | |
1872 | @@ -17,6 +17,7 @@ | |
1873 | #include <asm/processor.h> | |
1874 | #include <asm/cacheflush.h> | |
1875 | #include <asm/tlbflush.h> | |
1876 | +#include <asm/system_info.h> | |
1877 | ||
1878 | #define check_pgt_cache() do { } while (0) | |
1879 | ||
1880 | @@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) | |
1881 | set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE)); | |
1882 | } | |
1883 | ||
1884 | +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) | |
1885 | +{ | |
1886 | + pud_populate(mm, pud, pmd); | |
1887 | +} | |
1888 | + | |
1889 | #else /* !CONFIG_ARM_LPAE */ | |
1890 | ||
1891 | /* | |
1892 | @@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) | |
1893 | #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); }) | |
1894 | #define pmd_free(mm, pmd) do { } while (0) | |
1895 | #define pud_populate(mm,pmd,pte) BUG() | |
1896 | +#define pud_populate_kernel(mm,pmd,pte) BUG() | |
1897 | ||
1898 | #endif /* CONFIG_ARM_LPAE */ | |
1899 | ||
1900 | @@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte) | |
1901 | __free_page(pte); | |
1902 | } | |
1903 | ||
1904 | +static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot) | |
1905 | +{ | |
1906 | +#ifdef CONFIG_ARM_LPAE | |
1907 | + pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot); | |
1908 | +#else | |
1909 | + if (addr & SECTION_SIZE) | |
1910 | + pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot); | |
1911 | + else | |
1912 | + pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot); | |
1913 | +#endif | |
1914 | + flush_pmd_entry(pmdp); | |
1915 | +} | |
1916 | + | |
1917 | static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte, | |
1918 | pmdval_t prot) | |
1919 | { | |
1920 | @@ -157,7 +177,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) | |
1921 | static inline void | |
1922 | pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) | |
1923 | { | |
1924 | - __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE); | |
1925 | + __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask); | |
1926 | } | |
1927 | #define pmd_pgtable(pmd) pmd_page(pmd) | |
1928 | ||
1929 | diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h | |
1930 | index 5cfba15..f415e1a 100644 | |
1931 | --- a/arch/arm/include/asm/pgtable-2level-hwdef.h | |
1932 | +++ b/arch/arm/include/asm/pgtable-2level-hwdef.h | |
1933 | @@ -20,12 +20,15 @@ | |
1934 | #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0) | |
1935 | #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0) | |
1936 | #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0) | |
1937 | +#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */ | |
1938 | #define PMD_BIT4 (_AT(pmdval_t, 1) << 4) | |
1939 | #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5) | |
1940 | #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */ | |
1941 | + | |
1942 | /* | |
1943 | * - section | |
1944 | */ | |
1945 | +#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */ | |
1946 | #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2) | |
1947 | #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3) | |
1948 | #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */ | |
1949 | @@ -37,6 +40,7 @@ | |
1950 | #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */ | |
1951 | #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */ | |
1952 | #define PMD_SECT_AF (_AT(pmdval_t, 0)) | |
1953 | +#define PMD_SECT_RDONLY (_AT(pmdval_t, 0)) | |
1954 | ||
1955 | #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0)) | |
1956 | #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE) | |
1957 | @@ -66,6 +70,7 @@ | |
1958 | * - extended small page/tiny page | |
1959 | */ | |
1960 | #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */ | |
1961 | +#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */ | |
1962 | #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4) | |
1963 | #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4) | |
1964 | #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4) | |
1965 | diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h | |
1966 | index f027941..f36ce30 100644 | |
1967 | --- a/arch/arm/include/asm/pgtable-2level.h | |
1968 | +++ b/arch/arm/include/asm/pgtable-2level.h | |
1969 | @@ -126,6 +126,9 @@ | |
1970 | #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */ | |
1971 | #define L_PTE_NONE (_AT(pteval_t, 1) << 11) | |
1972 | ||
1973 | +/* Two-level page tables only have PXN in the PGD, not in the PTE. */ | |
1974 | +#define L_PTE_PXN (_AT(pteval_t, 0)) | |
1975 | + | |
1976 | /* | |
1977 | * These are the memory types, defined to be compatible with | |
1978 | * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB | |
1979 | diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h | |
1980 | index 9fd61c7..f8f1cff 100644 | |
1981 | --- a/arch/arm/include/asm/pgtable-3level-hwdef.h | |
1982 | +++ b/arch/arm/include/asm/pgtable-3level-hwdef.h | |
1983 | @@ -76,6 +76,7 @@ | |
1984 | #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ | |
1985 | #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */ | |
1986 | #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */ | |
1987 | +#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */ | |
1988 | #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */ | |
1989 | ||
1990 | /* | |
1991 | diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h | |
1992 | index a31ecdad..95e98d4 100644 | |
1993 | --- a/arch/arm/include/asm/pgtable-3level.h | |
1994 | +++ b/arch/arm/include/asm/pgtable-3level.h | |
1995 | @@ -81,6 +81,7 @@ | |
1996 | #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */ | |
1997 | #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ | |
1998 | #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */ | |
1999 | +#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */ | |
2000 | #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */ | |
2001 | #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) | |
2002 | #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) | |
2003 | @@ -92,10 +93,12 @@ | |
2004 | #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56) | |
2005 | #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57) | |
2006 | #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58) | |
2007 | +#define PMD_SECT_RDONLY PMD_SECT_AP2 | |
2008 | ||
2009 | /* | |
2010 | * To be used in assembly code with the upper page attributes. | |
2011 | */ | |
2012 | +#define L_PTE_PXN_HIGH (1 << (53 - 32)) | |
2013 | #define L_PTE_XN_HIGH (1 << (54 - 32)) | |
2014 | #define L_PTE_DIRTY_HIGH (1 << (55 - 32)) | |
2015 | ||
2016 | diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h | |
2017 | index 3b30062..01a5f9d 100644 | |
2018 | --- a/arch/arm/include/asm/pgtable.h | |
2019 | +++ b/arch/arm/include/asm/pgtable.h | |
2020 | @@ -33,6 +33,9 @@ | |
2021 | #include <asm/pgtable-2level.h> | |
2022 | #endif | |
2023 | ||
2024 | +#define ktla_ktva(addr) (addr) | |
2025 | +#define ktva_ktla(addr) (addr) | |
2026 | + | |
2027 | /* | |
2028 | * Just any arbitrary offset to the start of the vmalloc VM area: the | |
2029 | * current 8MB value just means that there will be a 8MB "hole" after the | |
2030 | @@ -48,6 +51,9 @@ | |
2031 | #define LIBRARY_TEXT_START 0x0c000000 | |
2032 | ||
2033 | #ifndef __ASSEMBLY__ | |
2034 | +extern pteval_t __supported_pte_mask; | |
2035 | +extern pmdval_t __supported_pmd_mask; | |
2036 | + | |
2037 | extern void __pte_error(const char *file, int line, pte_t); | |
2038 | extern void __pmd_error(const char *file, int line, pmd_t); | |
2039 | extern void __pgd_error(const char *file, int line, pgd_t); | |
2040 | @@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t); | |
2041 | #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd) | |
2042 | #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd) | |
2043 | ||
2044 | +#define __HAVE_ARCH_PAX_OPEN_KERNEL | |
2045 | +#define __HAVE_ARCH_PAX_CLOSE_KERNEL | |
2046 | + | |
2047 | +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) | |
2048 | +#include <asm/domain.h> | |
2049 | +#include <linux/thread_info.h> | |
2050 | +#include <linux/preempt.h> | |
2051 | + | |
2052 | +static inline int test_domain(int domain, int domaintype) | |
2053 | +{ | |
2054 | + return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype); | |
2055 | +} | |
2056 | +#endif | |
2057 | + | |
2058 | +#ifdef CONFIG_PAX_KERNEXEC | |
2059 | +static inline unsigned long pax_open_kernel(void) { | |
2060 | +#ifdef CONFIG_ARM_LPAE | |
2061 | + /* TODO */ | |
2062 | +#else | |
2063 | + preempt_disable(); | |
2064 | + BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC)); | |
2065 | + modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC); | |
2066 | +#endif | |
2067 | + return 0; | |
2068 | +} | |
2069 | + | |
2070 | +static inline unsigned long pax_close_kernel(void) { | |
2071 | +#ifdef CONFIG_ARM_LPAE | |
2072 | + /* TODO */ | |
2073 | +#else | |
2074 | + BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER)); | |
2075 | + /* DOMAIN_MANAGER = "client" under KERNEXEC */ | |
2076 | + modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER); | |
2077 | + preempt_enable_no_resched(); | |
2078 | +#endif | |
2079 | + return 0; | |
2080 | +} | |
2081 | +#else | |
2082 | +static inline unsigned long pax_open_kernel(void) { return 0; } | |
2083 | +static inline unsigned long pax_close_kernel(void) { return 0; } | |
2084 | +#endif | |
2085 | + | |
2086 | /* | |
2087 | * This is the lowest virtual address we can permit any user space | |
2088 | * mapping to be mapped at. This is particularly important for | |
2089 | @@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t); | |
2090 | /* | |
2091 | * The pgprot_* and protection_map entries will be fixed up in runtime | |
2092 | * to include the cachable and bufferable bits based on memory policy, | |
2093 | - * as well as any architecture dependent bits like global/ASID and SMP | |
2094 | - * shared mapping bits. | |
2095 | + * as well as any architecture dependent bits like global/ASID, PXN, | |
2096 | + * and SMP shared mapping bits. | |
2097 | */ | |
2098 | #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG | |
2099 | ||
2100 | @@ -267,7 +315,7 @@ PTE_BIT_FUNC(mknexec, |= L_PTE_XN); | |
2101 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | |
2102 | { | |
2103 | const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | | |
2104 | - L_PTE_NONE | L_PTE_VALID; | |
2105 | + L_PTE_NONE | L_PTE_VALID | __supported_pte_mask; | |
2106 | pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); | |
2107 | return pte; | |
2108 | } | |
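pax_open_kernel()/pax_close_kernel() bracket writes to otherwise read-only kernel memory by switching DOMAIN_KERNEL to a more permissive setting and back, with preemption held off in between; later hunks (fiq.c, patch.c, tls.h) show callers wrapping a single memcpy or store this way. A rough userspace analogue of the same bracketing, using mprotect() instead of the DACR (a sketch only, error handling omitted, 4 KiB page assumed):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static char *page;

static void open_rw(void)  { mprotect(page, 4096, PROT_READ | PROT_WRITE); }
static void close_rw(void) { mprotect(page, 4096, PROT_READ); }

int main(void)
{
        page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        mprotect(page, 4096, PROT_READ);      /* normally read-only */

        open_rw();                            /* cf. pax_open_kernel() */
        strcpy(page, "patched");
        close_rw();                           /* cf. pax_close_kernel() */

        printf("%s\n", page);
        return 0;
}

The point of the pairing is the same in both worlds: the window in which the data is writable is as short and as local as possible.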
2109 | diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h | |
2110 | index c25ef3e..735f14b 100644 | |
2111 | --- a/arch/arm/include/asm/psci.h | |
2112 | +++ b/arch/arm/include/asm/psci.h | |
2113 | @@ -32,7 +32,7 @@ struct psci_operations { | |
2114 | int (*affinity_info)(unsigned long target_affinity, | |
2115 | unsigned long lowest_affinity_level); | |
2116 | int (*migrate_info_type)(void); | |
2117 | -}; | |
2118 | +} __no_const; | |
2119 | ||
2120 | extern struct psci_operations psci_ops; | |
2121 | extern struct smp_operations psci_smp_ops; | |
2122 | diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h | |
2123 | index 18f5a55..5072a40 100644 | |
2124 | --- a/arch/arm/include/asm/smp.h | |
2125 | +++ b/arch/arm/include/asm/smp.h | |
2126 | @@ -107,7 +107,7 @@ struct smp_operations { | |
2127 | int (*cpu_disable)(unsigned int cpu); | |
2128 | #endif | |
2129 | #endif | |
2130 | -}; | |
2131 | +} __no_const; | |
2132 | ||
2133 | struct of_cpu_method { | |
2134 | const char *method; | |
2135 | diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h | |
2136 | index ce73ab6..7310f8a 100644 | |
2137 | --- a/arch/arm/include/asm/thread_info.h | |
2138 | +++ b/arch/arm/include/asm/thread_info.h | |
2139 | @@ -78,9 +78,9 @@ struct thread_info { | |
2140 | .flags = 0, \ | |
2141 | .preempt_count = INIT_PREEMPT_COUNT, \ | |
2142 | .addr_limit = KERNEL_DS, \ | |
2143 | - .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ | |
2144 | - domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ | |
2145 | - domain_val(DOMAIN_IO, DOMAIN_CLIENT), \ | |
2146 | + .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \ | |
2147 | + domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \ | |
2148 | + domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \ | |
2149 | .restart_block = { \ | |
2150 | .fn = do_no_restart_syscall, \ | |
2151 | }, \ | |
2152 | @@ -154,7 +154,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, | |
2153 | #define TIF_SYSCALL_AUDIT 9 | |
2154 | #define TIF_SYSCALL_TRACEPOINT 10 | |
2155 | #define TIF_SECCOMP 11 /* seccomp syscall filtering active */ | |
2156 | -#define TIF_NOHZ 12 /* in adaptive nohz mode */ | |
2157 | +/* within 8 bits of TIF_SYSCALL_TRACE | |
2158 | + * to meet flexible second operand requirements | |
2159 | + */ | |
2160 | +#define TIF_GRSEC_SETXID 12 | |
2161 | +#define TIF_NOHZ 13 /* in adaptive nohz mode */ | |
2162 | #define TIF_USING_IWMMXT 17 | |
2163 | #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ | |
2164 | #define TIF_RESTORE_SIGMASK 20 | |
2165 | @@ -168,10 +172,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, | |
2166 | #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) | |
2167 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) | |
2168 | #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) | |
2169 | +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID) | |
2170 | ||
2171 | /* Checks for any syscall work in entry-common.S */ | |
2172 | #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ | |
2173 | - _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP) | |
2174 | + _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID) | |
2175 | ||
2176 | /* | |
2177 | * Change these and you break ASM code in entry-common.S | |
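The "within 8 bits of TIF_SYSCALL_TRACE" comment refers to the ARM A32 immediate format: a data-processing immediate is an 8-bit value rotated right by an even amount, so the _TIF_SYSCALL_WORK mask can only be tested with a single tst instruction if all of its bits fit such a window. TIF_SYSCALL_TRACE is bit 8 in this tree, so bits 8..12 still qualify. A small standalone checker for that encodability rule (illustrative only):

#include <stdint.h>
#include <stdio.h>

static int is_arm_immediate(uint32_t x)
{
        for (int rot = 0; rot < 32; rot += 2) {
                /* rotate left by rot, i.e. undo a rotate-right encoding */
                uint32_t v = rot ? ((x << rot) | (x >> (32 - rot))) : x;
                if (v <= 0xff)
                        return 1;
        }
        return 0;
}

int main(void)
{
        uint32_t work = (1u << 8) | (1u << 9) | (1u << 10) | (1u << 11) | (1u << 12);

        printf("%d\n", is_arm_immediate(work));                       /* 1: bits 8..12 fit */
        printf("%d\n", is_arm_immediate((1u << 0) | (1u << 16)));     /* 0: spread too far */
        return 0;
}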
2178 | diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h | |
2179 | index 5f833f7..76e6644 100644 | |
2180 | --- a/arch/arm/include/asm/tls.h | |
2181 | +++ b/arch/arm/include/asm/tls.h | |
2182 | @@ -3,6 +3,7 @@ | |
2183 | ||
2184 | #include <linux/compiler.h> | |
2185 | #include <asm/thread_info.h> | |
2186 | +#include <asm/pgtable.h> | |
2187 | ||
2188 | #ifdef __ASSEMBLY__ | |
2189 | #include <asm/asm-offsets.h> | |
2190 | @@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val) | |
2191 | * at 0xffff0fe0 must be used instead. (see | |
2192 | * entry-armv.S for details) | |
2193 | */ | |
2194 | + pax_open_kernel(); | |
2195 | *((unsigned int *)0xffff0ff0) = val; | |
2196 | + pax_close_kernel(); | |
2197 | #endif | |
2198 | } | |
2199 | ||
2200 | diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h | |
2201 | index 4767eb9..bf00668 100644 | |
2202 | --- a/arch/arm/include/asm/uaccess.h | |
2203 | +++ b/arch/arm/include/asm/uaccess.h | |
2204 | @@ -18,6 +18,7 @@ | |
2205 | #include <asm/domain.h> | |
2206 | #include <asm/unified.h> | |
2207 | #include <asm/compiler.h> | |
2208 | +#include <asm/pgtable.h> | |
2209 | ||
2210 | #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS | |
2211 | #include <asm-generic/uaccess-unaligned.h> | |
2212 | @@ -70,11 +71,38 @@ extern int __put_user_bad(void); | |
2213 | static inline void set_fs(mm_segment_t fs) | |
2214 | { | |
2215 | current_thread_info()->addr_limit = fs; | |
2216 | - modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); | |
2217 | + modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER); | |
2218 | } | |
2219 | ||
2220 | #define segment_eq(a,b) ((a) == (b)) | |
2221 | ||
2222 | +#define __HAVE_ARCH_PAX_OPEN_USERLAND | |
2223 | +#define __HAVE_ARCH_PAX_CLOSE_USERLAND | |
2224 | + | |
2225 | +static inline void pax_open_userland(void) | |
2226 | +{ | |
2227 | + | |
2228 | +#ifdef CONFIG_PAX_MEMORY_UDEREF | |
2229 | + if (segment_eq(get_fs(), USER_DS)) { | |
2230 | + BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF)); | |
2231 | + modify_domain(DOMAIN_USER, DOMAIN_UDEREF); | |
2232 | + } | |
2233 | +#endif | |
2234 | + | |
2235 | +} | |
2236 | + | |
2237 | +static inline void pax_close_userland(void) | |
2238 | +{ | |
2239 | + | |
2240 | +#ifdef CONFIG_PAX_MEMORY_UDEREF | |
2241 | + if (segment_eq(get_fs(), USER_DS)) { | |
2242 | + BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS)); | |
2243 | + modify_domain(DOMAIN_USER, DOMAIN_NOACCESS); | |
2244 | + } | |
2245 | +#endif | |
2246 | + | |
2247 | +} | |
2248 | + | |
2249 | #define __addr_ok(addr) ({ \ | |
2250 | unsigned long flag; \ | |
2251 | __asm__("cmp %2, %0; movlo %0, #0" \ | |
2252 | @@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *); | |
2253 | ||
2254 | #define get_user(x,p) \ | |
2255 | ({ \ | |
2256 | + int __e; \ | |
2257 | might_fault(); \ | |
2258 | - __get_user_check(x,p); \ | |
2259 | + pax_open_userland(); \ | |
2260 | + __e = __get_user_check(x,p); \ | |
2261 | + pax_close_userland(); \ | |
2262 | + __e; \ | |
2263 | }) | |
2264 | ||
2265 | extern int __put_user_1(void *, unsigned int); | |
2266 | @@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long); | |
2267 | ||
2268 | #define put_user(x,p) \ | |
2269 | ({ \ | |
2270 | + int __e; \ | |
2271 | might_fault(); \ | |
2272 | - __put_user_check(x,p); \ | |
2273 | + pax_open_userland(); \ | |
2274 | + __e = __put_user_check(x,p); \ | |
2275 | + pax_close_userland(); \ | |
2276 | + __e; \ | |
2277 | }) | |
2278 | ||
2279 | #else /* CONFIG_MMU */ | |
2280 | @@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs) | |
2281 | ||
2282 | #endif /* CONFIG_MMU */ | |
2283 | ||
2284 | +#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size)) | |
2285 | #define access_ok(type,addr,size) (__range_ok(addr,size) == 0) | |
2286 | ||
2287 | #define user_addr_max() \ | |
2288 | @@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs) | |
2289 | #define __get_user(x,ptr) \ | |
2290 | ({ \ | |
2291 | long __gu_err = 0; \ | |
2292 | + pax_open_userland(); \ | |
2293 | __get_user_err((x),(ptr),__gu_err); \ | |
2294 | + pax_close_userland(); \ | |
2295 | __gu_err; \ | |
2296 | }) | |
2297 | ||
2298 | #define __get_user_error(x,ptr,err) \ | |
2299 | ({ \ | |
2300 | + pax_open_userland(); \ | |
2301 | __get_user_err((x),(ptr),err); \ | |
2302 | + pax_close_userland(); \ | |
2303 | (void) 0; \ | |
2304 | }) | |
2305 | ||
2306 | @@ -368,13 +409,17 @@ do { \ | |
2307 | #define __put_user(x,ptr) \ | |
2308 | ({ \ | |
2309 | long __pu_err = 0; \ | |
2310 | + pax_open_userland(); \ | |
2311 | __put_user_err((x),(ptr),__pu_err); \ | |
2312 | + pax_close_userland(); \ | |
2313 | __pu_err; \ | |
2314 | }) | |
2315 | ||
2316 | #define __put_user_error(x,ptr,err) \ | |
2317 | ({ \ | |
2318 | + pax_open_userland(); \ | |
2319 | __put_user_err((x),(ptr),err); \ | |
2320 | + pax_close_userland(); \ | |
2321 | (void) 0; \ | |
2322 | }) | |
2323 | ||
2324 | @@ -474,11 +519,44 @@ do { \ | |
2325 | ||
2326 | ||
2327 | #ifdef CONFIG_MMU | |
2328 | -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n); | |
2329 | -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n); | |
2330 | +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n); | |
2331 | +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n); | |
2332 | + | |
2333 | +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n) | |
2334 | +{ | |
2335 | + unsigned long ret; | |
2336 | + | |
2337 | + check_object_size(to, n, false); | |
2338 | + pax_open_userland(); | |
2339 | + ret = ___copy_from_user(to, from, n); | |
2340 | + pax_close_userland(); | |
2341 | + return ret; | |
2342 | +} | |
2343 | + | |
2344 | +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n) | |
2345 | +{ | |
2346 | + unsigned long ret; | |
2347 | + | |
2348 | + check_object_size(from, n, true); | |
2349 | + pax_open_userland(); | |
2350 | + ret = ___copy_to_user(to, from, n); | |
2351 | + pax_close_userland(); | |
2352 | + return ret; | |
2353 | +} | |
2354 | + | |
2355 | extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n); | |
2356 | -extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); | |
2357 | +extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n); | |
2358 | extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n); | |
2359 | + | |
2360 | +static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n) | |
2361 | +{ | |
2362 | + unsigned long ret; | |
2363 | + pax_open_userland(); | |
2364 | + ret = ___clear_user(addr, n); | |
2365 | + pax_close_userland(); | |
2366 | + return ret; | |
2367 | +} | |
2368 | + | |
2369 | #else | |
2370 | #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0) | |
2371 | #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0) | |
2372 | @@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l | |
2373 | ||
2374 | static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) | |
2375 | { | |
2376 | + if ((long)n < 0) | |
2377 | + return n; | |
2378 | + | |
2379 | if (access_ok(VERIFY_READ, from, n)) | |
2380 | n = __copy_from_user(to, from, n); | |
2381 | else /* security hole - plug it */ | |
2382 | @@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u | |
2383 | ||
2384 | static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) | |
2385 | { | |
2386 | + if ((long)n < 0) | |
2387 | + return n; | |
2388 | + | |
2389 | if (access_ok(VERIFY_WRITE, to, n)) | |
2390 | n = __copy_to_user(to, from, n); | |
2391 | return n; | |
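The new (long)n < 0 check in copy_from_user()/copy_to_user() guards against callers that miscompute a length: n is unsigned, so a negative result of size arithmetic turns into a gigantic copy request. A minimal userspace sketch of the guard's effect (types simplified, memcpy stands in for the real copy routine):

#include <stdio.h>
#include <string.h>

static unsigned long guarded_copy(void *to, const void *from, unsigned long n)
{
        if ((long)n < 0)                /* reject sizes with the top bit set */
                return n;               /* report "n bytes not copied" */
        memcpy(to, from, n);
        return 0;
}

int main(void)
{
        char dst[16], src[16] = "hello";
        long computed = 4 - 8;          /* a buggy length calculation */

        printf("left uncopied: %lu\n",
               guarded_copy(dst, src, (unsigned long)computed));
        return 0;
}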
2392 | diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h | |
2393 | index 5af0ed1..cea83883 100644 | |
2394 | --- a/arch/arm/include/uapi/asm/ptrace.h | |
2395 | +++ b/arch/arm/include/uapi/asm/ptrace.h | |
2396 | @@ -92,7 +92,7 @@ | |
2397 | * ARMv7 groups of PSR bits | |
2398 | */ | |
2399 | #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */ | |
2400 | -#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */ | |
2401 | +#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */ | |
2402 | #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ | |
2403 | #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */ | |
2404 | ||
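The PSR_ISET_MASK change is a plain constant fix: the ISA-state bits of the CPSR are J (bit 24) and T (bit 5), and the old value 0x01000010 had bit 4, which belongs to the mode field M[4:0], in place of T. Quick arithmetic check:

#include <stdio.h>

int main(void)
{
        unsigned j = 1u << 24, t = 1u << 5;

        printf("%#010x\n", j | t);      /* 0x01000020, the corrected mask */
        return 0;
}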
2405 | diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c | |
2406 | index a88671c..1cc895e 100644 | |
2407 | --- a/arch/arm/kernel/armksyms.c | |
2408 | +++ b/arch/arm/kernel/armksyms.c | |
2409 | @@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops); | |
2410 | ||
2411 | /* networking */ | |
2412 | EXPORT_SYMBOL(csum_partial); | |
2413 | -EXPORT_SYMBOL(csum_partial_copy_from_user); | |
2414 | +EXPORT_SYMBOL(__csum_partial_copy_from_user); | |
2415 | EXPORT_SYMBOL(csum_partial_copy_nocheck); | |
2416 | EXPORT_SYMBOL(__csum_ipv6_magic); | |
2417 | ||
2418 | @@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero); | |
2419 | #ifdef CONFIG_MMU | |
2420 | EXPORT_SYMBOL(copy_page); | |
2421 | ||
2422 | -EXPORT_SYMBOL(__copy_from_user); | |
2423 | -EXPORT_SYMBOL(__copy_to_user); | |
2424 | -EXPORT_SYMBOL(__clear_user); | |
2425 | +EXPORT_SYMBOL(___copy_from_user); | |
2426 | +EXPORT_SYMBOL(___copy_to_user); | |
2427 | +EXPORT_SYMBOL(___clear_user); | |
2428 | ||
2429 | EXPORT_SYMBOL(__get_user_1); | |
2430 | EXPORT_SYMBOL(__get_user_2); | |
2431 | diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S | |
2432 | index 2f5555d..d493c91 100644 | |
2433 | --- a/arch/arm/kernel/entry-armv.S | |
2434 | +++ b/arch/arm/kernel/entry-armv.S | |
2435 | @@ -47,6 +47,87 @@ | |
2436 | 9997: | |
2437 | .endm | |
2438 | ||
2439 | + .macro pax_enter_kernel | |
2440 | +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) | |
2441 | + @ make aligned space for saved DACR | |
2442 | + sub sp, sp, #8 | |
2443 | + @ save regs | |
2444 | + stmdb sp!, {r1, r2} | |
2445 | + @ read DACR from cpu_domain into r1 | |
2446 | + mov r2, sp | |
2447 | + @ assume 8K pages, since we have to split the immediate in two | |
2448 | + bic r2, r2, #(0x1fc0) | |
2449 | + bic r2, r2, #(0x3f) | |
2450 | + ldr r1, [r2, #TI_CPU_DOMAIN] | |
2451 | + @ store old DACR on stack | |
2452 | + str r1, [sp, #8] | |
2453 | +#ifdef CONFIG_PAX_KERNEXEC | |
2454 | + @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT | |
2455 | + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3)) | |
2456 | + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT)) | |
2457 | +#endif | |
2458 | +#ifdef CONFIG_PAX_MEMORY_UDEREF | |
2459 | + @ set current DOMAIN_USER to DOMAIN_NOACCESS | |
2460 | + bic r1, r1, #(domain_val(DOMAIN_USER, 3)) | |
2461 | +#endif | |
2462 | + @ write r1 to current_thread_info()->cpu_domain | |
2463 | + str r1, [r2, #TI_CPU_DOMAIN] | |
2464 | + @ write r1 to DACR | |
2465 | + mcr p15, 0, r1, c3, c0, 0 | |
2466 | + @ instruction sync | |
2467 | + instr_sync | |
2468 | + @ restore regs | |
2469 | + ldmia sp!, {r1, r2} | |
2470 | +#endif | |
2471 | + .endm | |
2472 | + | |
2473 | + .macro pax_open_userland | |
2474 | +#ifdef CONFIG_PAX_MEMORY_UDEREF | |
2475 | + @ save regs | |
2476 | + stmdb sp!, {r0, r1} | |
2477 | + @ read DACR from cpu_domain into r1 | |
2478 | + mov r0, sp | |
2479 | + @ assume 8K pages, since we have to split the immediate in two | |
2480 | + bic r0, r0, #(0x1fc0) | |
2481 | + bic r0, r0, #(0x3f) | |
2482 | + ldr r1, [r0, #TI_CPU_DOMAIN] | |
2483 | + @ set current DOMAIN_USER to DOMAIN_CLIENT | |
2484 | + bic r1, r1, #(domain_val(DOMAIN_USER, 3)) | |
2485 | + orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF)) | |
2486 | + @ write r1 to current_thread_info()->cpu_domain | |
2487 | + str r1, [r0, #TI_CPU_DOMAIN] | |
2488 | + @ write r1 to DACR | |
2489 | + mcr p15, 0, r1, c3, c0, 0 | |
2490 | + @ instruction sync | |
2491 | + instr_sync | |
2492 | + @ restore regs | |
2493 | + ldmia sp!, {r0, r1} | |
2494 | +#endif | |
2495 | + .endm | |
2496 | + | |
2497 | + .macro pax_close_userland | |
2498 | +#ifdef CONFIG_PAX_MEMORY_UDEREF | |
2499 | + @ save regs | |
2500 | + stmdb sp!, {r0, r1} | |
2501 | + @ read DACR from cpu_domain into r1 | |
2502 | + mov r0, sp | |
2503 | + @ assume 8K pages, since we have to split the immediate in two | |
2504 | + bic r0, r0, #(0x1fc0) | |
2505 | + bic r0, r0, #(0x3f) | |
2506 | + ldr r1, [r0, #TI_CPU_DOMAIN] | |
2507 | + @ set current DOMAIN_USER to DOMAIN_NOACCESS | |
2508 | + bic r1, r1, #(domain_val(DOMAIN_USER, 3)) | |
2509 | + @ write r1 to current_thread_info()->cpu_domain | |
2510 | + str r1, [r0, #TI_CPU_DOMAIN] | |
2511 | + @ write r1 to DACR | |
2512 | + mcr p15, 0, r1, c3, c0, 0 | |
2513 | + @ instruction sync | |
2514 | + instr_sync | |
2515 | + @ restore regs | |
2516 | + ldmia sp!, {r0, r1} | |
2517 | +#endif | |
2518 | + .endm | |
2519 | + | |
2520 | .macro pabt_helper | |
2521 | @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5 | |
2522 | #ifdef MULTI_PABORT | |
2523 | @@ -89,11 +170,15 @@ | |
2524 | * Invalid mode handlers | |
2525 | */ | |
2526 | .macro inv_entry, reason | |
2527 | + | |
2528 | + pax_enter_kernel | |
2529 | + | |
2530 | sub sp, sp, #S_FRAME_SIZE | |
2531 | ARM( stmib sp, {r1 - lr} ) | |
2532 | THUMB( stmia sp, {r0 - r12} ) | |
2533 | THUMB( str sp, [sp, #S_SP] ) | |
2534 | THUMB( str lr, [sp, #S_LR] ) | |
2535 | + | |
2536 | mov r1, #\reason | |
2537 | .endm | |
2538 | ||
2539 | @@ -149,7 +234,11 @@ ENDPROC(__und_invalid) | |
2540 | .macro svc_entry, stack_hole=0, trace=1 | |
2541 | UNWIND(.fnstart ) | |
2542 | UNWIND(.save {r0 - pc} ) | |
2543 | + | |
2544 | + pax_enter_kernel | |
2545 | + | |
2546 | sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4) | |
2547 | + | |
2548 | #ifdef CONFIG_THUMB2_KERNEL | |
2549 | SPFIX( str r0, [sp] ) @ temporarily saved | |
2550 | SPFIX( mov r0, sp ) | |
2551 | @@ -164,7 +253,12 @@ ENDPROC(__und_invalid) | |
2552 | ldmia r0, {r3 - r5} | |
2553 | add r7, sp, #S_SP - 4 @ here for interlock avoidance | |
2554 | mov r6, #-1 @ "" "" "" "" | |
2555 | +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) | |
2556 | + @ offset sp by 8 as done in pax_enter_kernel | |
2557 | + add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4) | |
2558 | +#else | |
2559 | add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4) | |
2560 | +#endif | |
2561 | SPFIX( addeq r2, r2, #4 ) | |
2562 | str r3, [sp, #-4]! @ save the "real" r0 copied | |
2563 | @ from the exception stack | |
2564 | @@ -368,6 +462,9 @@ ENDPROC(__fiq_abt) | |
2565 | .macro usr_entry, trace=1 | |
2566 | UNWIND(.fnstart ) | |
2567 | UNWIND(.cantunwind ) @ don't unwind the user space | |
2568 | + | |
2569 | + pax_enter_kernel_user | |
2570 | + | |
2571 | sub sp, sp, #S_FRAME_SIZE | |
2572 | ARM( stmib sp, {r1 - r12} ) | |
2573 | THUMB( stmia sp, {r0 - r12} ) | |
2574 | @@ -478,7 +575,9 @@ __und_usr: | |
2575 | tst r3, #PSR_T_BIT @ Thumb mode? | |
2576 | bne __und_usr_thumb | |
2577 | sub r4, r2, #4 @ ARM instr at LR - 4 | |
2578 | + pax_open_userland | |
2579 | 1: ldrt r0, [r4] | |
2580 | + pax_close_userland | |
2581 | ARM_BE8(rev r0, r0) @ little endian instruction | |
2582 | ||
2583 | @ r0 = 32-bit ARM instruction which caused the exception | |
2584 | @@ -512,11 +611,15 @@ __und_usr_thumb: | |
2585 | */ | |
2586 | .arch armv6t2 | |
2587 | #endif | |
2588 | + pax_open_userland | |
2589 | 2: ldrht r5, [r4] | |
2590 | + pax_close_userland | |
2591 | ARM_BE8(rev16 r5, r5) @ little endian instruction | |
2592 | cmp r5, #0xe800 @ 32bit instruction if xx != 0 | |
2593 | blo __und_usr_fault_16 @ 16bit undefined instruction | |
2594 | + pax_open_userland | |
2595 | 3: ldrht r0, [r2] | |
2596 | + pax_close_userland | |
2597 | ARM_BE8(rev16 r0, r0) @ little endian instruction | |
2598 | add r2, r2, #2 @ r2 is PC + 2, make it PC + 4 | |
2599 | str r2, [sp, #S_PC] @ it's a 2x16bit instr, update | |
2600 | @@ -546,7 +649,8 @@ ENDPROC(__und_usr) | |
2601 | */ | |
2602 | .pushsection .fixup, "ax" | |
2603 | .align 2 | |
2604 | -4: str r4, [sp, #S_PC] @ retry current instruction | |
2605 | +4: pax_close_userland | |
2606 | + str r4, [sp, #S_PC] @ retry current instruction | |
2607 | ret r9 | |
2608 | .popsection | |
2609 | .pushsection __ex_table,"a" | |
2610 | @@ -766,7 +870,7 @@ ENTRY(__switch_to) | |
2611 | THUMB( str lr, [ip], #4 ) | |
2612 | ldr r4, [r2, #TI_TP_VALUE] | |
2613 | ldr r5, [r2, #TI_TP_VALUE + 4] | |
2614 | -#ifdef CONFIG_CPU_USE_DOMAINS | |
2615 | +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) | |
2616 | ldr r6, [r2, #TI_CPU_DOMAIN] | |
2617 | #endif | |
2618 | switch_tls r1, r4, r5, r3, r7 | |
2619 | @@ -775,7 +879,7 @@ ENTRY(__switch_to) | |
2620 | ldr r8, =__stack_chk_guard | |
2621 | ldr r7, [r7, #TSK_STACK_CANARY] | |
2622 | #endif | |
2623 | -#ifdef CONFIG_CPU_USE_DOMAINS | |
2624 | +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) | |
2625 | mcr p15, 0, r6, c3, c0, 0 @ Set domain register | |
2626 | #endif | |
2627 | mov r5, r0 | |
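All of the pax_enter_kernel/pax_open_userland/pax_close_userland macros above locate current_thread_info() the same way: thread_info sits at the base of the 8 KiB kernel stack, so clearing the low 13 bits of sp yields its address, and the mask is split into bic #0x1fc0 / bic #0x3f because 0x1fff is not encodable as a single ARM immediate. The same arithmetic in plain C (illustrative values):

#include <stdio.h>

int main(void)
{
        unsigned long thread_size = 8192;           /* 8 KiB kernel stacks */
        unsigned long sp = 0xc1234f7cUL;            /* some in-stack address */

        unsigned long ti_one_mask = sp & ~(thread_size - 1);
        unsigned long ti_two_bics = (sp & ~0x1fc0UL) & ~0x3fUL;

        printf("%#lx %#lx\n", ti_one_mask, ti_two_bics);   /* identical */
        return 0;
}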
2628 | diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S | |
2629 | index 6bb09d4..113e875 100644 | |
2630 | --- a/arch/arm/kernel/entry-common.S | |
2631 | +++ b/arch/arm/kernel/entry-common.S | |
2632 | @@ -11,18 +11,46 @@ | |
2633 | #include <asm/assembler.h> | |
2634 | #include <asm/unistd.h> | |
2635 | #include <asm/ftrace.h> | |
2636 | +#include <asm/domain.h> | |
2637 | #include <asm/unwind.h> | |
2638 | ||
2639 | +#include "entry-header.S" | |
2640 | + | |
2641 | #ifdef CONFIG_NEED_RET_TO_USER | |
2642 | #include <mach/entry-macro.S> | |
2643 | #else | |
2644 | .macro arch_ret_to_user, tmp1, tmp2 | |
2645 | +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) | |
2646 | + @ save regs | |
2647 | + stmdb sp!, {r1, r2} | |
2648 | + @ read DACR from cpu_domain into r1 | |
2649 | + mov r2, sp | |
2650 | + @ assume 8K pages, since we have to split the immediate in two | |
2651 | + bic r2, r2, #(0x1fc0) | |
2652 | + bic r2, r2, #(0x3f) | |
2653 | + ldr r1, [r2, #TI_CPU_DOMAIN] | |
2654 | +#ifdef CONFIG_PAX_KERNEXEC | |
2655 | + @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT | |
2656 | + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3)) | |
2657 | + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT)) | |
2658 | +#endif | |
2659 | +#ifdef CONFIG_PAX_MEMORY_UDEREF | |
2660 | + @ set current DOMAIN_USER to DOMAIN_UDEREF | |
2661 | + bic r1, r1, #(domain_val(DOMAIN_USER, 3)) | |
2662 | + orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF)) | |
2663 | +#endif | |
2664 | + @ write r1 to current_thread_info()->cpu_domain | |
2665 | + str r1, [r2, #TI_CPU_DOMAIN] | |
2666 | + @ write r1 to DACR | |
2667 | + mcr p15, 0, r1, c3, c0, 0 | |
2668 | + @ instruction sync | |
2669 | + instr_sync | |
2670 | + @ restore regs | |
2671 | + ldmia sp!, {r1, r2} | |
2672 | +#endif | |
2673 | .endm | |
2674 | #endif | |
2675 | ||
2676 | -#include "entry-header.S" | |
2677 | - | |
2678 | - | |
2679 | .align 5 | |
2680 | /* | |
2681 | * This is the fast syscall return path. We do as little as | |
2682 | @@ -406,6 +434,12 @@ ENTRY(vector_swi) | |
2683 | USER( ldr scno, [lr, #-4] ) @ get SWI instruction | |
2684 | #endif | |
2685 | ||
2686 | + /* | |
2687 | + * do this here to avoid a performance hit of wrapping the code above | |
2688 | + * that directly dereferences userland to parse the SWI instruction | |
2689 | + */ | |
2690 | + pax_enter_kernel_user | |
2691 | + | |
2692 | adr tbl, sys_call_table @ load syscall table pointer | |
2693 | ||
2694 | #if defined(CONFIG_OABI_COMPAT) | |
2695 | diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S | |
2696 | index 4176df7..a901f8d 100644 | |
2697 | --- a/arch/arm/kernel/entry-header.S | |
2698 | +++ b/arch/arm/kernel/entry-header.S | |
2699 | @@ -196,6 +196,60 @@ | |
2700 | msr cpsr_c, \rtemp @ switch back to the SVC mode | |
2701 | .endm | |
2702 | ||
2703 | + .macro pax_enter_kernel_user | |
2704 | +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) | |
2705 | + @ save regs | |
2706 | + stmdb sp!, {r0, r1} | |
2707 | + @ read DACR from cpu_domain into r1 | |
2708 | + mov r0, sp | |
2709 | + @ assume 8K pages, since we have to split the immediate in two | |
2710 | + bic r0, r0, #(0x1fc0) | |
2711 | + bic r0, r0, #(0x3f) | |
2712 | + ldr r1, [r0, #TI_CPU_DOMAIN] | |
2713 | +#ifdef CONFIG_PAX_MEMORY_UDEREF | |
2714 | + @ set current DOMAIN_USER to DOMAIN_NOACCESS | |
2715 | + bic r1, r1, #(domain_val(DOMAIN_USER, 3)) | |
2716 | +#endif | |
2717 | +#ifdef CONFIG_PAX_KERNEXEC | |
2718 | + @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT | |
2719 | + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3)) | |
2720 | + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT)) | |
2721 | +#endif | |
2722 | + @ write r1 to current_thread_info()->cpu_domain | |
2723 | + str r1, [r0, #TI_CPU_DOMAIN] | |
2724 | + @ write r1 to DACR | |
2725 | + mcr p15, 0, r1, c3, c0, 0 | |
2726 | + @ instruction sync | |
2727 | + instr_sync | |
2728 | + @ restore regs | |
2729 | + ldmia sp!, {r0, r1} | |
2730 | +#endif | |
2731 | + .endm | |
2732 | + | |
2733 | + .macro pax_exit_kernel | |
2734 | +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) | |
2735 | + @ save regs | |
2736 | + stmdb sp!, {r0, r1} | |
2737 | + @ read old DACR from stack into r1 | |
2738 | + ldr r1, [sp, #(8 + S_SP)] | |
2739 | + sub r1, r1, #8 | |
2740 | + ldr r1, [r1] | |
2741 | + | |
2742 | + @ write r1 to current_thread_info()->cpu_domain | |
2743 | + mov r0, sp | |
2744 | + @ assume 8K pages, since we have to split the immediate in two | |
2745 | + bic r0, r0, #(0x1fc0) | |
2746 | + bic r0, r0, #(0x3f) | |
2747 | + str r1, [r0, #TI_CPU_DOMAIN] | |
2748 | + @ write r1 to DACR | |
2749 | + mcr p15, 0, r1, c3, c0, 0 | |
2750 | + @ instruction sync | |
2751 | + instr_sync | |
2752 | + @ restore regs | |
2753 | + ldmia sp!, {r0, r1} | |
2754 | +#endif | |
2755 | + .endm | |
2756 | + | |
2757 | #ifndef CONFIG_THUMB2_KERNEL | |
2758 | .macro svc_exit, rpsr, irq = 0 | |
2759 | .if \irq != 0 | |
2760 | @@ -215,6 +269,9 @@ | |
2761 | blne trace_hardirqs_off | |
2762 | #endif | |
2763 | .endif | |
2764 | + | |
2765 | + pax_exit_kernel | |
2766 | + | |
2767 | msr spsr_cxsf, \rpsr | |
2768 | #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K) | |
2769 | @ We must avoid clrex due to Cortex-A15 erratum #830321 | |
2770 | @@ -290,6 +347,9 @@ | |
2771 | blne trace_hardirqs_off | |
2772 | #endif | |
2773 | .endif | |
2774 | + | |
2775 | + pax_exit_kernel | |
2776 | + | |
2777 | ldr lr, [sp, #S_SP] @ top of the stack | |
2778 | ldrd r0, r1, [sp, #S_LR] @ calling lr and pc | |
2779 | ||
2780 | diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c | |
2781 | index b37752a..ff5cb72 100644 | |
2782 | --- a/arch/arm/kernel/fiq.c | |
2783 | +++ b/arch/arm/kernel/fiq.c | |
2784 | @@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length) | |
2785 | void *base = vectors_page; | |
2786 | unsigned offset = FIQ_OFFSET; | |
2787 | ||
2788 | + pax_open_kernel(); | |
2789 | memcpy(base + offset, start, length); | |
2790 | + pax_close_kernel(); | |
2791 | + | |
2792 | if (!cache_is_vipt_nonaliasing()) | |
2793 | flush_icache_range((unsigned long)base + offset, offset + | |
2794 | length); | |
2795 | diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S | |
2796 | index 664eee8..f470938 100644 | |
2797 | --- a/arch/arm/kernel/head.S | |
2798 | +++ b/arch/arm/kernel/head.S | |
2799 | @@ -437,7 +437,7 @@ __enable_mmu: | |
2800 | mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ | |
2801 | domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ | |
2802 | domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ | |
2803 | - domain_val(DOMAIN_IO, DOMAIN_CLIENT)) | |
2804 | + domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT)) | |
2805 | mcr p15, 0, r5, c3, c0, 0 @ load domain access register | |
2806 | mcr p15, 0, r4, c2, c0, 0 @ load page table pointer | |
2807 | #endif | |
2808 | diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c | |
2809 | index 6a4dffe..4a86a70 100644 | |
2810 | --- a/arch/arm/kernel/module.c | |
2811 | +++ b/arch/arm/kernel/module.c | |
2812 | @@ -38,12 +38,39 @@ | |
2813 | #endif | |
2814 | ||
2815 | #ifdef CONFIG_MMU | |
2816 | -void *module_alloc(unsigned long size) | |
2817 | +static inline void *__module_alloc(unsigned long size, pgprot_t prot) | |
2818 | { | |
2819 | + if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR) | |
2820 | + return NULL; | |
2821 | return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, | |
2822 | - GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE, | |
2823 | + GFP_KERNEL, prot, NUMA_NO_NODE, | |
2824 | __builtin_return_address(0)); | |
2825 | } | |
2826 | + | |
2827 | +void *module_alloc(unsigned long size) | |
2828 | +{ | |
2829 | + | |
2830 | +#ifdef CONFIG_PAX_KERNEXEC | |
2831 | + return __module_alloc(size, PAGE_KERNEL); | |
2832 | +#else | |
2833 | + return __module_alloc(size, PAGE_KERNEL_EXEC); | |
2834 | +#endif | |
2835 | + | |
2836 | +} | |
2837 | + | |
2838 | +#ifdef CONFIG_PAX_KERNEXEC | |
2839 | +void module_free_exec(struct module *mod, void *module_region) | |
2840 | +{ | |
2841 | + module_free(mod, module_region); | |
2842 | +} | |
2843 | +EXPORT_SYMBOL(module_free_exec); | |
2844 | + | |
2845 | +void *module_alloc_exec(unsigned long size) | |
2846 | +{ | |
2847 | + return __module_alloc(size, PAGE_KERNEL_EXEC); | |
2848 | +} | |
2849 | +EXPORT_SYMBOL(module_alloc_exec); | |
2850 | +#endif | |
2851 | #endif | |
2852 | ||
2853 | int | |
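With CONFIG_PAX_KERNEXEC the module allocator above no longer hands out writable and executable memory from one call: module_alloc() now returns non-executable PAGE_KERNEL memory, while the new module_alloc_exec()/module_free_exec() pair serves PAGE_KERNEL_EXEC mappings for code. A rough sketch of a loader-side caller splitting its allocations accordingly is below, assuming the pre-4.0 module_free() interface used by this hunk; the struct and function names outside the module_alloc/module_free family are invented.

#include <linux/errno.h>
#include <linux/moduleloader.h>

struct loaded_mod {
	void *core_data;	/* RW, from module_alloc()      */
	void *core_text;	/* RX, from module_alloc_exec() */
};

static int alloc_mod_regions(struct loaded_mod *m, unsigned long data_sz,
			     unsigned long text_sz)
{
	m->core_data = module_alloc(data_sz);
	if (!m->core_data)
		return -ENOMEM;

	m->core_text = module_alloc_exec(text_sz);
	if (!m->core_text) {
		module_free(NULL, m->core_data);
		return -ENOMEM;
	}
	return 0;
}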
2854 | diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c | |
2855 | index 07314af..c46655c 100644 | |
2856 | --- a/arch/arm/kernel/patch.c | |
2857 | +++ b/arch/arm/kernel/patch.c | |
2858 | @@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn) | |
2859 | bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL); | |
2860 | int size; | |
2861 | ||
2862 | + pax_open_kernel(); | |
2863 | if (thumb2 && __opcode_is_thumb16(insn)) { | |
2864 | *(u16 *)addr = __opcode_to_mem_thumb16(insn); | |
2865 | size = sizeof(u16); | |
2866 | @@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn) | |
2867 | *(u32 *)addr = insn; | |
2868 | size = sizeof(u32); | |
2869 | } | |
2870 | + pax_close_kernel(); | |
2871 | ||
2872 | flush_icache_range((uintptr_t)(addr), | |
2873 | (uintptr_t)(addr) + size); | |
2874 | diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c | |
2875 | index fe972a2..a772d83 100644 | |
2876 | --- a/arch/arm/kernel/process.c | |
2877 | +++ b/arch/arm/kernel/process.c | |
2878 | @@ -207,6 +207,7 @@ void machine_power_off(void) | |
2879 | ||
2880 | if (pm_power_off) | |
2881 | pm_power_off(); | |
2882 | + BUG(); | |
2883 | } | |
2884 | ||
2885 | /* | |
2886 | @@ -220,7 +221,7 @@ void machine_power_off(void) | |
2887 | * executing pre-reset code, and using RAM that the primary CPU's code wishes | |
2888 | * to use. Implementing such co-ordination would be essentially impossible. | |
2889 | */ | |
2890 | -void machine_restart(char *cmd) | |
2891 | +__noreturn void machine_restart(char *cmd) | |
2892 | { | |
2893 | local_irq_disable(); | |
2894 | smp_send_stop(); | |
2895 | @@ -246,8 +247,8 @@ void __show_regs(struct pt_regs *regs) | |
2896 | ||
2897 | show_regs_print_info(KERN_DEFAULT); | |
2898 | ||
2899 | - print_symbol("PC is at %s\n", instruction_pointer(regs)); | |
2900 | - print_symbol("LR is at %s\n", regs->ARM_lr); | |
2901 | + printk("PC is at %pA\n", (void *)instruction_pointer(regs)); | |
2902 | + printk("LR is at %pA\n", (void *)regs->ARM_lr); | |
2903 | printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n" | |
2904 | "sp : %08lx ip : %08lx fp : %08lx\n", | |
2905 | regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr, | |
2906 | @@ -424,12 +425,6 @@ unsigned long get_wchan(struct task_struct *p) | |
2907 | return 0; | |
2908 | } | |
2909 | ||
2910 | -unsigned long arch_randomize_brk(struct mm_struct *mm) | |
2911 | -{ | |
2912 | - unsigned long range_end = mm->brk + 0x02000000; | |
2913 | - return randomize_range(mm->brk, range_end, 0) ? : mm->brk; | |
2914 | -} | |
2915 | - | |
2916 | #ifdef CONFIG_MMU | |
2917 | #ifdef CONFIG_KUSER_HELPERS | |
2918 | /* | |
2919 | @@ -445,7 +440,7 @@ static struct vm_area_struct gate_vma = { | |
2920 | ||
2921 | static int __init gate_vma_init(void) | |
2922 | { | |
2923 | - gate_vma.vm_page_prot = PAGE_READONLY_EXEC; | |
2924 | + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags); | |
2925 | return 0; | |
2926 | } | |
2927 | arch_initcall(gate_vma_init); | |
2928 | @@ -474,81 +469,13 @@ const char *arch_vma_name(struct vm_area_struct *vma) | |
2929 | return is_gate_vma(vma) ? "[vectors]" : NULL; | |
2930 | } | |
2931 | ||
2932 | -/* If possible, provide a placement hint at a random offset from the | |
2933 | - * stack for the signal page. | |
2934 | - */ | |
2935 | -static unsigned long sigpage_addr(const struct mm_struct *mm, | |
2936 | - unsigned int npages) | |
2937 | -{ | |
2938 | - unsigned long offset; | |
2939 | - unsigned long first; | |
2940 | - unsigned long last; | |
2941 | - unsigned long addr; | |
2942 | - unsigned int slots; | |
2943 | - | |
2944 | - first = PAGE_ALIGN(mm->start_stack); | |
2945 | - | |
2946 | - last = TASK_SIZE - (npages << PAGE_SHIFT); | |
2947 | - | |
2948 | - /* No room after stack? */ | |
2949 | - if (first > last) | |
2950 | - return 0; | |
2951 | - | |
2952 | - /* Just enough room? */ | |
2953 | - if (first == last) | |
2954 | - return first; | |
2955 | - | |
2956 | - slots = ((last - first) >> PAGE_SHIFT) + 1; | |
2957 | - | |
2958 | - offset = get_random_int() % slots; | |
2959 | - | |
2960 | - addr = first + (offset << PAGE_SHIFT); | |
2961 | - | |
2962 | - return addr; | |
2963 | -} | |
2964 | - | |
2965 | -static struct page *signal_page; | |
2966 | -extern struct page *get_signal_page(void); | |
2967 | - | |
2968 | -static const struct vm_special_mapping sigpage_mapping = { | |
2969 | - .name = "[sigpage]", | |
2970 | - .pages = &signal_page, | |
2971 | -}; | |
2972 | - | |
2973 | int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | |
2974 | { | |
2975 | struct mm_struct *mm = current->mm; | |
2976 | - struct vm_area_struct *vma; | |
2977 | - unsigned long addr; | |
2978 | - unsigned long hint; | |
2979 | - int ret = 0; | |
2980 | - | |
2981 | - if (!signal_page) | |
2982 | - signal_page = get_signal_page(); | |
2983 | - if (!signal_page) | |
2984 | - return -ENOMEM; | |
2985 | ||
2986 | down_write(&mm->mmap_sem); | |
2987 | - hint = sigpage_addr(mm, 1); | |
2988 | - addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0); | |
2989 | - if (IS_ERR_VALUE(addr)) { | |
2990 | - ret = addr; | |
2991 | - goto up_fail; | |
2992 | - } | |
2993 | - | |
2994 | - vma = _install_special_mapping(mm, addr, PAGE_SIZE, | |
2995 | - VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, | |
2996 | - &sigpage_mapping); | |
2997 | - | |
2998 | - if (IS_ERR(vma)) { | |
2999 | - ret = PTR_ERR(vma); | |
3000 | - goto up_fail; | |
3001 | - } | |
3002 | - | |
3003 | - mm->context.sigpage = addr; | |
3004 | - | |
3005 | - up_fail: | |
3006 | + mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC; | |
3007 | up_write(&mm->mmap_sem); | |
3008 | - return ret; | |
3009 | + return 0; | |
3010 | } | |
3011 | #endif | |
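arch_setup_additional_pages() above no longer installs a real [sigpage] VMA: it stores a random, 4-byte-aligned magic address in mm->context.sigpage, and the do_PrefetchAbort() hunk later in this patch (arch/arm/mm/fault.c) catches faults inside that 7-slot window and emulates sigreturn/rt_sigreturn in the kernel instead of running trampoline code from a user page. The dispatch reduces to the sketch below, which restates the fault.c logic with the same 7*4-byte window used by sigreturn_codes[7]; it is a condensed illustration, not the literal patch code.

/* true if pc hit the fake trampoline window and a sigreturn was emulated */
static bool emulate_sigreturn_tramp(struct pt_regs *regs, unsigned long pc)
{
	unsigned long sigpage = current->mm->context.sigpage;

	if (pc < sigpage || pc >= sigpage + 7 * 4)
		return false;

	if (pc < sigpage + 3 * 4)
		sys_sigreturn(regs);		/* first three slots    */
	else
		sys_rt_sigreturn(regs);		/* remaining four slots */
	return true;
}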
3012 | diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c | |
3013 | index f73891b..cf3004e 100644 | |
3014 | --- a/arch/arm/kernel/psci.c | |
3015 | +++ b/arch/arm/kernel/psci.c | |
3016 | @@ -28,7 +28,7 @@ | |
3017 | #include <asm/psci.h> | |
3018 | #include <asm/system_misc.h> | |
3019 | ||
3020 | -struct psci_operations psci_ops; | |
3021 | +struct psci_operations psci_ops __read_only; | |
3022 | ||
3023 | static int (*invoke_psci_fn)(u32, u32, u32, u32); | |
3024 | typedef int (*psci_initcall_t)(const struct device_node *); | |
3025 | diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c | |
3026 | index ef9119f..31995a3 100644 | |
3027 | --- a/arch/arm/kernel/ptrace.c | |
3028 | +++ b/arch/arm/kernel/ptrace.c | |
3029 | @@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs, | |
3030 | regs->ARM_ip = ip; | |
3031 | } | |
3032 | ||
3033 | +#ifdef CONFIG_GRKERNSEC_SETXID | |
3034 | +extern void gr_delayed_cred_worker(void); | |
3035 | +#endif | |
3036 | + | |
3037 | asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno) | |
3038 | { | |
3039 | current_thread_info()->syscall = scno; | |
3040 | ||
3041 | +#ifdef CONFIG_GRKERNSEC_SETXID | |
3042 | + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) | |
3043 | + gr_delayed_cred_worker(); | |
3044 | +#endif | |
3045 | + | |
3046 | /* Do the secure computing check first; failures should be fast. */ | |
3047 | #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER | |
3048 | if (secure_computing() == -1) | |
3049 | diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c | |
3050 | index 306e1ac..1b477ed 100644 | |
3051 | --- a/arch/arm/kernel/setup.c | |
3052 | +++ b/arch/arm/kernel/setup.c | |
3053 | @@ -104,21 +104,23 @@ EXPORT_SYMBOL(elf_hwcap); | |
3054 | unsigned int elf_hwcap2 __read_mostly; | |
3055 | EXPORT_SYMBOL(elf_hwcap2); | |
3056 | ||
3057 | +pteval_t __supported_pte_mask __read_only; | |
3058 | +pmdval_t __supported_pmd_mask __read_only; | |
3059 | ||
3060 | #ifdef MULTI_CPU | |
3061 | -struct processor processor __read_mostly; | |
3062 | +struct processor processor __read_only; | |
3063 | #endif | |
3064 | #ifdef MULTI_TLB | |
3065 | -struct cpu_tlb_fns cpu_tlb __read_mostly; | |
3066 | +struct cpu_tlb_fns cpu_tlb __read_only; | |
3067 | #endif | |
3068 | #ifdef MULTI_USER | |
3069 | -struct cpu_user_fns cpu_user __read_mostly; | |
3070 | +struct cpu_user_fns cpu_user __read_only; | |
3071 | #endif | |
3072 | #ifdef MULTI_CACHE | |
3073 | -struct cpu_cache_fns cpu_cache __read_mostly; | |
3074 | +struct cpu_cache_fns cpu_cache __read_only; | |
3075 | #endif | |
3076 | #ifdef CONFIG_OUTER_CACHE | |
3077 | -struct outer_cache_fns outer_cache __read_mostly; | |
3078 | +struct outer_cache_fns outer_cache __read_only; | |
3079 | EXPORT_SYMBOL(outer_cache); | |
3080 | #endif | |
3081 | ||
3082 | @@ -252,9 +254,13 @@ static int __get_cpu_architecture(void) | |
3083 | asm("mrc p15, 0, %0, c0, c1, 4" | |
3084 | : "=r" (mmfr0)); | |
3085 | if ((mmfr0 & 0x0000000f) >= 0x00000003 || | |
3086 | - (mmfr0 & 0x000000f0) >= 0x00000030) | |
3087 | + (mmfr0 & 0x000000f0) >= 0x00000030) { | |
3088 | cpu_arch = CPU_ARCH_ARMv7; | |
3089 | - else if ((mmfr0 & 0x0000000f) == 0x00000002 || | |
3090 | + if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) { | |
3091 | + __supported_pte_mask |= L_PTE_PXN; | |
3092 | + __supported_pmd_mask |= PMD_PXNTABLE; | |
3093 | + } | |
3094 | + } else if ((mmfr0 & 0x0000000f) == 0x00000002 || | |
3095 | (mmfr0 & 0x000000f0) == 0x00000020) | |
3096 | cpu_arch = CPU_ARCH_ARMv6; | |
3097 | else | |
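The setup.c change reads ID_MMFR0 and, on ARMv7 parts whose VMSA support field (bits [3:0]) reports 4 or 5, enables L_PTE_PXN and PMD_PXNTABLE in the newly introduced __supported_pte_mask/__supported_pmd_mask, so later page-table code can mark user mappings Privileged-eXecute-Never. A stand-alone decoder mirroring the masks in the hunk would look like this (the helper name is made up):

/* ID_MMFR0[3:0] of 4 or 5 is treated as "MMU implements PXN" by the hunk. */
static inline bool mmfr0_has_pxn(unsigned int mmfr0)
{
	unsigned int vmsa = mmfr0 & 0x0000000f;

	return vmsa == 0x00000004 || vmsa == 0x00000005;
}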
3098 | diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c | |
3099 | index bd19834..e4d8c66 100644 | |
3100 | --- a/arch/arm/kernel/signal.c | |
3101 | +++ b/arch/arm/kernel/signal.c | |
3102 | @@ -24,8 +24,6 @@ | |
3103 | ||
3104 | extern const unsigned long sigreturn_codes[7]; | |
3105 | ||
3106 | -static unsigned long signal_return_offset; | |
3107 | - | |
3108 | #ifdef CONFIG_CRUNCH | |
3109 | static int preserve_crunch_context(struct crunch_sigframe __user *frame) | |
3110 | { | |
3111 | @@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig, | |
3112 | * except when the MPU has protected the vectors | |
3113 | * page from PL0 | |
3114 | */ | |
3115 | - retcode = mm->context.sigpage + signal_return_offset + | |
3116 | - (idx << 2) + thumb; | |
3117 | + retcode = mm->context.sigpage + (idx << 2) + thumb; | |
3118 | } else | |
3119 | #endif | |
3120 | { | |
3121 | @@ -604,33 +601,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) | |
3122 | } while (thread_flags & _TIF_WORK_MASK); | |
3123 | return 0; | |
3124 | } | |
3125 | - | |
3126 | -struct page *get_signal_page(void) | |
3127 | -{ | |
3128 | - unsigned long ptr; | |
3129 | - unsigned offset; | |
3130 | - struct page *page; | |
3131 | - void *addr; | |
3132 | - | |
3133 | - page = alloc_pages(GFP_KERNEL, 0); | |
3134 | - | |
3135 | - if (!page) | |
3136 | - return NULL; | |
3137 | - | |
3138 | - addr = page_address(page); | |
3139 | - | |
3140 | - /* Give the signal return code some randomness */ | |
3141 | - offset = 0x200 + (get_random_int() & 0x7fc); | |
3142 | - signal_return_offset = offset; | |
3143 | - | |
3144 | - /* | |
3145 | - * Copy signal return handlers into the vector page, and | |
3146 | - * set sigreturn to be a pointer to these. | |
3147 | - */ | |
3148 | - memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes)); | |
3149 | - | |
3150 | - ptr = (unsigned long)addr + offset; | |
3151 | - flush_icache_range(ptr, ptr + sizeof(sigreturn_codes)); | |
3152 | - | |
3153 | - return page; | |
3154 | -} | |
3155 | diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c | |
3156 | index a8e32aa..b2f7198 100644 | |
3157 | --- a/arch/arm/kernel/smp.c | |
3158 | +++ b/arch/arm/kernel/smp.c | |
3159 | @@ -76,7 +76,7 @@ enum ipi_msg_type { | |
3160 | ||
3161 | static DECLARE_COMPLETION(cpu_running); | |
3162 | ||
3163 | -static struct smp_operations smp_ops; | |
3164 | +static struct smp_operations smp_ops __read_only; | |
3165 | ||
3166 | void __init smp_set_ops(struct smp_operations *ops) | |
3167 | { | |
3168 | diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c | |
3169 | index 7a3be1d..b00c7de 100644 | |
3170 | --- a/arch/arm/kernel/tcm.c | |
3171 | +++ b/arch/arm/kernel/tcm.c | |
3172 | @@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = { | |
3173 | .virtual = ITCM_OFFSET, | |
3174 | .pfn = __phys_to_pfn(ITCM_OFFSET), | |
3175 | .length = 0, | |
3176 | - .type = MT_MEMORY_RWX_ITCM, | |
3177 | + .type = MT_MEMORY_RX_ITCM, | |
3178 | } | |
3179 | }; | |
3180 | ||
3181 | @@ -267,7 +267,9 @@ no_dtcm: | |
3182 | start = &__sitcm_text; | |
3183 | end = &__eitcm_text; | |
3184 | ram = &__itcm_start; | |
3185 | + pax_open_kernel(); | |
3186 | memcpy(start, ram, itcm_code_sz); | |
3187 | + pax_close_kernel(); | |
3188 | pr_debug("CPU ITCM: copied code from %p - %p\n", | |
3189 | start, end); | |
3190 | itcm_present = true; | |
3191 | diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c | |
3192 | index 9f5d818..e013427 100644 | |
3193 | --- a/arch/arm/kernel/traps.c | |
3194 | +++ b/arch/arm/kernel/traps.c | |
3195 | @@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long); | |
3196 | void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame) | |
3197 | { | |
3198 | #ifdef CONFIG_KALLSYMS | |
3199 | - printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from); | |
3200 | + printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from); | |
3201 | #else | |
3202 | printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from); | |
3203 | #endif | |
3204 | @@ -267,6 +267,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; | |
3205 | static int die_owner = -1; | |
3206 | static unsigned int die_nest_count; | |
3207 | ||
3208 | +extern void gr_handle_kernel_exploit(void); | |
3209 | + | |
3210 | static unsigned long oops_begin(void) | |
3211 | { | |
3212 | int cpu; | |
3213 | @@ -309,6 +311,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr) | |
3214 | panic("Fatal exception in interrupt"); | |
3215 | if (panic_on_oops) | |
3216 | panic("Fatal exception"); | |
3217 | + | |
3218 | + gr_handle_kernel_exploit(); | |
3219 | + | |
3220 | if (signr) | |
3221 | do_exit(signr); | |
3222 | } | |
3223 | @@ -880,7 +885,11 @@ void __init early_trap_init(void *vectors_base) | |
3224 | kuser_init(vectors_base); | |
3225 | ||
3226 | flush_icache_range(vectors, vectors + PAGE_SIZE * 2); | |
3227 | - modify_domain(DOMAIN_USER, DOMAIN_CLIENT); | |
3228 | + | |
3229 | +#ifndef CONFIG_PAX_MEMORY_UDEREF | |
3230 | + modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT); | |
3231 | +#endif | |
3232 | + | |
3233 | #else /* ifndef CONFIG_CPU_V7M */ | |
3234 | /* | |
3235 | * on V7-M there is no need to copy the vector table to a dedicated | |
3236 | diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S | |
3237 | index 8e95aa4..595dfc8 100644 | |
3238 | --- a/arch/arm/kernel/vmlinux.lds.S | |
3239 | +++ b/arch/arm/kernel/vmlinux.lds.S | |
3240 | @@ -8,7 +8,11 @@ | |
3241 | #include <asm/thread_info.h> | |
3242 | #include <asm/memory.h> | |
3243 | #include <asm/page.h> | |
3244 | - | |
3245 | + | |
3246 | +#ifdef CONFIG_PAX_KERNEXEC | |
3247 | +#include <asm/pgtable.h> | |
3248 | +#endif | |
3249 | + | |
3250 | #define PROC_INFO \ | |
3251 | . = ALIGN(4); \ | |
3252 | VMLINUX_SYMBOL(__proc_info_begin) = .; \ | |
3253 | @@ -34,7 +38,7 @@ | |
3254 | #endif | |
3255 | ||
3256 | #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \ | |
3257 | - defined(CONFIG_GENERIC_BUG) | |
3258 | + defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT) | |
3259 | #define ARM_EXIT_KEEP(x) x | |
3260 | #define ARM_EXIT_DISCARD(x) | |
3261 | #else | |
3262 | @@ -90,6 +94,11 @@ SECTIONS | |
3263 | _text = .; | |
3264 | HEAD_TEXT | |
3265 | } | |
3266 | + | |
3267 | +#ifdef CONFIG_PAX_KERNEXEC | |
3268 | + . = ALIGN(1<<SECTION_SHIFT); | |
3269 | +#endif | |
3270 | + | |
3271 | .text : { /* Real text segment */ | |
3272 | _stext = .; /* Text and read-only data */ | |
3273 | __exception_text_start = .; | |
3274 | @@ -112,6 +121,8 @@ SECTIONS | |
3275 | ARM_CPU_KEEP(PROC_INFO) | |
3276 | } | |
3277 | ||
3278 | + _etext = .; /* End of text section */ | |
3279 | + | |
3280 | RO_DATA(PAGE_SIZE) | |
3281 | ||
3282 | . = ALIGN(4); | |
3283 | @@ -142,7 +153,9 @@ SECTIONS | |
3284 | ||
3285 | NOTES | |
3286 | ||
3287 | - _etext = .; /* End of text and rodata section */ | |
3288 | +#ifdef CONFIG_PAX_KERNEXEC | |
3289 | + . = ALIGN(1<<SECTION_SHIFT); | |
3290 | +#endif | |
3291 | ||
3292 | #ifndef CONFIG_XIP_KERNEL | |
3293 | . = ALIGN(PAGE_SIZE); | |
3294 | @@ -221,6 +234,11 @@ SECTIONS | |
3295 | #else | |
3296 | . = ALIGN(THREAD_SIZE); | |
3297 | __init_end = .; | |
3298 | + | |
3299 | +#ifdef CONFIG_PAX_KERNEXEC | |
3300 | + . = ALIGN(1<<SECTION_SHIFT); | |
3301 | +#endif | |
3302 | + | |
3303 | __data_loc = .; | |
3304 | #endif | |
3305 | ||
3306 | diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c | |
3307 | index 9e193c8..3560fe6 100644 | |
3308 | --- a/arch/arm/kvm/arm.c | |
3309 | +++ b/arch/arm/kvm/arm.c | |
3310 | @@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors; | |
3311 | static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu); | |
3312 | ||
3313 | /* The VMID used in the VTTBR */ | |
3314 | -static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); | |
3315 | +static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1); | |
3316 | static u8 kvm_next_vmid; | |
3317 | static DEFINE_SPINLOCK(kvm_vmid_lock); | |
3318 | ||
3319 | @@ -354,7 +354,7 @@ void force_vm_exit(const cpumask_t *mask) | |
3320 | */ | |
3321 | static bool need_new_vmid_gen(struct kvm *kvm) | |
3322 | { | |
3323 | - return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen)); | |
3324 | + return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen)); | |
3325 | } | |
3326 | ||
3327 | /** | |
3328 | @@ -387,7 +387,7 @@ static void update_vttbr(struct kvm *kvm) | |
3329 | ||
3330 | /* First user of a new VMID generation? */ | |
3331 | if (unlikely(kvm_next_vmid == 0)) { | |
3332 | - atomic64_inc(&kvm_vmid_gen); | |
3333 | + atomic64_inc_unchecked(&kvm_vmid_gen); | |
3334 | kvm_next_vmid = 1; | |
3335 | ||
3336 | /* | |
3337 | @@ -404,7 +404,7 @@ static void update_vttbr(struct kvm *kvm) | |
3338 | kvm_call_hyp(__kvm_flush_vm_context); | |
3339 | } | |
3340 | ||
3341 | - kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen); | |
3342 | + kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen); | |
3343 | kvm->arch.vmid = kvm_next_vmid; | |
3344 | kvm_next_vmid++; | |
3345 | ||
3346 | @@ -980,7 +980,7 @@ static void check_kvm_target_cpu(void *ret) | |
3347 | /** | |
3348 | * Initialize Hyp-mode and memory mappings on all CPUs. | |
3349 | */ | |
3350 | -int kvm_arch_init(void *opaque) | |
3351 | +int kvm_arch_init(const void *opaque) | |
3352 | { | |
3353 | int err; | |
3354 | int ret, cpu; | |
3355 | diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S | |
3356 | index 14a0d98..7771a7d 100644 | |
3357 | --- a/arch/arm/lib/clear_user.S | |
3358 | +++ b/arch/arm/lib/clear_user.S | |
3359 | @@ -12,14 +12,14 @@ | |
3360 | ||
3361 | .text | |
3362 | ||
3363 | -/* Prototype: int __clear_user(void *addr, size_t sz) | |
3364 | +/* Prototype: int ___clear_user(void *addr, size_t sz) | |
3365 | * Purpose : clear some user memory | |
3366 | * Params : addr - user memory address to clear | |
3367 | * : sz - number of bytes to clear | |
3368 | * Returns : number of bytes NOT cleared | |
3369 | */ | |
3370 | ENTRY(__clear_user_std) | |
3371 | -WEAK(__clear_user) | |
3372 | +WEAK(___clear_user) | |
3373 | stmfd sp!, {r1, lr} | |
3374 | mov r2, #0 | |
3375 | cmp r1, #4 | |
3376 | @@ -44,7 +44,7 @@ WEAK(__clear_user) | |
3377 | USER( strnebt r2, [r0]) | |
3378 | mov r0, #0 | |
3379 | ldmfd sp!, {r1, pc} | |
3380 | -ENDPROC(__clear_user) | |
3381 | +ENDPROC(___clear_user) | |
3382 | ENDPROC(__clear_user_std) | |
3383 | ||
3384 | .pushsection .fixup,"ax" | |
3385 | diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S | |
3386 | index 66a477a..bee61d3 100644 | |
3387 | --- a/arch/arm/lib/copy_from_user.S | |
3388 | +++ b/arch/arm/lib/copy_from_user.S | |
3389 | @@ -16,7 +16,7 @@ | |
3390 | /* | |
3391 | * Prototype: | |
3392 | * | |
3393 | - * size_t __copy_from_user(void *to, const void *from, size_t n) | |
3394 | + * size_t ___copy_from_user(void *to, const void *from, size_t n) | |
3395 | * | |
3396 | * Purpose: | |
3397 | * | |
3398 | @@ -84,11 +84,11 @@ | |
3399 | ||
3400 | .text | |
3401 | ||
3402 | -ENTRY(__copy_from_user) | |
3403 | +ENTRY(___copy_from_user) | |
3404 | ||
3405 | #include "copy_template.S" | |
3406 | ||
3407 | -ENDPROC(__copy_from_user) | |
3408 | +ENDPROC(___copy_from_user) | |
3409 | ||
3410 | .pushsection .fixup,"ax" | |
3411 | .align 0 | |
3412 | diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S | |
3413 | index 6ee2f67..d1cce76 100644 | |
3414 | --- a/arch/arm/lib/copy_page.S | |
3415 | +++ b/arch/arm/lib/copy_page.S | |
3416 | @@ -10,6 +10,7 @@ | |
3417 | * ASM optimised string functions | |
3418 | */ | |
3419 | #include <linux/linkage.h> | |
3420 | +#include <linux/const.h> | |
3421 | #include <asm/assembler.h> | |
3422 | #include <asm/asm-offsets.h> | |
3423 | #include <asm/cache.h> | |
3424 | diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S | |
3425 | index d066df6..df28194 100644 | |
3426 | --- a/arch/arm/lib/copy_to_user.S | |
3427 | +++ b/arch/arm/lib/copy_to_user.S | |
3428 | @@ -16,7 +16,7 @@ | |
3429 | /* | |
3430 | * Prototype: | |
3431 | * | |
3432 | - * size_t __copy_to_user(void *to, const void *from, size_t n) | |
3433 | + * size_t ___copy_to_user(void *to, const void *from, size_t n) | |
3434 | * | |
3435 | * Purpose: | |
3436 | * | |
3437 | @@ -88,11 +88,11 @@ | |
3438 | .text | |
3439 | ||
3440 | ENTRY(__copy_to_user_std) | |
3441 | -WEAK(__copy_to_user) | |
3442 | +WEAK(___copy_to_user) | |
3443 | ||
3444 | #include "copy_template.S" | |
3445 | ||
3446 | -ENDPROC(__copy_to_user) | |
3447 | +ENDPROC(___copy_to_user) | |
3448 | ENDPROC(__copy_to_user_std) | |
3449 | ||
3450 | .pushsection .fixup,"ax" | |
3451 | diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S | |
3452 | index 7d08b43..f7ca7ea 100644 | |
3453 | --- a/arch/arm/lib/csumpartialcopyuser.S | |
3454 | +++ b/arch/arm/lib/csumpartialcopyuser.S | |
3455 | @@ -57,8 +57,8 @@ | |
3456 | * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT | |
3457 | */ | |
3458 | ||
3459 | -#define FN_ENTRY ENTRY(csum_partial_copy_from_user) | |
3460 | -#define FN_EXIT ENDPROC(csum_partial_copy_from_user) | |
3461 | +#define FN_ENTRY ENTRY(__csum_partial_copy_from_user) | |
3462 | +#define FN_EXIT ENDPROC(__csum_partial_copy_from_user) | |
3463 | ||
3464 | #include "csumpartialcopygeneric.S" | |
3465 | ||
3466 | diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c | |
3467 | index 312d43e..21d2322 100644 | |
3468 | --- a/arch/arm/lib/delay.c | |
3469 | +++ b/arch/arm/lib/delay.c | |
3470 | @@ -29,7 +29,7 @@ | |
3471 | /* | |
3472 | * Default to the loop-based delay implementation. | |
3473 | */ | |
3474 | -struct arm_delay_ops arm_delay_ops = { | |
3475 | +struct arm_delay_ops arm_delay_ops __read_only = { | |
3476 | .delay = __loop_delay, | |
3477 | .const_udelay = __loop_const_udelay, | |
3478 | .udelay = __loop_udelay, | |
3479 | diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c | |
3480 | index 3e58d71..029817c 100644 | |
3481 | --- a/arch/arm/lib/uaccess_with_memcpy.c | |
3482 | +++ b/arch/arm/lib/uaccess_with_memcpy.c | |
3483 | @@ -136,7 +136,7 @@ out: | |
3484 | } | |
3485 | ||
3486 | unsigned long | |
3487 | -__copy_to_user(void __user *to, const void *from, unsigned long n) | |
3488 | +___copy_to_user(void __user *to, const void *from, unsigned long n) | |
3489 | { | |
3490 | /* | |
3491 | * This test is stubbed out of the main function above to keep | |
3492 | @@ -190,7 +190,7 @@ out: | |
3493 | return n; | |
3494 | } | |
3495 | ||
3496 | -unsigned long __clear_user(void __user *addr, unsigned long n) | |
3497 | +unsigned long ___clear_user(void __user *addr, unsigned long n) | |
3498 | { | |
3499 | /* See rational for this in __copy_to_user() above. */ | |
3500 | if (n < 64) | |
3501 | diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c | |
3502 | index 9610792..4dfb851 100644 | |
3503 | --- a/arch/arm/mach-at91/setup.c | |
3504 | +++ b/arch/arm/mach-at91/setup.c | |
3505 | @@ -83,7 +83,7 @@ void __init at91_init_sram(int bank, unsigned long base, unsigned int length) | |
3506 | ||
3507 | desc->pfn = __phys_to_pfn(base); | |
3508 | desc->length = length; | |
3509 | - desc->type = MT_MEMORY_RWX_NONCACHED; | |
3510 | + desc->type = MT_MEMORY_RW_NONCACHED; | |
3511 | ||
3512 | pr_info("sram at 0x%lx of 0x%x mapped at 0x%lx\n", | |
3513 | base, length, desc->virtual); | |
3514 | diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c | |
3515 | index 7f352de..6dc0929 100644 | |
3516 | --- a/arch/arm/mach-keystone/keystone.c | |
3517 | +++ b/arch/arm/mach-keystone/keystone.c | |
3518 | @@ -27,7 +27,7 @@ | |
3519 | ||
3520 | #include "keystone.h" | |
3521 | ||
3522 | -static struct notifier_block platform_nb; | |
3523 | +static notifier_block_no_const platform_nb; | |
3524 | static unsigned long keystone_dma_pfn_offset __read_mostly; | |
3525 | ||
3526 | static int keystone_platform_notifier(struct notifier_block *nb, | |
3527 | diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c | |
3528 | index 1163a3e..424adbf 100644 | |
3529 | --- a/arch/arm/mach-mvebu/coherency.c | |
3530 | +++ b/arch/arm/mach-mvebu/coherency.c | |
3531 | @@ -316,7 +316,7 @@ static void __init armada_370_coherency_init(struct device_node *np) | |
3532 | ||
3533 | /* | |
3534 | * This ioremap hook is used on Armada 375/38x to ensure that PCIe | |
3535 | - * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This | |
3536 | + * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This | |
3537 | * is needed as a workaround for a deadlock issue between the PCIe | |
3538 | * interface and the cache controller. | |
3539 | */ | |
3540 | @@ -329,7 +329,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size, | |
3541 | mvebu_mbus_get_pcie_mem_aperture(&pcie_mem); | |
3542 | ||
3543 | if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end) | |
3544 | - mtype = MT_UNCACHED; | |
3545 | + mtype = MT_UNCACHED_RW; | |
3546 | ||
3547 | return __arm_ioremap_caller(phys_addr, size, mtype, caller); | |
3548 | } | |
3549 | diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c | |
3550 | index 97767a2..9233746 100644 | |
3551 | --- a/arch/arm/mach-omap2/board-n8x0.c | |
3552 | +++ b/arch/arm/mach-omap2/board-n8x0.c | |
3553 | @@ -569,7 +569,7 @@ static int n8x0_menelaus_late_init(struct device *dev) | |
3554 | } | |
3555 | #endif | |
3556 | ||
3557 | -struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = { | |
3558 | +struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = { | |
3559 | .late_init = n8x0_menelaus_late_init, | |
3560 | }; | |
3561 | ||
3562 | diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c | |
3563 | index 5fa3755..1e8c247 100644 | |
3564 | --- a/arch/arm/mach-omap2/gpmc.c | |
3565 | +++ b/arch/arm/mach-omap2/gpmc.c | |
3566 | @@ -151,7 +151,6 @@ struct omap3_gpmc_regs { | |
3567 | }; | |
3568 | ||
3569 | static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ]; | |
3570 | -static struct irq_chip gpmc_irq_chip; | |
3571 | static int gpmc_irq_start; | |
3572 | ||
3573 | static struct resource gpmc_mem_root; | |
3574 | @@ -736,6 +735,18 @@ static void gpmc_irq_noop(struct irq_data *data) { } | |
3575 | ||
3576 | static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; } | |
3577 | ||
3578 | +static struct irq_chip gpmc_irq_chip = { | |
3579 | + .name = "gpmc", | |
3580 | + .irq_startup = gpmc_irq_noop_ret, | |
3581 | + .irq_enable = gpmc_irq_enable, | |
3582 | + .irq_disable = gpmc_irq_disable, | |
3583 | + .irq_shutdown = gpmc_irq_noop, | |
3584 | + .irq_ack = gpmc_irq_noop, | |
3585 | + .irq_mask = gpmc_irq_noop, | |
3586 | + .irq_unmask = gpmc_irq_noop, | |
3587 | + | |
3588 | +}; | |
3589 | + | |
3590 | static int gpmc_setup_irq(void) | |
3591 | { | |
3592 | int i; | |
3593 | @@ -750,15 +761,6 @@ static int gpmc_setup_irq(void) | |
3594 | return gpmc_irq_start; | |
3595 | } | |
3596 | ||
3597 | - gpmc_irq_chip.name = "gpmc"; | |
3598 | - gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret; | |
3599 | - gpmc_irq_chip.irq_enable = gpmc_irq_enable; | |
3600 | - gpmc_irq_chip.irq_disable = gpmc_irq_disable; | |
3601 | - gpmc_irq_chip.irq_shutdown = gpmc_irq_noop; | |
3602 | - gpmc_irq_chip.irq_ack = gpmc_irq_noop; | |
3603 | - gpmc_irq_chip.irq_mask = gpmc_irq_noop; | |
3604 | - gpmc_irq_chip.irq_unmask = gpmc_irq_noop; | |
3605 | - | |
3606 | gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE; | |
3607 | gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT; | |
3608 | ||
3609 | diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c | |
3610 | index 6944ae3..bc587ca 100644 | |
3611 | --- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c | |
3612 | +++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c | |
3613 | @@ -86,7 +86,7 @@ struct cpu_pm_ops { | |
3614 | void (*resume)(void); | |
3615 | void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state); | |
3616 | void (*hotplug_restart)(void); | |
3617 | -}; | |
3618 | +} __no_const; | |
3619 | ||
3620 | static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info); | |
3621 | static struct powerdomain *mpuss_pd; | |
3622 | @@ -105,7 +105,7 @@ static void dummy_cpu_resume(void) | |
3623 | static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state) | |
3624 | {} | |
3625 | ||
3626 | -struct cpu_pm_ops omap_pm_ops = { | |
3627 | +static struct cpu_pm_ops omap_pm_ops __read_only = { | |
3628 | .finish_suspend = default_finish_suspend, | |
3629 | .resume = dummy_cpu_resume, | |
3630 | .scu_prepare = dummy_scu_prepare, | |
3631 | diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c | |
3632 | index f961c46..4a453dc 100644 | |
3633 | --- a/arch/arm/mach-omap2/omap-wakeupgen.c | |
3634 | +++ b/arch/arm/mach-omap2/omap-wakeupgen.c | |
3635 | @@ -344,7 +344,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self, | |
3636 | return NOTIFY_OK; | |
3637 | } | |
3638 | ||
3639 | -static struct notifier_block __refdata irq_hotplug_notifier = { | |
3640 | +static struct notifier_block irq_hotplug_notifier = { | |
3641 | .notifier_call = irq_cpu_hotplug_notify, | |
3642 | }; | |
3643 | ||
3644 | diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c | |
3645 | index 8c58b71..95b655f 100644 | |
3646 | --- a/arch/arm/mach-omap2/omap_device.c | |
3647 | +++ b/arch/arm/mach-omap2/omap_device.c | |
3648 | @@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od) | |
3649 | struct platform_device __init *omap_device_build(const char *pdev_name, | |
3650 | int pdev_id, | |
3651 | struct omap_hwmod *oh, | |
3652 | - void *pdata, int pdata_len) | |
3653 | + const void *pdata, int pdata_len) | |
3654 | { | |
3655 | struct omap_hwmod *ohs[] = { oh }; | |
3656 | ||
3657 | @@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name, | |
3658 | struct platform_device __init *omap_device_build_ss(const char *pdev_name, | |
3659 | int pdev_id, | |
3660 | struct omap_hwmod **ohs, | |
3661 | - int oh_cnt, void *pdata, | |
3662 | + int oh_cnt, const void *pdata, | |
3663 | int pdata_len) | |
3664 | { | |
3665 | int ret = -ENOMEM; | |
3666 | diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h | |
3667 | index 78c02b3..c94109a 100644 | |
3668 | --- a/arch/arm/mach-omap2/omap_device.h | |
3669 | +++ b/arch/arm/mach-omap2/omap_device.h | |
3670 | @@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev); | |
3671 | /* Core code interface */ | |
3672 | ||
3673 | struct platform_device *omap_device_build(const char *pdev_name, int pdev_id, | |
3674 | - struct omap_hwmod *oh, void *pdata, | |
3675 | + struct omap_hwmod *oh, const void *pdata, | |
3676 | int pdata_len); | |
3677 | ||
3678 | struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id, | |
3679 | struct omap_hwmod **oh, int oh_cnt, | |
3680 | - void *pdata, int pdata_len); | |
3681 | + const void *pdata, int pdata_len); | |
3682 | ||
3683 | struct omap_device *omap_device_alloc(struct platform_device *pdev, | |
3684 | struct omap_hwmod **ohs, int oh_cnt); | |
3685 | diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c | |
3686 | index 716247e..8df346d 100644 | |
3687 | --- a/arch/arm/mach-omap2/omap_hwmod.c | |
3688 | +++ b/arch/arm/mach-omap2/omap_hwmod.c | |
3689 | @@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops { | |
3690 | int (*init_clkdm)(struct omap_hwmod *oh); | |
3691 | void (*update_context_lost)(struct omap_hwmod *oh); | |
3692 | int (*get_context_lost)(struct omap_hwmod *oh); | |
3693 | -}; | |
3694 | +} __no_const; | |
3695 | ||
3696 | /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */ | |
3697 | -static struct omap_hwmod_soc_ops soc_ops; | |
3698 | +static struct omap_hwmod_soc_ops soc_ops __read_only; | |
3699 | ||
3700 | /* omap_hwmod_list contains all registered struct omap_hwmods */ | |
3701 | static LIST_HEAD(omap_hwmod_list); | |
3702 | diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c | |
3703 | index 95fee54..cfa9cf1 100644 | |
3704 | --- a/arch/arm/mach-omap2/powerdomains43xx_data.c | |
3705 | +++ b/arch/arm/mach-omap2/powerdomains43xx_data.c | |
3706 | @@ -10,6 +10,7 @@ | |
3707 | ||
3708 | #include <linux/kernel.h> | |
3709 | #include <linux/init.h> | |
3710 | +#include <asm/pgtable.h> | |
3711 | ||
3712 | #include "powerdomain.h" | |
3713 | ||
3714 | @@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void) | |
3715 | ||
3716 | void __init am43xx_powerdomains_init(void) | |
3717 | { | |
3718 | - omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp; | |
3719 | + pax_open_kernel(); | |
3720 | + *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp; | |
3721 | + pax_close_kernel(); | |
3722 | pwrdm_register_platform_funcs(&omap4_pwrdm_operations); | |
3723 | pwrdm_register_pwrdms(powerdomains_am43xx); | |
3724 | pwrdm_complete_init(); | |
3725 | diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c | |
3726 | index ff0a68c..b312aa0 100644 | |
3727 | --- a/arch/arm/mach-omap2/wd_timer.c | |
3728 | +++ b/arch/arm/mach-omap2/wd_timer.c | |
3729 | @@ -110,7 +110,9 @@ static int __init omap_init_wdt(void) | |
3730 | struct omap_hwmod *oh; | |
3731 | char *oh_name = "wd_timer2"; | |
3732 | char *dev_name = "omap_wdt"; | |
3733 | - struct omap_wd_timer_platform_data pdata; | |
3734 | + static struct omap_wd_timer_platform_data pdata = { | |
3735 | + .read_reset_sources = prm_read_reset_sources | |
3736 | + }; | |
3737 | ||
3738 | if (!cpu_class_is_omap2() || of_have_populated_dt()) | |
3739 | return 0; | |
3740 | @@ -121,8 +123,6 @@ static int __init omap_init_wdt(void) | |
3741 | return -EINVAL; | |
3742 | } | |
3743 | ||
3744 | - pdata.read_reset_sources = prm_read_reset_sources; | |
3745 | - | |
3746 | pdev = omap_device_build(dev_name, id, oh, &pdata, | |
3747 | sizeof(struct omap_wd_timer_platform_data)); | |
3748 | WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n", | |
3749 | diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c | |
3750 | index b30bf5c..d0825bf 100644 | |
3751 | --- a/arch/arm/mach-tegra/cpuidle-tegra20.c | |
3752 | +++ b/arch/arm/mach-tegra/cpuidle-tegra20.c | |
3753 | @@ -180,7 +180,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev, | |
3754 | bool entered_lp2 = false; | |
3755 | ||
3756 | if (tegra_pending_sgi()) | |
3757 | - ACCESS_ONCE(abort_flag) = true; | |
3758 | + ACCESS_ONCE_RW(abort_flag) = true; | |
3759 | ||
3760 | cpuidle_coupled_parallel_barrier(dev, &abort_barrier); | |
3761 | ||
3762 | diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h | |
3763 | index 2dea8b5..6499da2 100644 | |
3764 | --- a/arch/arm/mach-ux500/setup.h | |
3765 | +++ b/arch/arm/mach-ux500/setup.h | |
3766 | @@ -33,13 +33,6 @@ extern void ux500_timer_init(void); | |
3767 | .type = MT_DEVICE, \ | |
3768 | } | |
3769 | ||
3770 | -#define __MEM_DEV_DESC(x, sz) { \ | |
3771 | - .virtual = IO_ADDRESS(x), \ | |
3772 | - .pfn = __phys_to_pfn(x), \ | |
3773 | - .length = sz, \ | |
3774 | - .type = MT_MEMORY_RWX, \ | |
3775 | -} | |
3776 | - | |
3777 | extern struct smp_operations ux500_smp_ops; | |
3778 | extern void ux500_cpu_die(unsigned int cpu); | |
3779 | ||
3780 | diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig | |
3781 | index 7eb94e6..799ad3e 100644 | |
3782 | --- a/arch/arm/mm/Kconfig | |
3783 | +++ b/arch/arm/mm/Kconfig | |
3784 | @@ -446,6 +446,7 @@ config CPU_32v5 | |
3785 | ||
3786 | config CPU_32v6 | |
3787 | bool | |
3788 | + select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF | |
3789 | select TLS_REG_EMUL if !CPU_32v6K && !MMU | |
3790 | ||
3791 | config CPU_32v6K | |
3792 | @@ -600,6 +601,7 @@ config CPU_CP15_MPU | |
3793 | ||
3794 | config CPU_USE_DOMAINS | |
3795 | bool | |
3796 | + depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF | |
3797 | help | |
3798 | This option enables or disables the use of domain switching | |
3799 | via the set_fs() function. | |
3800 | @@ -798,7 +800,7 @@ config NEED_KUSER_HELPERS | |
3801 | ||
3802 | config KUSER_HELPERS | |
3803 | bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS | |
3804 | - depends on MMU | |
3805 | + depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND) | |
3806 | default y | |
3807 | help | |
3808 | Warning: disabling this option may break user programs. | |
3809 | @@ -812,7 +814,7 @@ config KUSER_HELPERS | |
3810 | See Documentation/arm/kernel_user_helpers.txt for details. | |
3811 | ||
3812 | However, the fixed address nature of these helpers can be used | |
3813 | - by ROP (return orientated programming) authors when creating | |
3814 | + by ROP (Return Oriented Programming) authors when creating | |
3815 | exploits. | |
3816 | ||
3817 | If all of the binaries and libraries which run on your platform | |
3818 | diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c | |
3819 | index 83792f4..c25d36b 100644 | |
3820 | --- a/arch/arm/mm/alignment.c | |
3821 | +++ b/arch/arm/mm/alignment.c | |
3822 | @@ -216,10 +216,12 @@ union offset_union { | |
3823 | #define __get16_unaligned_check(ins,val,addr) \ | |
3824 | do { \ | |
3825 | unsigned int err = 0, v, a = addr; \ | |
3826 | + pax_open_userland(); \ | |
3827 | __get8_unaligned_check(ins,v,a,err); \ | |
3828 | val = v << ((BE) ? 8 : 0); \ | |
3829 | __get8_unaligned_check(ins,v,a,err); \ | |
3830 | val |= v << ((BE) ? 0 : 8); \ | |
3831 | + pax_close_userland(); \ | |
3832 | if (err) \ | |
3833 | goto fault; \ | |
3834 | } while (0) | |
3835 | @@ -233,6 +235,7 @@ union offset_union { | |
3836 | #define __get32_unaligned_check(ins,val,addr) \ | |
3837 | do { \ | |
3838 | unsigned int err = 0, v, a = addr; \ | |
3839 | + pax_open_userland(); \ | |
3840 | __get8_unaligned_check(ins,v,a,err); \ | |
3841 | val = v << ((BE) ? 24 : 0); \ | |
3842 | __get8_unaligned_check(ins,v,a,err); \ | |
3843 | @@ -241,6 +244,7 @@ union offset_union { | |
3844 | val |= v << ((BE) ? 8 : 16); \ | |
3845 | __get8_unaligned_check(ins,v,a,err); \ | |
3846 | val |= v << ((BE) ? 0 : 24); \ | |
3847 | + pax_close_userland(); \ | |
3848 | if (err) \ | |
3849 | goto fault; \ | |
3850 | } while (0) | |
3851 | @@ -254,6 +258,7 @@ union offset_union { | |
3852 | #define __put16_unaligned_check(ins,val,addr) \ | |
3853 | do { \ | |
3854 | unsigned int err = 0, v = val, a = addr; \ | |
3855 | + pax_open_userland(); \ | |
3856 | __asm__( FIRST_BYTE_16 \ | |
3857 | ARM( "1: "ins" %1, [%2], #1\n" ) \ | |
3858 | THUMB( "1: "ins" %1, [%2]\n" ) \ | |
3859 | @@ -273,6 +278,7 @@ union offset_union { | |
3860 | " .popsection\n" \ | |
3861 | : "=r" (err), "=&r" (v), "=&r" (a) \ | |
3862 | : "0" (err), "1" (v), "2" (a)); \ | |
3863 | + pax_close_userland(); \ | |
3864 | if (err) \ | |
3865 | goto fault; \ | |
3866 | } while (0) | |
3867 | @@ -286,6 +292,7 @@ union offset_union { | |
3868 | #define __put32_unaligned_check(ins,val,addr) \ | |
3869 | do { \ | |
3870 | unsigned int err = 0, v = val, a = addr; \ | |
3871 | + pax_open_userland(); \ | |
3872 | __asm__( FIRST_BYTE_32 \ | |
3873 | ARM( "1: "ins" %1, [%2], #1\n" ) \ | |
3874 | THUMB( "1: "ins" %1, [%2]\n" ) \ | |
3875 | @@ -315,6 +322,7 @@ union offset_union { | |
3876 | " .popsection\n" \ | |
3877 | : "=r" (err), "=&r" (v), "=&r" (a) \ | |
3878 | : "0" (err), "1" (v), "2" (a)); \ | |
3879 | + pax_close_userland(); \ | |
3880 | if (err) \ | |
3881 | goto fault; \ | |
3882 | } while (0) | |
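In alignment.c every multi-byte userland access made by the fixup code is now bracketed with pax_open_userland()/pax_close_userland(), the userland counterpart of pax_open_kernel(): under PAX_MEMORY_UDEREF the kernel cannot reach user memory by default and opens a window only for the deliberate access. A simplified C-level sketch of the same idea follows (the real macros do this around inline-asm accessors; read_user_u16() is an invented helper and only handles the little-endian case).

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static int read_user_u16(const void __user *addr, u16 *val)
{
	unsigned char lo, hi;
	int err = 0;

	pax_open_userland();		/* allow the intentional user access */
	err |= __get_user(lo, (const unsigned char __user *)addr);
	err |= __get_user(hi, (const unsigned char __user *)addr + 1);
	pax_close_userland();

	*val = lo | (hi << 8);
	return err ? -EFAULT : 0;
}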
3883 | diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c | |
3884 | index 5e65ca8..879e7b3 100644 | |
3885 | --- a/arch/arm/mm/cache-l2x0.c | |
3886 | +++ b/arch/arm/mm/cache-l2x0.c | |
3887 | @@ -42,7 +42,7 @@ struct l2c_init_data { | |
3888 | void (*fixup)(void __iomem *, u32, struct outer_cache_fns *); | |
3889 | void (*save)(void __iomem *); | |
3890 | struct outer_cache_fns outer_cache; | |
3891 | -}; | |
3892 | +} __do_const; | |
3893 | ||
3894 | #define CACHE_LINE_SIZE 32 | |
3895 | ||
3896 | diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c | |
3897 | index 6eb97b3..ac509f6 100644 | |
3898 | --- a/arch/arm/mm/context.c | |
3899 | +++ b/arch/arm/mm/context.c | |
3900 | @@ -43,7 +43,7 @@ | |
3901 | #define NUM_USER_ASIDS ASID_FIRST_VERSION | |
3902 | ||
3903 | static DEFINE_RAW_SPINLOCK(cpu_asid_lock); | |
3904 | -static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); | |
3905 | +static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); | |
3906 | static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); | |
3907 | ||
3908 | static DEFINE_PER_CPU(atomic64_t, active_asids); | |
3909 | @@ -182,7 +182,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) | |
3910 | { | |
3911 | static u32 cur_idx = 1; | |
3912 | u64 asid = atomic64_read(&mm->context.id); | |
3913 | - u64 generation = atomic64_read(&asid_generation); | |
3914 | + u64 generation = atomic64_read_unchecked(&asid_generation); | |
3915 | ||
3916 | if (asid != 0 && is_reserved_asid(asid)) { | |
3917 | /* | |
3918 | @@ -203,7 +203,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) | |
3919 | */ | |
3920 | asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx); | |
3921 | if (asid == NUM_USER_ASIDS) { | |
3922 | - generation = atomic64_add_return(ASID_FIRST_VERSION, | |
3923 | + generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION, | |
3924 | &asid_generation); | |
3925 | flush_context(cpu); | |
3926 | asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1); | |
3927 | @@ -234,14 +234,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) | |
3928 | cpu_set_reserved_ttbr0(); | |
3929 | ||
3930 | asid = atomic64_read(&mm->context.id); | |
3931 | - if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) | |
3932 | + if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) | |
3933 | && atomic64_xchg(&per_cpu(active_asids, cpu), asid)) | |
3934 | goto switch_mm_fastpath; | |
3935 | ||
3936 | raw_spin_lock_irqsave(&cpu_asid_lock, flags); | |
3937 | /* Check that our ASID belongs to the current generation. */ | |
3938 | asid = atomic64_read(&mm->context.id); | |
3939 | - if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) { | |
3940 | + if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) { | |
3941 | asid = new_context(mm, cpu); | |
3942 | atomic64_set(&mm->context.id, asid); | |
3943 | } | |
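context.c, like the kvm/arm.c hunk earlier, converts its generation counter from atomic64_t to atomic64_unchecked_t and switches to the *_unchecked accessors. Under PAX_REFCOUNT the regular atomic operations trap on overflow; counters that are meant to wrap as part of normal operation (ASID and VMID generations here) opt out through the unchecked variants. Reduced to its essentials, with an invented counter name, the pattern is:

/* A generation counter that may legitimately wrap: the unchecked atomics
 * keep PAX_REFCOUNT's overflow detection from firing on it by design. */
static atomic64_unchecked_t gen_counter = ATOMIC64_INIT(1);

static u64 bump_generation(void)
{
	return atomic64_add_return_unchecked(1, &gen_counter);
}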
3944 | diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c | |
3945 | index eb8830a..e8ff52e 100644 | |
3946 | --- a/arch/arm/mm/fault.c | |
3947 | +++ b/arch/arm/mm/fault.c | |
3948 | @@ -25,6 +25,7 @@ | |
3949 | #include <asm/system_misc.h> | |
3950 | #include <asm/system_info.h> | |
3951 | #include <asm/tlbflush.h> | |
3952 | +#include <asm/sections.h> | |
3953 | ||
3954 | #include "fault.h" | |
3955 | ||
3956 | @@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, | |
3957 | if (fixup_exception(regs)) | |
3958 | return; | |
3959 | ||
3960 | +#ifdef CONFIG_PAX_MEMORY_UDEREF | |
3961 | + if (addr < TASK_SIZE) { | |
3962 | + if (current->signal->curr_ip) | |
3963 | + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current), | 
3964 | + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr); | |
3965 | + else | |
3966 | + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current), | |
3967 | + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr); | |
3968 | + } | |
3969 | +#endif | |
3970 | + | |
3971 | +#ifdef CONFIG_PAX_KERNEXEC | |
3972 | + if ((fsr & FSR_WRITE) && | |
3973 | + (((unsigned long)_stext <= addr && addr < init_mm.end_code) || | |
3974 | + (MODULES_VADDR <= addr && addr < MODULES_END))) | |
3975 | + { | |
3976 | + if (current->signal->curr_ip) | |
3977 | + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current), | 
3978 | + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid())); | |
3979 | + else | |
3980 | + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current), | |
3981 | + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid())); | |
3982 | + } | |
3983 | +#endif | |
3984 | + | |
3985 | /* | |
3986 | * No handler, we'll have to terminate things with extreme prejudice. | |
3987 | */ | |
3988 | @@ -174,6 +200,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr, | |
3989 | } | |
3990 | #endif | |
3991 | ||
3992 | +#ifdef CONFIG_PAX_PAGEEXEC | |
3993 | + if (fsr & FSR_LNX_PF) { | |
3994 | + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp); | |
3995 | + do_group_exit(SIGKILL); | |
3996 | + } | |
3997 | +#endif | |
3998 | + | |
3999 | tsk->thread.address = addr; | |
4000 | tsk->thread.error_code = fsr; | |
4001 | tsk->thread.trap_no = 14; | |
4002 | @@ -401,6 +434,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |
4003 | } | |
4004 | #endif /* CONFIG_MMU */ | |
4005 | ||
4006 | +#ifdef CONFIG_PAX_PAGEEXEC | |
4007 | +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) | |
4008 | +{ | |
4009 | + long i; | |
4010 | + | |
4011 | + printk(KERN_ERR "PAX: bytes at PC: "); | |
4012 | + for (i = 0; i < 20; i++) { | |
4013 | + unsigned char c; | |
4014 | + if (get_user(c, (__force unsigned char __user *)pc+i)) | |
4015 | + printk(KERN_CONT "?? "); | |
4016 | + else | |
4017 | + printk(KERN_CONT "%02x ", c); | |
4018 | + } | |
4019 | + printk("\n"); | |
4020 | + | |
4021 | + printk(KERN_ERR "PAX: bytes at SP-4: "); | |
4022 | + for (i = -1; i < 20; i++) { | |
4023 | + unsigned long c; | |
4024 | + if (get_user(c, (__force unsigned long __user *)sp+i)) | |
4025 | + printk(KERN_CONT "???????? "); | |
4026 | + else | |
4027 | + printk(KERN_CONT "%08lx ", c); | |
4028 | + } | |
4029 | + printk("\n"); | |
4030 | +} | |
4031 | +#endif | |
4032 | + | |
4033 | /* | |
4034 | * First Level Translation Fault Handler | |
4035 | * | |
4036 | @@ -548,9 +608,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |
4037 | const struct fsr_info *inf = fsr_info + fsr_fs(fsr); | |
4038 | struct siginfo info; | |
4039 | ||
4040 | +#ifdef CONFIG_PAX_MEMORY_UDEREF | |
4041 | + if (addr < TASK_SIZE && is_domain_fault(fsr)) { | |
4042 | + if (current->signal->curr_ip) | |
4043 | + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current), | 
4044 | + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr); | |
4045 | + else | |
4046 | + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current), | |
4047 | + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr); | |
4048 | + goto die; | |
4049 | + } | |
4050 | +#endif | |
4051 | + | |
4052 | if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs)) | |
4053 | return; | |
4054 | ||
4055 | +die: | |
4056 | printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n", | |
4057 | inf->name, fsr, addr); | |
4058 | ||
4059 | @@ -574,15 +647,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs * | |
4060 | ifsr_info[nr].name = name; | |
4061 | } | |
4062 | ||
4063 | +asmlinkage int sys_sigreturn(struct pt_regs *regs); | |
4064 | +asmlinkage int sys_rt_sigreturn(struct pt_regs *regs); | |
4065 | + | |
4066 | asmlinkage void __exception | |
4067 | do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) | |
4068 | { | |
4069 | const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr); | |
4070 | struct siginfo info; | |
4071 | + unsigned long pc = instruction_pointer(regs); | |
4072 | + | |
4073 | + if (user_mode(regs)) { | |
4074 | + unsigned long sigpage = current->mm->context.sigpage; | |
4075 | + | |
4076 | + if (sigpage <= pc && pc < sigpage + 7*4) { | |
4077 | + if (pc < sigpage + 3*4) | |
4078 | + sys_sigreturn(regs); | |
4079 | + else | |
4080 | + sys_rt_sigreturn(regs); | |
4081 | + return; | |
4082 | + } | |
4083 | + if (pc == 0xffff0f60UL) { | |
4084 | + /* | |
4085 | + * PaX: __kuser_cmpxchg64 emulation | |
4086 | + */ | |
4087 | + // TODO | |
4088 | + //regs->ARM_pc = regs->ARM_lr; | |
4089 | + //return; | |
4090 | + } | |
4091 | + if (pc == 0xffff0fa0UL) { | |
4092 | + /* | |
4093 | + * PaX: __kuser_memory_barrier emulation | |
4094 | + */ | |
4095 | + // dmb(); implied by the exception | |
4096 | + regs->ARM_pc = regs->ARM_lr; | |
4097 | + return; | |
4098 | + } | |
4099 | + if (pc == 0xffff0fc0UL) { | |
4100 | + /* | |
4101 | + * PaX: __kuser_cmpxchg emulation | |
4102 | + */ | |
4103 | + // TODO | |
4104 | + //long new; | |
4105 | + //int op; | |
4106 | + | |
4107 | + //op = FUTEX_OP_SET << 28; | |
4108 | + //new = futex_atomic_op_inuser(op, regs->ARM_r2); | |
4109 | + //regs->ARM_r0 = old != new; | |
4110 | + //regs->ARM_pc = regs->ARM_lr; | |
4111 | + //return; | |
4112 | + } | |
4113 | + if (pc == 0xffff0fe0UL) { | |
4114 | + /* | |
4115 | + * PaX: __kuser_get_tls emulation | |
4116 | + */ | |
4117 | + regs->ARM_r0 = current_thread_info()->tp_value[0]; | |
4118 | + regs->ARM_pc = regs->ARM_lr; | |
4119 | + return; | |
4120 | + } | |
4121 | + } | |
4122 | + | |
4123 | +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) | |
4124 | + else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) { | |
4125 | + if (current->signal->curr_ip) | |
4126 | + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current), | 
4127 | + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), | |
4128 | + pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc); | |
4129 | + else | |
4130 | + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current), | |
4131 | + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), | |
4132 | + pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc); | |
4133 | + goto die; | |
4134 | + } | |
4135 | +#endif | |
4136 | + | |
4137 | +#ifdef CONFIG_PAX_REFCOUNT | |
4138 | + if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) { | |
4139 | +#ifdef CONFIG_THUMB2_KERNEL | |
4140 | + unsigned short bkpt; | |
4141 | + | |
4142 | + if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) { | |
4143 | +#else | |
4144 | + unsigned int bkpt; | |
4145 | + | |
4146 | + if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) { | |
4147 | +#endif | |
4148 | + current->thread.error_code = ifsr; | |
4149 | + current->thread.trap_no = 0; | |
4150 | + pax_report_refcount_overflow(regs); | |
4151 | + fixup_exception(regs); | |
4152 | + return; | |
4153 | + } | |
4154 | + } | |
4155 | +#endif | |
4156 | ||
4157 | if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs)) | |
4158 | return; | |
4159 | ||
4160 | +die: | |
4161 | printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n", | |
4162 | inf->name, ifsr, addr); | |
4163 | ||
4164 | diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h | |
4165 | index cf08bdf..772656c 100644 | |
4166 | --- a/arch/arm/mm/fault.h | |
4167 | +++ b/arch/arm/mm/fault.h | |
4168 | @@ -3,6 +3,7 @@ | |
4169 | ||
4170 | /* | |
4171 | * Fault status register encodings. We steal bit 31 for our own purposes. | |
4172 | + * Set when the FSR value is from an instruction fault. | |
4173 | */ | |
4174 | #define FSR_LNX_PF (1 << 31) | |
4175 | #define FSR_WRITE (1 << 11) | |
4176 | @@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr) | |
4177 | } | |
4178 | #endif | |
4179 | ||
4180 | +/* valid for LPAE and !LPAE */ | |
4181 | +static inline int is_xn_fault(unsigned int fsr) | |
4182 | +{ | |
4183 | + return ((fsr_fs(fsr) & 0x3c) == 0xc); | |
4184 | +} | |
4185 | + | |
4186 | +static inline int is_domain_fault(unsigned int fsr) | |
4187 | +{ | |
4188 | + return ((fsr_fs(fsr) & 0xD) == 0x9); | |
4189 | +} | |
4190 | + | |
4191 | void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs); | |
4192 | unsigned long search_exception_table(unsigned long addr); | |
4193 | ||
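The two helpers added to fault.h classify the status returned by fsr_fs() purely by masking: is_xn_fault() accepts any status whose bits [5:2] are 0b0011, i.e. the 0xc to 0xf range covering the permission/XN fault encodings, and is_domain_fault() accepts statuses with bits 0 and 3 set and bit 2 clear, which in the short-descriptor encoding are 0x9 (section domain fault) and 0xb (page domain fault). A throwaway spot check of that arithmetic, not part of the patch, is:

#include <linux/printk.h>
#include <linux/types.h>

static void check_fsr_masks(void)
{
	unsigned int fs;

	for (fs = 0; fs < 0x10; fs++) {
		bool xn  = (fs & 0x3c) == 0xc;	/* true for 0xc..0xf */
		bool dom = (fs & 0xd) == 0x9;	/* true for 0x9, 0xb */

		printk(KERN_DEBUG "fs=%#x xn=%d dom=%d\n", fs, xn, dom);
	}
}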
4194 | diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c | |
4195 | index 9481f85..6dae261 100644 | |
4196 | --- a/arch/arm/mm/init.c | |
4197 | +++ b/arch/arm/mm/init.c | |
4198 | @@ -31,6 +31,8 @@ | |
4199 | #include <asm/setup.h> | |
4200 | #include <asm/tlb.h> | |
4201 | #include <asm/fixmap.h> | |
4202 | +#include <asm/system_info.h> | |
4203 | +#include <asm/cp15.h> | |
4204 | ||
4205 | #include <asm/mach/arch.h> | |
4206 | #include <asm/mach/map.h> | |
4207 | @@ -619,7 +621,46 @@ void free_initmem(void) | |
4208 | { | |
4209 | #ifdef CONFIG_HAVE_TCM | |
4210 | extern char __tcm_start, __tcm_end; | |
4211 | +#endif | |
4212 | ||
4213 | +#ifdef CONFIG_PAX_KERNEXEC | |
4214 | + unsigned long addr; | |
4215 | + pgd_t *pgd; | |
4216 | + pud_t *pud; | |
4217 | + pmd_t *pmd; | |
4218 | + int cpu_arch = cpu_architecture(); | |
4219 | + unsigned int cr = get_cr(); | |
4220 | + | |
4221 | + if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) { | |
4222 | + /* make pages tables, etc before .text NX */ | |
4223 | + for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) { | |
4224 | + pgd = pgd_offset_k(addr); | |
4225 | + pud = pud_offset(pgd, addr); | |
4226 | + pmd = pmd_offset(pud, addr); | |
4227 | + __section_update(pmd, addr, PMD_SECT_XN); | |
4228 | + } | |
4229 | + /* make init NX */ | |
4230 | + for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) { | |
4231 | + pgd = pgd_offset_k(addr); | |
4232 | + pud = pud_offset(pgd, addr); | |
4233 | + pmd = pmd_offset(pud, addr); | |
4234 | + __section_update(pmd, addr, PMD_SECT_XN); | |
4235 | + } | |
4236 | + /* make kernel code/rodata RX */ | |
4237 | + for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) { | |
4238 | + pgd = pgd_offset_k(addr); | |
4239 | + pud = pud_offset(pgd, addr); | |
4240 | + pmd = pmd_offset(pud, addr); | |
4241 | +#ifdef CONFIG_ARM_LPAE | |
4242 | + __section_update(pmd, addr, PMD_SECT_RDONLY); | |
4243 | +#else | |
4244 | + __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE); | |
4245 | +#endif | |
4246 | + } | |
4247 | + } | |
4248 | +#endif | |
4249 | + | |
4250 | +#ifdef CONFIG_HAVE_TCM | |
4251 | poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); | |
4252 | free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link"); | |
4253 | #endif | |
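The CONFIG_PAX_KERNEXEC block added to free_initmem() above walks three section-aligned virtual ranges and updates their section mappings: everything between PAGE_OFFSET and _stext (page tables and other early data) plus the init area become XN, while the _stext.._init_begin range becomes read-only (RX). A sketch of just the range walk, with invented placeholder addresses standing in for the linker symbols and a 1 MiB SECTION_SIZE:

    #include <stdio.h>

    #define SECTION_SIZE 0x100000UL              /* 1 MiB sections, example value */

    /* placeholder layout only, not real kernel addresses */
    static unsigned long page_offset = 0xc0000000UL;  /* PAGE_OFFSET  */
    static unsigned long stext       = 0xc0100000UL;  /* _stext       */
    static unsigned long init_begin  = 0xc0800000UL;  /* __init_begin */
    static unsigned long sdata       = 0xc0900000UL;  /* _sdata       */

    static void walk(const char *what, unsigned long start, unsigned long end)
    {
            unsigned long addr;

            for (addr = start; addr < end; addr += SECTION_SIZE)
                    printf("%-22s section at %#lx\n", what, addr);
    }

    int main(void)
    {
            walk("below .text -> XN", page_offset, stext);
            walk("init -> XN", init_begin, sdata);
            walk(".text/.rodata -> RX", stext, init_begin);
            return 0;
    }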
4254 | diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c | |
4255 | index d1e5ad7..84dcbf2 100644 | |
4256 | --- a/arch/arm/mm/ioremap.c | |
4257 | +++ b/arch/arm/mm/ioremap.c | |
4258 | @@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached) | |
4259 | unsigned int mtype; | |
4260 | ||
4261 | if (cached) | |
4262 | - mtype = MT_MEMORY_RWX; | |
4263 | + mtype = MT_MEMORY_RX; | |
4264 | else | |
4265 | - mtype = MT_MEMORY_RWX_NONCACHED; | |
4266 | + mtype = MT_MEMORY_RX_NONCACHED; | |
4267 | ||
4268 | return __arm_ioremap_caller(phys_addr, size, mtype, | |
4269 | __builtin_return_address(0)); | |
4270 | diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c | |
4271 | index 5e85ed3..b10a7ed 100644 | |
4272 | --- a/arch/arm/mm/mmap.c | |
4273 | +++ b/arch/arm/mm/mmap.c | |
4274 | @@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |
4275 | struct vm_area_struct *vma; | |
4276 | int do_align = 0; | |
4277 | int aliasing = cache_is_vipt_aliasing(); | |
4278 | + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); | |
4279 | struct vm_unmapped_area_info info; | |
4280 | ||
4281 | /* | |
4282 | @@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |
4283 | if (len > TASK_SIZE) | |
4284 | return -ENOMEM; | |
4285 | ||
4286 | +#ifdef CONFIG_PAX_RANDMMAP | |
4287 | + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) | |
4288 | +#endif | |
4289 | + | |
4290 | if (addr) { | |
4291 | if (do_align) | |
4292 | addr = COLOUR_ALIGN(addr, pgoff); | |
4293 | @@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |
4294 | addr = PAGE_ALIGN(addr); | |
4295 | ||
4296 | vma = find_vma(mm, addr); | |
4297 | - if (TASK_SIZE - len >= addr && | |
4298 | - (!vma || addr + len <= vma->vm_start)) | |
4299 | + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) | |
4300 | return addr; | |
4301 | } | |
4302 | ||
4303 | @@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |
4304 | info.high_limit = TASK_SIZE; | |
4305 | info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; | |
4306 | info.align_offset = pgoff << PAGE_SHIFT; | |
4307 | + info.threadstack_offset = offset; | |
4308 | return vm_unmapped_area(&info); | |
4309 | } | |
4310 | ||
4311 | @@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |
4312 | unsigned long addr = addr0; | |
4313 | int do_align = 0; | |
4314 | int aliasing = cache_is_vipt_aliasing(); | |
4315 | + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); | |
4316 | struct vm_unmapped_area_info info; | |
4317 | ||
4318 | /* | |
4319 | @@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |
4320 | return addr; | |
4321 | } | |
4322 | ||
4323 | +#ifdef CONFIG_PAX_RANDMMAP | |
4324 | + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) | |
4325 | +#endif | |
4326 | + | |
4327 | /* requesting a specific address */ | |
4328 | if (addr) { | |
4329 | if (do_align) | |
4330 | @@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |
4331 | else | |
4332 | addr = PAGE_ALIGN(addr); | |
4333 | vma = find_vma(mm, addr); | |
4334 | - if (TASK_SIZE - len >= addr && | |
4335 | - (!vma || addr + len <= vma->vm_start)) | |
4336 | + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) | |
4337 | return addr; | |
4338 | } | |
4339 | ||
4340 | @@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |
4341 | info.high_limit = mm->mmap_base; | |
4342 | info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; | |
4343 | info.align_offset = pgoff << PAGE_SHIFT; | |
4344 | + info.threadstack_offset = offset; | |
4345 | addr = vm_unmapped_area(&info); | |
4346 | ||
4347 | /* | |
4348 | @@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm) | |
4349 | { | |
4350 | unsigned long random_factor = 0UL; | |
4351 | ||
4352 | +#ifdef CONFIG_PAX_RANDMMAP | |
4353 | + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) | |
4354 | +#endif | |
4355 | + | |
4356 | /* 8 bits of randomness in 20 address space bits */ | |
4357 | if ((current->flags & PF_RANDOMIZE) && | |
4358 | !(current->personality & ADDR_NO_RANDOMIZE)) | |
4359 | @@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm) | |
4360 | ||
4361 | if (mmap_is_legacy()) { | |
4362 | mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; | |
4363 | + | |
4364 | +#ifdef CONFIG_PAX_RANDMMAP | |
4365 | + if (mm->pax_flags & MF_PAX_RANDMMAP) | |
4366 | + mm->mmap_base += mm->delta_mmap; | |
4367 | +#endif | |
4368 | + | |
4369 | mm->get_unmapped_area = arch_get_unmapped_area; | |
4370 | } else { | |
4371 | mm->mmap_base = mmap_base(random_factor); | |
4372 | + | |
4373 | +#ifdef CONFIG_PAX_RANDMMAP | |
4374 | + if (mm->pax_flags & MF_PAX_RANDMMAP) | |
4375 | + mm->mmap_base -= mm->delta_mmap + mm->delta_stack; | |
4376 | +#endif | |
4377 | + | |
4378 | mm->get_unmapped_area = arch_get_unmapped_area_topdown; | |
4379 | } | |
4380 | } | |
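When MF_PAX_RANDMMAP is set, the arch_pick_mmap_layout() hunk above moves the mmap base further away from its default: up by delta_mmap in the legacy bottom-up layout, down by delta_mmap + delta_stack in the top-down layout. A toy calculation with invented example values (the real deltas are chosen by the PaX ASLR code elsewhere in this patch):

    #include <stdio.h>

    int main(void)
    {
            /* all of these numbers are illustrative only */
            unsigned long task_unmapped_base = 0x40000000UL; /* legacy bottom-up base   */
            unsigned long topdown_base       = 0xbf000000UL; /* result of mmap_base()   */
            unsigned long random_factor      = 0x00350000UL; /* 8 random bits, shifted  */
            unsigned long delta_mmap         = 0x01400000UL; /* example PaX mmap delta  */
            unsigned long delta_stack        = 0x00200000UL; /* example PaX stack delta */

            printf("legacy   mmap_base = %#lx\n",
                   task_unmapped_base + random_factor + delta_mmap);
            printf("top-down mmap_base = %#lx\n",
                   topdown_base - (delta_mmap + delta_stack));
            return 0;
    }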
4381 | diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c | |
4382 | index 9f98cec..115fcb6 100644 | |
4383 | --- a/arch/arm/mm/mmu.c | |
4384 | +++ b/arch/arm/mm/mmu.c | |
4385 | @@ -40,6 +40,22 @@ | |
4386 | #include "mm.h" | |
4387 | #include "tcm.h" | |
4388 | ||
4389 | +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) | |
4390 | +void modify_domain(unsigned int dom, unsigned int type) | |
4391 | +{ | |
4392 | + struct thread_info *thread = current_thread_info(); | |
4393 | + unsigned int domain = thread->cpu_domain; | |
4394 | + /* | |
4395 | + * DOMAIN_MANAGER might be defined to some other value, | |
4396 | + * use the arch-defined constant | |
4397 | + */ | |
4398 | + domain &= ~domain_val(dom, 3); | |
4399 | + thread->cpu_domain = domain | domain_val(dom, type); | |
4400 | + set_domain(thread->cpu_domain); | |
4401 | +} | |
4402 | +EXPORT_SYMBOL(modify_domain); | |
4403 | +#endif | |
4404 | + | |
4405 | /* | |
4406 | * empty_zero_page is a special page that is used for | |
4407 | * zero-initialized data and COW. | |
4408 | @@ -239,7 +255,15 @@ __setup("noalign", noalign_setup); | |
4409 | #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE | |
4410 | #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE | |
4411 | ||
4412 | -static struct mem_type mem_types[] = { | |
4413 | +#ifdef CONFIG_PAX_KERNEXEC | |
4414 | +#define L_PTE_KERNEXEC L_PTE_RDONLY | |
4415 | +#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY | |
4416 | +#else | |
4417 | +#define L_PTE_KERNEXEC L_PTE_DIRTY | |
4418 | +#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE | |
4419 | +#endif | |
4420 | + | |
4421 | +static struct mem_type mem_types[] __read_only = { | |
4422 | [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */ | |
4423 | .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED | | |
4424 | L_PTE_SHARED, | |
4425 | @@ -268,19 +292,19 @@ static struct mem_type mem_types[] = { | |
4426 | .prot_sect = PROT_SECT_DEVICE, | |
4427 | .domain = DOMAIN_IO, | |
4428 | }, | |
4429 | - [MT_UNCACHED] = { | |
4430 | + [MT_UNCACHED_RW] = { | |
4431 | .prot_pte = PROT_PTE_DEVICE, | |
4432 | .prot_l1 = PMD_TYPE_TABLE, | |
4433 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, | |
4434 | .domain = DOMAIN_IO, | |
4435 | }, | |
4436 | - [MT_CACHECLEAN] = { | |
4437 | - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, | |
4438 | + [MT_CACHECLEAN_RO] = { | |
4439 | + .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY, | |
4440 | .domain = DOMAIN_KERNEL, | |
4441 | }, | |
4442 | #ifndef CONFIG_ARM_LPAE | |
4443 | - [MT_MINICLEAN] = { | |
4444 | - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE, | |
4445 | + [MT_MINICLEAN_RO] = { | |
4446 | + .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY, | |
4447 | .domain = DOMAIN_KERNEL, | |
4448 | }, | |
4449 | #endif | |
4450 | @@ -288,15 +312,15 @@ static struct mem_type mem_types[] = { | |
4451 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | | |
4452 | L_PTE_RDONLY, | |
4453 | .prot_l1 = PMD_TYPE_TABLE, | |
4454 | - .domain = DOMAIN_USER, | |
4455 | + .domain = DOMAIN_VECTORS, | |
4456 | }, | |
4457 | [MT_HIGH_VECTORS] = { | |
4458 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | | |
4459 | L_PTE_USER | L_PTE_RDONLY, | |
4460 | .prot_l1 = PMD_TYPE_TABLE, | |
4461 | - .domain = DOMAIN_USER, | |
4462 | + .domain = DOMAIN_VECTORS, | |
4463 | }, | |
4464 | - [MT_MEMORY_RWX] = { | |
4465 | + [__MT_MEMORY_RWX] = { | |
4466 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY, | |
4467 | .prot_l1 = PMD_TYPE_TABLE, | |
4468 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, | |
4469 | @@ -309,17 +333,30 @@ static struct mem_type mem_types[] = { | |
4470 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, | |
4471 | .domain = DOMAIN_KERNEL, | |
4472 | }, | |
4473 | - [MT_ROM] = { | |
4474 | - .prot_sect = PMD_TYPE_SECT, | |
4475 | + [MT_MEMORY_RX] = { | |
4476 | + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC, | |
4477 | + .prot_l1 = PMD_TYPE_TABLE, | |
4478 | + .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC, | |
4479 | + .domain = DOMAIN_KERNEL, | |
4480 | + }, | |
4481 | + [MT_ROM_RX] = { | |
4482 | + .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY, | |
4483 | .domain = DOMAIN_KERNEL, | |
4484 | }, | |
4485 | - [MT_MEMORY_RWX_NONCACHED] = { | |
4486 | + [MT_MEMORY_RW_NONCACHED] = { | |
4487 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | | |
4488 | L_PTE_MT_BUFFERABLE, | |
4489 | .prot_l1 = PMD_TYPE_TABLE, | |
4490 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, | |
4491 | .domain = DOMAIN_KERNEL, | |
4492 | }, | |
4493 | + [MT_MEMORY_RX_NONCACHED] = { | |
4494 | + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC | | |
4495 | + L_PTE_MT_BUFFERABLE, | |
4496 | + .prot_l1 = PMD_TYPE_TABLE, | |
4497 | + .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC, | |
4498 | + .domain = DOMAIN_KERNEL, | |
4499 | + }, | |
4500 | [MT_MEMORY_RW_DTCM] = { | |
4501 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | | |
4502 | L_PTE_XN, | |
4503 | @@ -327,9 +364,10 @@ static struct mem_type mem_types[] = { | |
4504 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, | |
4505 | .domain = DOMAIN_KERNEL, | |
4506 | }, | |
4507 | - [MT_MEMORY_RWX_ITCM] = { | |
4508 | - .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY, | |
4509 | + [MT_MEMORY_RX_ITCM] = { | |
4510 | + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC, | |
4511 | .prot_l1 = PMD_TYPE_TABLE, | |
4512 | + .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC, | |
4513 | .domain = DOMAIN_KERNEL, | |
4514 | }, | |
4515 | [MT_MEMORY_RW_SO] = { | |
4516 | @@ -547,9 +585,14 @@ static void __init build_mem_type_table(void) | |
4517 | * Mark cache clean areas and XIP ROM read only | |
4518 | * from SVC mode and no access from userspace. | |
4519 | */ | |
4520 | - mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; | |
4521 | - mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; | |
4522 | - mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; | |
4523 | + mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; | |
4524 | +#ifdef CONFIG_PAX_KERNEXEC | |
4525 | + mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; | |
4526 | + mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; | |
4527 | + mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; | |
4528 | +#endif | |
4529 | + mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; | |
4530 | + mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; | |
4531 | #endif | |
4532 | ||
4533 | /* | |
4534 | @@ -566,13 +609,17 @@ static void __init build_mem_type_table(void) | |
4535 | mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED; | |
4536 | mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; | |
4537 | mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; | |
4538 | - mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S; | |
4539 | - mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED; | |
4540 | + mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S; | |
4541 | + mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED; | |
4542 | mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S; | |
4543 | mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED; | |
4544 | + mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S; | |
4545 | + mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED; | |
4546 | mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED; | |
4547 | - mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S; | |
4548 | - mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED; | |
4549 | + mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S; | |
4550 | + mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED; | |
4551 | + mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S; | |
4552 | + mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED; | |
4553 | } | |
4554 | } | |
4555 | ||
4556 | @@ -583,15 +630,20 @@ static void __init build_mem_type_table(void) | |
4557 | if (cpu_arch >= CPU_ARCH_ARMv6) { | |
4558 | if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) { | |
4559 | /* Non-cacheable Normal is XCB = 001 */ | |
4560 | - mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= | |
4561 | + mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= | |
4562 | + PMD_SECT_BUFFERED; | |
4563 | + mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= | |
4564 | PMD_SECT_BUFFERED; | |
4565 | } else { | |
4566 | /* For both ARMv6 and non-TEX-remapping ARMv7 */ | |
4567 | - mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= | |
4568 | + mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= | |
4569 | + PMD_SECT_TEX(1); | |
4570 | + mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= | |
4571 | PMD_SECT_TEX(1); | |
4572 | } | |
4573 | } else { | |
4574 | - mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE; | |
4575 | + mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE; | |
4576 | + mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE; | |
4577 | } | |
4578 | ||
4579 | #ifdef CONFIG_ARM_LPAE | |
4580 | @@ -607,6 +659,8 @@ static void __init build_mem_type_table(void) | |
4581 | vecs_pgprot |= PTE_EXT_AF; | |
4582 | #endif | |
4583 | ||
4584 | + user_pgprot |= __supported_pte_mask; | |
4585 | + | |
4586 | for (i = 0; i < 16; i++) { | |
4587 | pteval_t v = pgprot_val(protection_map[i]); | |
4588 | protection_map[i] = __pgprot(v | user_pgprot); | |
4589 | @@ -624,21 +678,24 @@ static void __init build_mem_type_table(void) | |
4590 | ||
4591 | mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; | |
4592 | mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; | |
4593 | - mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd; | |
4594 | - mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot; | |
4595 | + mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd; | |
4596 | + mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot; | |
4597 | mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd; | |
4598 | mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot; | |
4599 | + mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd; | |
4600 | + mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot; | |
4601 | mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot; | |
4602 | - mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask; | |
4603 | - mem_types[MT_ROM].prot_sect |= cp->pmd; | |
4604 | + mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask; | |
4605 | + mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask; | |
4606 | + mem_types[MT_ROM_RX].prot_sect |= cp->pmd; | |
4607 | ||
4608 | switch (cp->pmd) { | |
4609 | case PMD_SECT_WT: | |
4610 | - mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT; | |
4611 | + mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT; | |
4612 | break; | |
4613 | case PMD_SECT_WB: | |
4614 | case PMD_SECT_WBWA: | |
4615 | - mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB; | |
4616 | + mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB; | |
4617 | break; | |
4618 | } | |
4619 | pr_info("Memory policy: %sData cache %s\n", | |
4620 | @@ -856,7 +913,7 @@ static void __init create_mapping(struct map_desc *md) | |
4621 | return; | |
4622 | } | |
4623 | ||
4624 | - if ((md->type == MT_DEVICE || md->type == MT_ROM) && | |
4625 | + if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) && | |
4626 | md->virtual >= PAGE_OFFSET && | |
4627 | (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) { | |
4628 | printk(KERN_WARNING "BUG: mapping for 0x%08llx" | |
4629 | @@ -1224,18 +1281,15 @@ void __init arm_mm_memblock_reserve(void) | |
4630 | * called function. This means you can't use any function or debugging | |
4631 | * method which may touch any device, otherwise the kernel _will_ crash. | |
4632 | */ | |
4633 | + | |
4634 | +static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE); | |
4635 | + | |
4636 | static void __init devicemaps_init(const struct machine_desc *mdesc) | |
4637 | { | |
4638 | struct map_desc map; | |
4639 | unsigned long addr; | |
4640 | - void *vectors; | |
4641 | ||
4642 | - /* | |
4643 | - * Allocate the vector page early. | |
4644 | - */ | |
4645 | - vectors = early_alloc(PAGE_SIZE * 2); | |
4646 | - | |
4647 | - early_trap_init(vectors); | |
4648 | + early_trap_init(&vectors); | |
4649 | ||
4650 | for (addr = VMALLOC_START; addr; addr += PMD_SIZE) | |
4651 | pmd_clear(pmd_off_k(addr)); | |
4652 | @@ -1248,7 +1302,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) | |
4653 | map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK); | |
4654 | map.virtual = MODULES_VADDR; | |
4655 | map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK; | |
4656 | - map.type = MT_ROM; | |
4657 | + map.type = MT_ROM_RX; | |
4658 | create_mapping(&map); | |
4659 | #endif | |
4660 | ||
4661 | @@ -1259,14 +1313,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) | |
4662 | map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS); | |
4663 | map.virtual = FLUSH_BASE; | |
4664 | map.length = SZ_1M; | |
4665 | - map.type = MT_CACHECLEAN; | |
4666 | + map.type = MT_CACHECLEAN_RO; | |
4667 | create_mapping(&map); | |
4668 | #endif | |
4669 | #ifdef FLUSH_BASE_MINICACHE | |
4670 | map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M); | |
4671 | map.virtual = FLUSH_BASE_MINICACHE; | |
4672 | map.length = SZ_1M; | |
4673 | - map.type = MT_MINICLEAN; | |
4674 | + map.type = MT_MINICLEAN_RO; | |
4675 | create_mapping(&map); | |
4676 | #endif | |
4677 | ||
4678 | @@ -1275,7 +1329,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) | |
4679 | * location (0xffff0000). If we aren't using high-vectors, also | |
4680 | * create a mapping at the low-vectors virtual address. | |
4681 | */ | |
4682 | - map.pfn = __phys_to_pfn(virt_to_phys(vectors)); | |
4683 | + map.pfn = __phys_to_pfn(virt_to_phys(&vectors)); | |
4684 | map.virtual = 0xffff0000; | |
4685 | map.length = PAGE_SIZE; | |
4686 | #ifdef CONFIG_KUSER_HELPERS | |
4687 | @@ -1335,8 +1389,10 @@ static void __init kmap_init(void) | |
4688 | static void __init map_lowmem(void) | |
4689 | { | |
4690 | struct memblock_region *reg; | |
4691 | +#ifndef CONFIG_PAX_KERNEXEC | |
4692 | unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE); | |
4693 | unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); | |
4694 | +#endif | |
4695 | ||
4696 | /* Map all the lowmem memory banks. */ | |
4697 | for_each_memblock(memory, reg) { | |
4698 | @@ -1349,11 +1405,48 @@ static void __init map_lowmem(void) | |
4699 | if (start >= end) | |
4700 | break; | |
4701 | ||
4702 | +#ifdef CONFIG_PAX_KERNEXEC | |
4703 | + map.pfn = __phys_to_pfn(start); | |
4704 | + map.virtual = __phys_to_virt(start); | |
4705 | + map.length = end - start; | |
4706 | + | |
4707 | + if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) { | |
4708 | + struct map_desc kernel; | |
4709 | + struct map_desc initmap; | |
4710 | + | |
4711 | + /* when freeing initmem we will make this RW */ | |
4712 | + initmap.pfn = __phys_to_pfn(__pa(__init_begin)); | |
4713 | + initmap.virtual = (unsigned long)__init_begin; | |
4714 | + initmap.length = _sdata - __init_begin; | |
4715 | + initmap.type = __MT_MEMORY_RWX; | |
4716 | + create_mapping(&initmap); | |
4717 | + | |
4718 | + /* when freeing initmem we will make this RX */ | |
4719 | + kernel.pfn = __phys_to_pfn(__pa(_stext)); | |
4720 | + kernel.virtual = (unsigned long)_stext; | |
4721 | + kernel.length = __init_begin - _stext; | |
4722 | + kernel.type = __MT_MEMORY_RWX; | |
4723 | + create_mapping(&kernel); | |
4724 | + | |
4725 | + if (map.virtual < (unsigned long)_stext) { | |
4726 | + map.length = (unsigned long)_stext - map.virtual; | |
4727 | + map.type = __MT_MEMORY_RWX; | |
4728 | + create_mapping(&map); | |
4729 | + } | |
4730 | + | |
4731 | + map.pfn = __phys_to_pfn(__pa(_sdata)); | |
4732 | + map.virtual = (unsigned long)_sdata; | |
4733 | + map.length = end - __pa(_sdata); | |
4734 | + } | |
4735 | + | |
4736 | + map.type = MT_MEMORY_RW; | |
4737 | + create_mapping(&map); | |
4738 | +#else | |
4739 | if (end < kernel_x_start || start >= kernel_x_end) { | |
4740 | map.pfn = __phys_to_pfn(start); | |
4741 | map.virtual = __phys_to_virt(start); | |
4742 | map.length = end - start; | |
4743 | - map.type = MT_MEMORY_RWX; | |
4744 | + map.type = __MT_MEMORY_RWX; | |
4745 | ||
4746 | create_mapping(&map); | |
4747 | } else { | |
4748 | @@ -1370,7 +1463,7 @@ static void __init map_lowmem(void) | |
4749 | map.pfn = __phys_to_pfn(kernel_x_start); | |
4750 | map.virtual = __phys_to_virt(kernel_x_start); | |
4751 | map.length = kernel_x_end - kernel_x_start; | |
4752 | - map.type = MT_MEMORY_RWX; | |
4753 | + map.type = __MT_MEMORY_RWX; | |
4754 | ||
4755 | create_mapping(&map); | |
4756 | ||
4757 | @@ -1383,6 +1476,7 @@ static void __init map_lowmem(void) | |
4758 | create_mapping(&map); | |
4759 | } | |
4760 | } | |
4761 | +#endif | |
4762 | } | |
4763 | } | |
4764 | ||
4765 | diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c | |
4766 | index e1268f9..a9755a7 100644 | |
4767 | --- a/arch/arm/net/bpf_jit_32.c | |
4768 | +++ b/arch/arm/net/bpf_jit_32.c | |
4769 | @@ -20,6 +20,7 @@ | |
4770 | #include <asm/cacheflush.h> | |
4771 | #include <asm/hwcap.h> | |
4772 | #include <asm/opcodes.h> | |
4773 | +#include <asm/pgtable.h> | |
4774 | ||
4775 | #include "bpf_jit_32.h" | |
4776 | ||
4777 | @@ -71,7 +72,11 @@ struct jit_ctx { | |
4778 | #endif | |
4779 | }; | |
4780 | ||
4781 | +#ifdef CONFIG_GRKERNSEC_BPF_HARDEN | |
4782 | +int bpf_jit_enable __read_only; | |
4783 | +#else | |
4784 | int bpf_jit_enable __read_mostly; | |
4785 | +#endif | |
4786 | ||
4787 | static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset) | |
4788 | { | |
4789 | @@ -178,8 +183,10 @@ static void jit_fill_hole(void *area, unsigned int size) | |
4790 | { | |
4791 | u32 *ptr; | |
4792 | /* We are guaranteed to have aligned memory. */ | |
4793 | + pax_open_kernel(); | |
4794 | for (ptr = area; size >= sizeof(u32); size -= sizeof(u32)) | |
4795 | *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF); | |
4796 | + pax_close_kernel(); | |
4797 | } | |
4798 | ||
4799 | static void build_prologue(struct jit_ctx *ctx) | |
4800 | diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c | |
4801 | index 5b217f4..c23f40e 100644 | |
4802 | --- a/arch/arm/plat-iop/setup.c | |
4803 | +++ b/arch/arm/plat-iop/setup.c | |
4804 | @@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = { | |
4805 | .virtual = IOP3XX_PERIPHERAL_VIRT_BASE, | |
4806 | .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE), | |
4807 | .length = IOP3XX_PERIPHERAL_SIZE, | |
4808 | - .type = MT_UNCACHED, | |
4809 | + .type = MT_UNCACHED_RW, | |
4810 | }, | |
4811 | }; | |
4812 | ||
4813 | diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c | |
4814 | index a5bc92d..0bb4730 100644 | |
4815 | --- a/arch/arm/plat-omap/sram.c | |
4816 | +++ b/arch/arm/plat-omap/sram.c | |
4817 | @@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size, | |
4818 | * Looks like we need to preserve some bootloader code at the | |
4819 | * beginning of SRAM for jumping to flash for reboot to work... | |
4820 | */ | |
4821 | + pax_open_kernel(); | |
4822 | memset_io(omap_sram_base + omap_sram_skip, 0, | |
4823 | omap_sram_size - omap_sram_skip); | |
4824 | + pax_close_kernel(); | |
4825 | } | |
4826 | diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h | |
4827 | index ce6d763..cfea917 100644 | |
4828 | --- a/arch/arm/plat-samsung/include/plat/dma-ops.h | |
4829 | +++ b/arch/arm/plat-samsung/include/plat/dma-ops.h | |
4830 | @@ -47,7 +47,7 @@ struct samsung_dma_ops { | |
4831 | int (*started)(unsigned ch); | |
4832 | int (*flush)(unsigned ch); | |
4833 | int (*stop)(unsigned ch); | |
4834 | -}; | |
4835 | +} __no_const; | |
4836 | ||
4837 | extern void *samsung_dmadev_get_ops(void); | |
4838 | extern void *s3c_dma_get_ops(void); | |
4839 | diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h | |
4840 | index 6389d60..b5d3bdd 100644 | |
4841 | --- a/arch/arm64/include/asm/barrier.h | |
4842 | +++ b/arch/arm64/include/asm/barrier.h | |
4843 | @@ -41,7 +41,7 @@ | |
4844 | do { \ | |
4845 | compiletime_assert_atomic_type(*p); \ | |
4846 | barrier(); \ | |
4847 | - ACCESS_ONCE(*p) = (v); \ | |
4848 | + ACCESS_ONCE_RW(*p) = (v); \ | |
4849 | } while (0) | |
4850 | ||
4851 | #define smp_load_acquire(p) \ | |
4852 | diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h | |
4853 | index 3bf8f4e..5dd5491 100644 | |
4854 | --- a/arch/arm64/include/asm/uaccess.h | |
4855 | +++ b/arch/arm64/include/asm/uaccess.h | |
4856 | @@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs) | |
4857 | flag; \ | |
4858 | }) | |
4859 | ||
4860 | +#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size)) | |
4861 | #define access_ok(type, addr, size) __range_ok(addr, size) | |
4862 | #define user_addr_max get_fs | |
4863 | ||
4864 | diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h | |
4865 | index c3a58a1..78fbf54 100644 | |
4866 | --- a/arch/avr32/include/asm/cache.h | |
4867 | +++ b/arch/avr32/include/asm/cache.h | |
4868 | @@ -1,8 +1,10 @@ | |
4869 | #ifndef __ASM_AVR32_CACHE_H | |
4870 | #define __ASM_AVR32_CACHE_H | |
4871 | ||
4872 | +#include <linux/const.h> | |
4873 | + | |
4874 | #define L1_CACHE_SHIFT 5 | |
4875 | -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) | |
4876 | +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) | |
4877 | ||
4878 | /* | |
4879 | * Memory returned by kmalloc() may be used for DMA, so we must make | |
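This and the later cache.h hunks (blackfin, cris, frv, hexagon, ia64, m32r) make the same change: L1_CACHE_BYTES is expressed through _AC(1,UL), so the header stays usable from assembly while the C-side constant becomes unsigned long. A small sketch of the idea; the _AC() definition below is reproduced from memory of include/uapi/linux/const.h and may differ in detail from the tree this patch targets:

    #include <stdio.h>

    /* from memory of include/uapi/linux/const.h */
    #ifdef __ASSEMBLY__
    #define _AC(X, Y)       X               /* assembler sees a bare constant      */
    #else
    #define __AC(X, Y)      (X##Y)
    #define _AC(X, Y)       __AC(X, Y)      /* C sees the constant with its suffix */
    #endif

    #define L1_CACHE_SHIFT  5
    #define L1_CACHE_BYTES  (_AC(1,UL) << L1_CACHE_SHIFT)

    int main(void)
    {
            /* the constant is unsigned long in C, so size computations that use
             * it are done in unsigned long rather than plain int */
            printf("L1_CACHE_BYTES = %lu (sizeof %zu)\n",
                   (unsigned long)L1_CACHE_BYTES, sizeof(L1_CACHE_BYTES));
            return 0;
    }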
4880 | diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h | |
4881 | index d232888..87c8df1 100644 | |
4882 | --- a/arch/avr32/include/asm/elf.h | |
4883 | +++ b/arch/avr32/include/asm/elf.h | |
4884 | @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t; | |
4885 | the loader. We need to make sure that it is out of the way of the program | |
4886 | that it will "exec", and that there is sufficient room for the brk. */ | |
4887 | ||
4888 | -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) | |
4889 | +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) | |
4890 | ||
4891 | +#ifdef CONFIG_PAX_ASLR | |
4892 | +#define PAX_ELF_ET_DYN_BASE 0x00001000UL | |
4893 | + | |
4894 | +#define PAX_DELTA_MMAP_LEN 15 | |
4895 | +#define PAX_DELTA_STACK_LEN 15 | |
4896 | +#endif | |
4897 | ||
4898 | /* This yields a mask that user programs can use to figure out what | |
4899 | instruction set this CPU supports. This could be done in user space, | |
4900 | diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h | |
4901 | index 479330b..53717a8 100644 | |
4902 | --- a/arch/avr32/include/asm/kmap_types.h | |
4903 | +++ b/arch/avr32/include/asm/kmap_types.h | |
4904 | @@ -2,9 +2,9 @@ | |
4905 | #define __ASM_AVR32_KMAP_TYPES_H | |
4906 | ||
4907 | #ifdef CONFIG_DEBUG_HIGHMEM | |
4908 | -# define KM_TYPE_NR 29 | |
4909 | +# define KM_TYPE_NR 30 | |
4910 | #else | |
4911 | -# define KM_TYPE_NR 14 | |
4912 | +# define KM_TYPE_NR 15 | |
4913 | #endif | |
4914 | ||
4915 | #endif /* __ASM_AVR32_KMAP_TYPES_H */ | |
4916 | diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c | |
4917 | index 0eca933..eb78c7b 100644 | |
4918 | --- a/arch/avr32/mm/fault.c | |
4919 | +++ b/arch/avr32/mm/fault.c | |
4920 | @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap) | |
4921 | ||
4922 | int exception_trace = 1; | |
4923 | ||
4924 | +#ifdef CONFIG_PAX_PAGEEXEC | |
4925 | +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) | |
4926 | +{ | |
4927 | + unsigned long i; | |
4928 | + | |
4929 | + printk(KERN_ERR "PAX: bytes at PC: "); | |
4930 | + for (i = 0; i < 20; i++) { | |
4931 | + unsigned char c; | |
4932 | + if (get_user(c, (unsigned char *)pc+i)) | |
4933 | + printk(KERN_CONT "???????? "); | |
4934 | + else | |
4935 | + printk(KERN_CONT "%02x ", c); | |
4936 | + } | |
4937 | + printk("\n"); | |
4938 | +} | |
4939 | +#endif | |
4940 | + | |
4941 | /* | |
4942 | * This routine handles page faults. It determines the address and the | |
4943 | * problem, and then passes it off to one of the appropriate routines. | |
4944 | @@ -176,6 +193,16 @@ bad_area: | |
4945 | up_read(&mm->mmap_sem); | |
4946 | ||
4947 | if (user_mode(regs)) { | |
4948 | + | |
4949 | +#ifdef CONFIG_PAX_PAGEEXEC | |
4950 | + if (mm->pax_flags & MF_PAX_PAGEEXEC) { | |
4951 | + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) { | |
4952 | + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp); | |
4953 | + do_group_exit(SIGKILL); | |
4954 | + } | |
4955 | + } | |
4956 | +#endif | |
4957 | + | |
4958 | if (exception_trace && printk_ratelimit()) | |
4959 | printk("%s%s[%d]: segfault at %08lx pc %08lx " | |
4960 | "sp %08lx ecr %lu\n", | |
4961 | diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h | |
4962 | index 568885a..f8008df 100644 | |
4963 | --- a/arch/blackfin/include/asm/cache.h | |
4964 | +++ b/arch/blackfin/include/asm/cache.h | |
4965 | @@ -7,6 +7,7 @@ | |
4966 | #ifndef __ARCH_BLACKFIN_CACHE_H | |
4967 | #define __ARCH_BLACKFIN_CACHE_H | |
4968 | ||
4969 | +#include <linux/const.h> | |
4970 | #include <linux/linkage.h> /* for asmlinkage */ | |
4971 | ||
4972 | /* | |
4973 | @@ -14,7 +15,7 @@ | |
4974 | * Blackfin loads 32 bytes for cache | |
4975 | */ | |
4976 | #define L1_CACHE_SHIFT 5 | |
4977 | -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) | |
4978 | +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) | |
4979 | #define SMP_CACHE_BYTES L1_CACHE_BYTES | |
4980 | ||
4981 | #define ARCH_DMA_MINALIGN L1_CACHE_BYTES | |
4982 | diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h | |
4983 | index aea2718..3639a60 100644 | |
4984 | --- a/arch/cris/include/arch-v10/arch/cache.h | |
4985 | +++ b/arch/cris/include/arch-v10/arch/cache.h | |
4986 | @@ -1,8 +1,9 @@ | |
4987 | #ifndef _ASM_ARCH_CACHE_H | |
4988 | #define _ASM_ARCH_CACHE_H | |
4989 | ||
4990 | +#include <linux/const.h> | |
4991 | /* Etrax 100LX have 32-byte cache-lines. */ | |
4992 | -#define L1_CACHE_BYTES 32 | |
4993 | #define L1_CACHE_SHIFT 5 | |
4994 | +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) | |
4995 | ||
4996 | #endif /* _ASM_ARCH_CACHE_H */ | |
4997 | diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h | |
4998 | index 7caf25d..ee65ac5 100644 | |
4999 | --- a/arch/cris/include/arch-v32/arch/cache.h | |
5000 | +++ b/arch/cris/include/arch-v32/arch/cache.h | |
5001 | @@ -1,11 +1,12 @@ | |
5002 | #ifndef _ASM_CRIS_ARCH_CACHE_H | |
5003 | #define _ASM_CRIS_ARCH_CACHE_H | |
5004 | ||
5005 | +#include <linux/const.h> | |
5006 | #include <arch/hwregs/dma.h> | |
5007 | ||
5008 | /* A cache-line is 32 bytes. */ | |
5009 | -#define L1_CACHE_BYTES 32 | |
5010 | #define L1_CACHE_SHIFT 5 | |
5011 | +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) | |
5012 | ||
5013 | #define __read_mostly __attribute__((__section__(".data..read_mostly"))) | |
5014 | ||
5015 | diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h | |
5016 | index 102190a..5334cea 100644 | |
5017 | --- a/arch/frv/include/asm/atomic.h | |
5018 | +++ b/arch/frv/include/asm/atomic.h | |
5019 | @@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v) | |
5020 | #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter)) | |
5021 | #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter)) | |
5022 | ||
5023 | +#define atomic64_read_unchecked(v) atomic64_read(v) | |
5024 | +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) | |
5025 | +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) | |
5026 | +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) | |
5027 | +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) | |
5028 | +#define atomic64_inc_unchecked(v) atomic64_inc(v) | |
5029 | +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) | |
5030 | +#define atomic64_dec_unchecked(v) atomic64_dec(v) | |
5031 | +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) | |
5032 | + | |
5033 | static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) | |
5034 | { | |
5035 | int c, old; | |
5036 | diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h | |
5037 | index 2797163..c2a401df9 100644 | |
5038 | --- a/arch/frv/include/asm/cache.h | |
5039 | +++ b/arch/frv/include/asm/cache.h | |
5040 | @@ -12,10 +12,11 @@ | |
5041 | #ifndef __ASM_CACHE_H | |
5042 | #define __ASM_CACHE_H | |
5043 | ||
5044 | +#include <linux/const.h> | |
5045 | ||
5046 | /* bytes per L1 cache line */ | |
5047 | #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT) | |
5048 | -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) | |
5049 | +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) | |
5050 | ||
5051 | #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES))) | |
5052 | #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES))) | |
5053 | diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h | |
5054 | index 43901f2..0d8b865 100644 | |
5055 | --- a/arch/frv/include/asm/kmap_types.h | |
5056 | +++ b/arch/frv/include/asm/kmap_types.h | |
5057 | @@ -2,6 +2,6 @@ | |
5058 | #ifndef _ASM_KMAP_TYPES_H | |
5059 | #define _ASM_KMAP_TYPES_H | |
5060 | ||
5061 | -#define KM_TYPE_NR 17 | |
5062 | +#define KM_TYPE_NR 18 | |
5063 | ||
5064 | #endif | |
5065 | diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c | |
5066 | index 836f147..4cf23f5 100644 | |
5067 | --- a/arch/frv/mm/elf-fdpic.c | |
5068 | +++ b/arch/frv/mm/elf-fdpic.c | |
5069 | @@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi | |
5070 | { | |
5071 | struct vm_area_struct *vma; | |
5072 | struct vm_unmapped_area_info info; | |
5073 | + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags); | |
5074 | ||
5075 | if (len > TASK_SIZE) | |
5076 | return -ENOMEM; | |
5077 | @@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi | |
5078 | if (addr) { | |
5079 | addr = PAGE_ALIGN(addr); | |
5080 | vma = find_vma(current->mm, addr); | |
5081 | - if (TASK_SIZE - len >= addr && | |
5082 | - (!vma || addr + len <= vma->vm_start)) | |
5083 | + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) | |
5084 | goto success; | |
5085 | } | |
5086 | ||
5087 | @@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi | |
5088 | info.high_limit = (current->mm->start_stack - 0x00200000); | |
5089 | info.align_mask = 0; | |
5090 | info.align_offset = 0; | |
5091 | + info.threadstack_offset = offset; | |
5092 | addr = vm_unmapped_area(&info); | |
5093 | if (!(addr & ~PAGE_MASK)) | |
5094 | goto success; | |
5095 | diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h | |
5096 | index 2635117..fa223cb 100644 | |
5097 | --- a/arch/hexagon/include/asm/cache.h | |
5098 | +++ b/arch/hexagon/include/asm/cache.h | |
5099 | @@ -21,9 +21,11 @@ | |
5100 | #ifndef __ASM_CACHE_H | |
5101 | #define __ASM_CACHE_H | |
5102 | ||
5103 | +#include <linux/const.h> | |
5104 | + | |
5105 | /* Bytes per L1 cache line */ | |
5106 | -#define L1_CACHE_SHIFT (5) | |
5107 | -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) | |
5108 | +#define L1_CACHE_SHIFT 5 | |
5109 | +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) | |
5110 | ||
5111 | #define __cacheline_aligned __aligned(L1_CACHE_BYTES) | |
5112 | #define ____cacheline_aligned __aligned(L1_CACHE_BYTES) | |
5113 | diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig | |
5114 | index c84c88b..2a6e1ba 100644 | |
5115 | --- a/arch/ia64/Kconfig | |
5116 | +++ b/arch/ia64/Kconfig | |
5117 | @@ -549,6 +549,7 @@ source "drivers/sn/Kconfig" | |
5118 | config KEXEC | |
5119 | bool "kexec system call" | |
5120 | depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU) | |
5121 | + depends on !GRKERNSEC_KMEM | |
5122 | help | |
5123 | kexec is a system call that implements the ability to shutdown your | |
5124 | current kernel, and to start another kernel. It is like a reboot | |
5125 | diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile | |
5126 | index 5441b14..039a446 100644 | |
5127 | --- a/arch/ia64/Makefile | |
5128 | +++ b/arch/ia64/Makefile | |
5129 | @@ -99,5 +99,6 @@ endef | |
5130 | archprepare: make_nr_irqs_h FORCE | |
5131 | PHONY += make_nr_irqs_h FORCE | |
5132 | ||
5133 | +make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS)) | |
5134 | make_nr_irqs_h: FORCE | |
5135 | $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h | |
5136 | diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h | |
5137 | index 0bf0350..2ad1957 100644 | |
5138 | --- a/arch/ia64/include/asm/atomic.h | |
5139 | +++ b/arch/ia64/include/asm/atomic.h | |
5140 | @@ -193,4 +193,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v) | |
5141 | #define atomic64_inc(v) atomic64_add(1, (v)) | |
5142 | #define atomic64_dec(v) atomic64_sub(1, (v)) | |
5143 | ||
5144 | +#define atomic64_read_unchecked(v) atomic64_read(v) | |
5145 | +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) | |
5146 | +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) | |
5147 | +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) | |
5148 | +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) | |
5149 | +#define atomic64_inc_unchecked(v) atomic64_inc(v) | |
5150 | +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) | |
5151 | +#define atomic64_dec_unchecked(v) atomic64_dec(v) | |
5152 | +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) | |
5153 | + | |
5154 | #endif /* _ASM_IA64_ATOMIC_H */ | |
5155 | diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h | |
5156 | index a48957c..e097b56 100644 | |
5157 | --- a/arch/ia64/include/asm/barrier.h | |
5158 | +++ b/arch/ia64/include/asm/barrier.h | |
5159 | @@ -67,7 +67,7 @@ | |
5160 | do { \ | |
5161 | compiletime_assert_atomic_type(*p); \ | |
5162 | barrier(); \ | |
5163 | - ACCESS_ONCE(*p) = (v); \ | |
5164 | + ACCESS_ONCE_RW(*p) = (v); \ | |
5165 | } while (0) | |
5166 | ||
5167 | #define smp_load_acquire(p) \ | |
5168 | diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h | |
5169 | index 988254a..e1ee885 100644 | |
5170 | --- a/arch/ia64/include/asm/cache.h | |
5171 | +++ b/arch/ia64/include/asm/cache.h | |
5172 | @@ -1,6 +1,7 @@ | |
5173 | #ifndef _ASM_IA64_CACHE_H | |
5174 | #define _ASM_IA64_CACHE_H | |
5175 | ||
5176 | +#include <linux/const.h> | |
5177 | ||
5178 | /* | |
5179 | * Copyright (C) 1998-2000 Hewlett-Packard Co | |
5180 | @@ -9,7 +10,7 @@ | |
5181 | ||
5182 | /* Bytes per L1 (data) cache line. */ | |
5183 | #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT | |
5184 | -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) | |
5185 | +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) | |
5186 | ||
5187 | #ifdef CONFIG_SMP | |
5188 | # define SMP_CACHE_SHIFT L1_CACHE_SHIFT | |
5189 | diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h | |
5190 | index 5a83c5c..4d7f553 100644 | |
5191 | --- a/arch/ia64/include/asm/elf.h | |
5192 | +++ b/arch/ia64/include/asm/elf.h | |
5193 | @@ -42,6 +42,13 @@ | |
5194 | */ | |
5195 | #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL) | |
5196 | ||
5197 | +#ifdef CONFIG_PAX_ASLR | |
5198 | +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL) | |
5199 | + | |
5200 | +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13) | |
5201 | +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13) | |
5202 | +#endif | |
5203 | + | |
5204 | #define PT_IA_64_UNWIND 0x70000001 | |
5205 | ||
5206 | /* IA-64 relocations: */ | |
5207 | diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h | |
5208 | index 5767cdf..7462574 100644 | |
5209 | --- a/arch/ia64/include/asm/pgalloc.h | |
5210 | +++ b/arch/ia64/include/asm/pgalloc.h | |
5211 | @@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud) | |
5212 | pgd_val(*pgd_entry) = __pa(pud); | |
5213 | } | |
5214 | ||
5215 | +static inline void | |
5216 | +pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud) | |
5217 | +{ | |
5218 | + pgd_populate(mm, pgd_entry, pud); | |
5219 | +} | |
5220 | + | |
5221 | static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) | |
5222 | { | |
5223 | return quicklist_alloc(0, GFP_KERNEL, NULL); | |
5224 | @@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd) | |
5225 | pud_val(*pud_entry) = __pa(pmd); | |
5226 | } | |
5227 | ||
5228 | +static inline void | |
5229 | +pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd) | |
5230 | +{ | |
5231 | + pud_populate(mm, pud_entry, pmd); | |
5232 | +} | |
5233 | + | |
5234 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) | |
5235 | { | |
5236 | return quicklist_alloc(0, GFP_KERNEL, NULL); | |
5237 | diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h | |
5238 | index 7935115..c0eca6a 100644 | |
5239 | --- a/arch/ia64/include/asm/pgtable.h | |
5240 | +++ b/arch/ia64/include/asm/pgtable.h | |
5241 | @@ -12,7 +12,7 @@ | |
5242 | * David Mosberger-Tang <davidm@hpl.hp.com> | |
5243 | */ | |
5244 | ||
5245 | - | |
5246 | +#include <linux/const.h> | |
5247 | #include <asm/mman.h> | |
5248 | #include <asm/page.h> | |
5249 | #include <asm/processor.h> | |
5250 | @@ -142,6 +142,17 @@ | |
5251 | #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) | |
5252 | #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) | |
5253 | #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX) | |
5254 | + | |
5255 | +#ifdef CONFIG_PAX_PAGEEXEC | |
5256 | +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW) | |
5257 | +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) | |
5258 | +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) | |
5259 | +#else | |
5260 | +# define PAGE_SHARED_NOEXEC PAGE_SHARED | |
5261 | +# define PAGE_READONLY_NOEXEC PAGE_READONLY | |
5262 | +# define PAGE_COPY_NOEXEC PAGE_COPY | |
5263 | +#endif | |
5264 | + | |
5265 | #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX) | |
5266 | #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX) | |
5267 | #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX) | |
5268 | diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h | |
5269 | index 45698cd..e8e2dbc 100644 | |
5270 | --- a/arch/ia64/include/asm/spinlock.h | |
5271 | +++ b/arch/ia64/include/asm/spinlock.h | |
5272 | @@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) | |
5273 | unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; | |
5274 | ||
5275 | asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p)); | |
5276 | - ACCESS_ONCE(*p) = (tmp + 2) & ~1; | |
5277 | + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1; | |
5278 | } | |
5279 | ||
5280 | static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock) | |
5281 | diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h | |
5282 | index 449c8c0..3d4b1e9 100644 | |
5283 | --- a/arch/ia64/include/asm/uaccess.h | |
5284 | +++ b/arch/ia64/include/asm/uaccess.h | |
5285 | @@ -70,6 +70,7 @@ | |
5286 | && ((segment).seg == KERNEL_DS.seg \ | |
5287 | || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \ | |
5288 | }) | |
5289 | +#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size)) | |
5290 | #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs()) | |
5291 | ||
5292 | /* | |
5293 | @@ -240,12 +241,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use | |
5294 | static inline unsigned long | |
5295 | __copy_to_user (void __user *to, const void *from, unsigned long count) | |
5296 | { | |
5297 | + if (count > INT_MAX) | |
5298 | + return count; | |
5299 | + | |
5300 | + if (!__builtin_constant_p(count)) | |
5301 | + check_object_size(from, count, true); | |
5302 | + | |
5303 | return __copy_user(to, (__force void __user *) from, count); | |
5304 | } | |
5305 | ||
5306 | static inline unsigned long | |
5307 | __copy_from_user (void *to, const void __user *from, unsigned long count) | |
5308 | { | |
5309 | + if (count > INT_MAX) | |
5310 | + return count; | |
5311 | + | |
5312 | + if (!__builtin_constant_p(count)) | |
5313 | + check_object_size(to, count, false); | |
5314 | + | |
5315 | return __copy_user((__force void __user *) to, from, count); | |
5316 | } | |
5317 | ||
5318 | @@ -255,10 +268,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) | |
5319 | ({ \ | |
5320 | void __user *__cu_to = (to); \ | |
5321 | const void *__cu_from = (from); \ | |
5322 | - long __cu_len = (n); \ | |
5323 | + unsigned long __cu_len = (n); \ | |
5324 | \ | |
5325 | - if (__access_ok(__cu_to, __cu_len, get_fs())) \ | |
5326 | + if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \ | |
5327 | + if (!__builtin_constant_p(n)) \ | |
5328 | + check_object_size(__cu_from, __cu_len, true); \ | |
5329 | __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \ | |
5330 | + } \ | |
5331 | __cu_len; \ | |
5332 | }) | |
5333 | ||
5334 | @@ -266,11 +282,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) | |
5335 | ({ \ | |
5336 | void *__cu_to = (to); \ | |
5337 | const void __user *__cu_from = (from); \ | |
5338 | - long __cu_len = (n); \ | |
5339 | + unsigned long __cu_len = (n); \ | |
5340 | \ | |
5341 | __chk_user_ptr(__cu_from); \ | |
5342 | - if (__access_ok(__cu_from, __cu_len, get_fs())) \ | |
5343 | + if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \ | |
5344 | + if (!__builtin_constant_p(n)) \ | |
5345 | + check_object_size(__cu_to, __cu_len, false); \ | |
5346 | __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \ | |
5347 | + } \ | |
5348 | __cu_len; \ | |
5349 | }) | |
5350 | ||
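The ia64 uaccess.h hunk above (like the m32r usercopy.c one at the end of this section) adds two defensive checks before copying: sizes above INT_MAX are rejected outright, since a miscomputed negative length shows up as a huge unsigned value, and non-constant sizes go through check_object_size() for the usercopy object-size check. A stand-alone sketch of the size guard alone, with memcpy() standing in for __copy_user():

    #include <stdio.h>
    #include <string.h>
    #include <limits.h>

    /* returns the number of bytes NOT copied, mimicking the kernel convention */
    static unsigned long checked_copy(void *to, const void *from, unsigned long count)
    {
            if (count > INT_MAX)            /* length underflow or absurd size: refuse */
                    return count;
            memcpy(to, from, count);        /* stand-in for __copy_user() */
            return 0;
    }

    int main(void)
    {
            char src[16] = "hello", dst[16] = { 0 };
            long bogus = -1;                /* e.g. a miscomputed length */

            printf("good copy: %lu bytes left\n", checked_copy(dst, src, 6));
            printf("bad  copy: %lu bytes left\n",
                   checked_copy(dst, src, (unsigned long)bogus));
            return 0;
    }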
5351 | diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c | |
5352 | index 24603be..948052d 100644 | |
5353 | --- a/arch/ia64/kernel/module.c | |
5354 | +++ b/arch/ia64/kernel/module.c | |
5355 | @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt) | |
5356 | void | |
5357 | module_free (struct module *mod, void *module_region) | |
5358 | { | |
5359 | - if (mod && mod->arch.init_unw_table && | |
5360 | - module_region == mod->module_init) { | |
5361 | + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) { | |
5362 | unw_remove_unwind_table(mod->arch.init_unw_table); | |
5363 | mod->arch.init_unw_table = NULL; | |
5364 | } | |
5365 | @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings, | |
5366 | } | |
5367 | ||
5368 | static inline int | |
5369 | +in_init_rx (const struct module *mod, uint64_t addr) | |
5370 | +{ | |
5371 | + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx; | |
5372 | +} | |
5373 | + | |
5374 | +static inline int | |
5375 | +in_init_rw (const struct module *mod, uint64_t addr) | |
5376 | +{ | |
5377 | + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw; | |
5378 | +} | |
5379 | + | |
5380 | +static inline int | |
5381 | in_init (const struct module *mod, uint64_t addr) | |
5382 | { | |
5383 | - return addr - (uint64_t) mod->module_init < mod->init_size; | |
5384 | + return in_init_rx(mod, addr) || in_init_rw(mod, addr); | |
5385 | +} | |
5386 | + | |
5387 | +static inline int | |
5388 | +in_core_rx (const struct module *mod, uint64_t addr) | |
5389 | +{ | |
5390 | + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx; | |
5391 | +} | |
5392 | + | |
5393 | +static inline int | |
5394 | +in_core_rw (const struct module *mod, uint64_t addr) | |
5395 | +{ | |
5396 | + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw; | |
5397 | } | |
5398 | ||
5399 | static inline int | |
5400 | in_core (const struct module *mod, uint64_t addr) | |
5401 | { | |
5402 | - return addr - (uint64_t) mod->module_core < mod->core_size; | |
5403 | + return in_core_rx(mod, addr) || in_core_rw(mod, addr); | |
5404 | } | |
5405 | ||
5406 | static inline int | |
5407 | @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend, | |
5408 | break; | |
5409 | ||
5410 | case RV_BDREL: | |
5411 | - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core); | |
5412 | + if (in_init_rx(mod, val)) | |
5413 | + val -= (uint64_t) mod->module_init_rx; | |
5414 | + else if (in_init_rw(mod, val)) | |
5415 | + val -= (uint64_t) mod->module_init_rw; | |
5416 | + else if (in_core_rx(mod, val)) | |
5417 | + val -= (uint64_t) mod->module_core_rx; | |
5418 | + else if (in_core_rw(mod, val)) | |
5419 | + val -= (uint64_t) mod->module_core_rw; | |
5420 | break; | |
5421 | ||
5422 | case RV_LTV: | |
5423 | @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind | |
5424 | * addresses have been selected... | |
5425 | */ | |
5426 | uint64_t gp; | |
5427 | - if (mod->core_size > MAX_LTOFF) | |
5428 | + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF) | |
5429 | /* | |
5430 | * This takes advantage of fact that SHF_ARCH_SMALL gets allocated | |
5431 | * at the end of the module. | |
5432 | */ | |
5433 | - gp = mod->core_size - MAX_LTOFF / 2; | |
5434 | + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2; | |
5435 | else | |
5436 | - gp = mod->core_size / 2; | |
5437 | - gp = (uint64_t) mod->module_core + ((gp + 7) & -8); | |
5438 | + gp = (mod->core_size_rx + mod->core_size_rw) / 2; | |
5439 | + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8); | |
5440 | mod->arch.gp = gp; | |
5441 | DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp); | |
5442 | } | |
5443 | diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c | |
5444 | index c39c3cd..3c77738 100644 | |
5445 | --- a/arch/ia64/kernel/palinfo.c | |
5446 | +++ b/arch/ia64/kernel/palinfo.c | |
5447 | @@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb, | |
5448 | return NOTIFY_OK; | |
5449 | } | |
5450 | ||
5451 | -static struct notifier_block __refdata palinfo_cpu_notifier = | |
5452 | +static struct notifier_block palinfo_cpu_notifier = | |
5453 | { | |
5454 | .notifier_call = palinfo_cpu_callback, | |
5455 | .priority = 0, | |
5456 | diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c | |
5457 | index 41e33f8..65180b2a 100644 | |
5458 | --- a/arch/ia64/kernel/sys_ia64.c | |
5459 | +++ b/arch/ia64/kernel/sys_ia64.c | |
5460 | @@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len | |
5461 | unsigned long align_mask = 0; | |
5462 | struct mm_struct *mm = current->mm; | |
5463 | struct vm_unmapped_area_info info; | |
5464 | + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); | |
5465 | ||
5466 | if (len > RGN_MAP_LIMIT) | |
5467 | return -ENOMEM; | |
5468 | @@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len | |
5469 | if (REGION_NUMBER(addr) == RGN_HPAGE) | |
5470 | addr = 0; | |
5471 | #endif | |
5472 | + | |
5473 | +#ifdef CONFIG_PAX_RANDMMAP | |
5474 | + if (mm->pax_flags & MF_PAX_RANDMMAP) | |
5475 | + addr = mm->free_area_cache; | |
5476 | + else | |
5477 | +#endif | |
5478 | + | |
5479 | if (!addr) | |
5480 | addr = TASK_UNMAPPED_BASE; | |
5481 | ||
5482 | @@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len | |
5483 | info.high_limit = TASK_SIZE; | |
5484 | info.align_mask = align_mask; | |
5485 | info.align_offset = 0; | |
5486 | + info.threadstack_offset = offset; | |
5487 | return vm_unmapped_area(&info); | |
5488 | } | |
5489 | ||
5490 | diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S | |
5491 | index 84f8a52..7c76178 100644 | |
5492 | --- a/arch/ia64/kernel/vmlinux.lds.S | |
5493 | +++ b/arch/ia64/kernel/vmlinux.lds.S | |
5494 | @@ -192,7 +192,7 @@ SECTIONS { | |
5495 | /* Per-cpu data: */ | |
5496 | . = ALIGN(PERCPU_PAGE_SIZE); | |
5497 | PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu) | |
5498 | - __phys_per_cpu_start = __per_cpu_load; | |
5499 | + __phys_per_cpu_start = per_cpu_load; | |
5500 | /* | |
5501 | * ensure percpu data fits | |
5502 | * into percpu page size | |
5503 | diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c | |
5504 | index 7225dad..2a7c8256 100644 | |
5505 | --- a/arch/ia64/mm/fault.c | |
5506 | +++ b/arch/ia64/mm/fault.c | |
5507 | @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address) | |
5508 | return pte_present(pte); | |
5509 | } | |
5510 | ||
5511 | +#ifdef CONFIG_PAX_PAGEEXEC | |
5512 | +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) | |
5513 | +{ | |
5514 | + unsigned long i; | |
5515 | + | |
5516 | + printk(KERN_ERR "PAX: bytes at PC: "); | |
5517 | + for (i = 0; i < 8; i++) { | |
5518 | + unsigned int c; | |
5519 | + if (get_user(c, (unsigned int *)pc+i)) | |
5520 | + printk(KERN_CONT "???????? "); | |
5521 | + else | |
5522 | + printk(KERN_CONT "%08x ", c); | |
5523 | + } | |
5524 | + printk("\n"); | |
5525 | +} | |
5526 | +#endif | |
5527 | + | |
5528 | # define VM_READ_BIT 0 | |
5529 | # define VM_WRITE_BIT 1 | |
5530 | # define VM_EXEC_BIT 2 | |
5531 | @@ -151,8 +168,21 @@ retry: | |
5532 | if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE)))) | |
5533 | goto bad_area; | |
5534 | ||
5535 | - if ((vma->vm_flags & mask) != mask) | |
5536 | + if ((vma->vm_flags & mask) != mask) { | |
5537 | + | |
5538 | +#ifdef CONFIG_PAX_PAGEEXEC | |
5539 | + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) { | |
5540 | + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip) | |
5541 | + goto bad_area; | |
5542 | + | |
5543 | + up_read(&mm->mmap_sem); | |
5544 | + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12); | |
5545 | + do_group_exit(SIGKILL); | |
5546 | + } | |
5547 | +#endif | |
5548 | + | |
5549 | goto bad_area; | |
5550 | + } | |
5551 | ||
5552 | /* | |
5553 | * If for any reason at all we couldn't handle the fault, make | |
5554 | diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c | |
5555 | index 76069c1..c2aa816 100644 | |
5556 | --- a/arch/ia64/mm/hugetlbpage.c | |
5557 | +++ b/arch/ia64/mm/hugetlbpage.c | |
5558 | @@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u | |
5559 | unsigned long pgoff, unsigned long flags) | |
5560 | { | |
5561 | struct vm_unmapped_area_info info; | |
5562 | + unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags); | |
5563 | ||
5564 | if (len > RGN_MAP_LIMIT) | |
5565 | return -ENOMEM; | |
5566 | @@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u | |
5567 | info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT; | |
5568 | info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1); | |
5569 | info.align_offset = 0; | |
5570 | + info.threadstack_offset = offset; | |
5571 | return vm_unmapped_area(&info); | |
5572 | } | |
5573 | ||
5574 | diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c | |
5575 | index 6b33457..88b5124 100644 | |
5576 | --- a/arch/ia64/mm/init.c | |
5577 | +++ b/arch/ia64/mm/init.c | |
5578 | @@ -120,6 +120,19 @@ ia64_init_addr_space (void) | |
5579 | vma->vm_start = current->thread.rbs_bot & PAGE_MASK; | |
5580 | vma->vm_end = vma->vm_start + PAGE_SIZE; | |
5581 | vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; | |
5582 | + | |
5583 | +#ifdef CONFIG_PAX_PAGEEXEC | |
5584 | + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) { | |
5585 | + vma->vm_flags &= ~VM_EXEC; | |
5586 | + | |
5587 | +#ifdef CONFIG_PAX_MPROTECT | |
5588 | + if (current->mm->pax_flags & MF_PAX_MPROTECT) | |
5589 | + vma->vm_flags &= ~VM_MAYEXEC; | |
5590 | +#endif | |
5591 | + | |
5592 | + } | |
5593 | +#endif | |
5594 | + | |
5595 | vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); | |
5596 | down_write(¤t->mm->mmap_sem); | |
5597 | if (insert_vm_struct(current->mm, vma)) { | |
5598 | @@ -286,7 +299,7 @@ static int __init gate_vma_init(void) | |
5599 | gate_vma.vm_start = FIXADDR_USER_START; | |
5600 | gate_vma.vm_end = FIXADDR_USER_END; | |
5601 | gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; | |
5602 | - gate_vma.vm_page_prot = __P101; | |
5603 | + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags); | |
5604 | ||
5605 | return 0; | |
5606 | } | |
5607 | diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h | |
5608 | index 40b3ee9..8c2c112 100644 | |
5609 | --- a/arch/m32r/include/asm/cache.h | |
5610 | +++ b/arch/m32r/include/asm/cache.h | |
5611 | @@ -1,8 +1,10 @@ | |
5612 | #ifndef _ASM_M32R_CACHE_H | |
5613 | #define _ASM_M32R_CACHE_H | |
5614 | ||
5615 | +#include <linux/const.h> | |
5616 | + | |
5617 | /* L1 cache line size */ | |
5618 | #define L1_CACHE_SHIFT 4 | |
5619 | -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) | |
5620 | +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) | |
5621 | ||
5622 | #endif /* _ASM_M32R_CACHE_H */ | |
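The cache.h hunks in this patch (m32r here, and m68k, microblaze and mips below) all make the same two-line change: pull in <linux/const.h> and build L1_CACHE_BYTES from _AC(1,UL) instead of a bare 1, so the constant stays usable from assembler sources while becoming an unsigned long in C. A minimal sketch of the mechanism follows; the _AC() definition is paraphrased from upstream's <uapi/linux/const.h>, and the shift value is illustrative rather than taken from any one architecture above.

    /* sketch: why _AC(1,UL) is preferred over a bare 1 for L1_CACHE_BYTES */
    #include <stdio.h>

    #ifdef __ASSEMBLY__
    #define _AC(X, Y)   X               /* in .S files: plain "1", no UL suffix */
    #else
    #define __AC(X, Y)  (X##Y)
    #define _AC(X, Y)   __AC(X, Y)      /* in C: "(1UL)", i.e. unsigned long */
    #endif

    #define L1_CACHE_SHIFT  4
    #define L1_CACHE_BYTES  (_AC(1, UL) << L1_CACHE_SHIFT)

    int main(void)
    {
        /* the constant is unsigned long, so masks and size arithmetic on
         * 64-bit addresses avoid int promotion surprises */
        printf("L1_CACHE_BYTES = %lu, sizeof = %zu\n",
               L1_CACHE_BYTES, sizeof(L1_CACHE_BYTES));
        return 0;
    }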
5623 | diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c | |
5624 | index 82abd15..d95ae5d 100644 | |
5625 | --- a/arch/m32r/lib/usercopy.c | |
5626 | +++ b/arch/m32r/lib/usercopy.c | |
5627 | @@ -14,6 +14,9 @@ | |
5628 | unsigned long | |
5629 | __generic_copy_to_user(void __user *to, const void *from, unsigned long n) | |
5630 | { | |
5631 | + if ((long)n < 0) | |
5632 | + return n; | |
5633 | + | |
5634 | prefetch(from); | |
5635 | if (access_ok(VERIFY_WRITE, to, n)) | |
5636 | __copy_user(to,from,n); | |
5637 | @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n) | |
5638 | unsigned long | |
5639 | __generic_copy_from_user(void *to, const void __user *from, unsigned long n) | |
5640 | { | |
5641 | + if ((long)n < 0) | |
5642 | + return n; | |
5643 | + | |
5644 | prefetchw(to); | |
5645 | if (access_ok(VERIFY_READ, from, n)) | |
5646 | __copy_user_zeroing(to,from,n); | |
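Both __generic_copy_to_user() and __generic_copy_from_user() gain the same guard: if ((long)n < 0) return n;. A length that underflowed in the caller arrives as a huge unsigned value; viewed as signed it is negative, so the copy is refused before access_ok() or __copy_user() ever run, and the caller sees "n bytes not copied", i.e. failure. A stand-alone sketch of that behaviour (the function and variable names here are illustrative, not the patch's):

    #include <stdio.h>

    /* toy model of the guarded copy helper */
    static unsigned long copy_guarded(void *to, const void *from, unsigned long n)
    {
        (void)to;
        (void)from;

        if ((long)n < 0)        /* more than LONG_MAX bytes is certainly bogus */
            return n;           /* "n bytes left uncopied" == failure */

        /* ... access_ok() and __copy_user() would run here in the kernel ... */
        return 0;
    }

    int main(void)
    {
        unsigned long len = 16, hdr = 24;
        unsigned long n = len - hdr;    /* underflows to a huge value */

        printf("left uncopied: %lu\n", copy_guarded(NULL, NULL, n));
        return 0;
    }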
5647 | diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h | |
5648 | index 0395c51..5f26031 100644 | |
5649 | --- a/arch/m68k/include/asm/cache.h | |
5650 | +++ b/arch/m68k/include/asm/cache.h | |
5651 | @@ -4,9 +4,11 @@ | |
5652 | #ifndef __ARCH_M68K_CACHE_H | |
5653 | #define __ARCH_M68K_CACHE_H | |
5654 | ||
5655 | +#include <linux/const.h> | |
5656 | + | |
5657 | /* bytes per L1 cache line */ | |
5658 | #define L1_CACHE_SHIFT 4 | |
5659 | -#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT) | |
5660 | +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) | |
5661 | ||
5662 | #define ARCH_DMA_MINALIGN L1_CACHE_BYTES | |
5663 | ||
5664 | diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h | |
5665 | index c7591e8..ecef036 100644 | |
5666 | --- a/arch/metag/include/asm/barrier.h | |
5667 | +++ b/arch/metag/include/asm/barrier.h | |
5668 | @@ -89,7 +89,7 @@ static inline void fence(void) | |
5669 | do { \ | |
5670 | compiletime_assert_atomic_type(*p); \ | |
5671 | smp_mb(); \ | |
5672 | - ACCESS_ONCE(*p) = (v); \ | |
5673 | + ACCESS_ONCE_RW(*p) = (v); \ | |
5674 | } while (0) | |
5675 | ||
5676 | #define smp_load_acquire(p) \ | |
5677 | diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c | |
5678 | index 3c32075..ae0ae75 100644 | |
5679 | --- a/arch/metag/mm/hugetlbpage.c | |
5680 | +++ b/arch/metag/mm/hugetlbpage.c | |
5681 | @@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len) | |
5682 | info.high_limit = TASK_SIZE; | |
5683 | info.align_mask = PAGE_MASK & HUGEPT_MASK; | |
5684 | info.align_offset = 0; | |
5685 | + info.threadstack_offset = 0; | |
5686 | return vm_unmapped_area(&info); | |
5687 | } | |
5688 | ||
5689 | diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h | |
5690 | index 4efe96a..60e8699 100644 | |
5691 | --- a/arch/microblaze/include/asm/cache.h | |
5692 | +++ b/arch/microblaze/include/asm/cache.h | |
5693 | @@ -13,11 +13,12 @@ | |
5694 | #ifndef _ASM_MICROBLAZE_CACHE_H | |
5695 | #define _ASM_MICROBLAZE_CACHE_H | |
5696 | ||
5697 | +#include <linux/const.h> | |
5698 | #include <asm/registers.h> | |
5699 | ||
5700 | #define L1_CACHE_SHIFT 5 | |
5701 | /* word-granular cache in microblaze */ | |
5702 | -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) | |
5703 | +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) | |
5704 | ||
5705 | #define SMP_CACHE_BYTES L1_CACHE_BYTES | |
5706 | ||
5707 | diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig | |
5708 | index 9536ef9..9333776 100644 | |
5709 | --- a/arch/mips/Kconfig | |
5710 | +++ b/arch/mips/Kconfig | |
5711 | @@ -2413,6 +2413,7 @@ source "kernel/Kconfig.preempt" | |
5712 | ||
5713 | config KEXEC | |
5714 | bool "Kexec system call" | |
5715 | + depends on !GRKERNSEC_KMEM | |
5716 | help | |
5717 | kexec is a system call that implements the ability to shutdown your | |
5718 | current kernel, and to start another kernel. It is like a reboot | |
5719 | diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c | |
5720 | index 02f2444..506969c 100644 | |
5721 | --- a/arch/mips/cavium-octeon/dma-octeon.c | |
5722 | +++ b/arch/mips/cavium-octeon/dma-octeon.c | |
5723 | @@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size, | |
5724 | if (dma_release_from_coherent(dev, order, vaddr)) | |
5725 | return; | |
5726 | ||
5727 | - swiotlb_free_coherent(dev, size, vaddr, dma_handle); | |
5728 | + swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs); | |
5729 | } | |
5730 | ||
5731 | static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr) | |
5732 | diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h | |
5733 | index 6dd6bfc..903b0d6 100644 | |
5734 | --- a/arch/mips/include/asm/atomic.h | |
5735 | +++ b/arch/mips/include/asm/atomic.h | |
5736 | @@ -21,15 +21,39 @@ | |
5737 | #include <asm/cmpxchg.h> | |
5738 | #include <asm/war.h> | |
5739 | ||
5740 | +#ifdef CONFIG_GENERIC_ATOMIC64 | |
5741 | +#include <asm-generic/atomic64.h> | |
5742 | +#endif | |
5743 | + | |
5744 | #define ATOMIC_INIT(i) { (i) } | |
5745 | ||
5746 | +#ifdef CONFIG_64BIT | |
5747 | +#define _ASM_EXTABLE(from, to) \ | |
5748 | +" .section __ex_table,\"a\"\n" \ | |
5749 | +" .dword " #from ", " #to"\n" \ | |
5750 | +" .previous\n" | |
5751 | +#else | |
5752 | +#define _ASM_EXTABLE(from, to) \ | |
5753 | +" .section __ex_table,\"a\"\n" \ | |
5754 | +" .word " #from ", " #to"\n" \ | |
5755 | +" .previous\n" | |
5756 | +#endif | |
5757 | + | |
5758 | /* | |
5759 | * atomic_read - read atomic variable | |
5760 | * @v: pointer of type atomic_t | |
5761 | * | |
5762 | * Atomically reads the value of @v. | |
5763 | */ | |
5764 | -#define atomic_read(v) ACCESS_ONCE((v)->counter) | |
5765 | +static inline int atomic_read(const atomic_t *v) | |
5766 | +{ | |
5767 | + return ACCESS_ONCE(v->counter); | |
5768 | +} | |
5769 | + | |
5770 | +static inline int atomic_read_unchecked(const atomic_unchecked_t *v) | |
5771 | +{ | |
5772 | + return ACCESS_ONCE(v->counter); | |
5773 | +} | |
5774 | ||
5775 | /* | |
5776 | * atomic_set - set atomic variable | |
5777 | @@ -38,47 +62,77 @@ | |
5778 | * | |
5779 | * Atomically sets the value of @v to @i. | |
5780 | */ | |
5781 | -#define atomic_set(v, i) ((v)->counter = (i)) | |
5782 | +static inline void atomic_set(atomic_t *v, int i) | |
5783 | +{ | |
5784 | + v->counter = i; | |
5785 | +} | |
5786 | ||
5787 | -#define ATOMIC_OP(op, c_op, asm_op) \ | |
5788 | -static __inline__ void atomic_##op(int i, atomic_t * v) \ | |
5789 | +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) | |
5790 | +{ | |
5791 | + v->counter = i; | |
5792 | +} | |
5793 | + | |
5794 | +#ifdef CONFIG_PAX_REFCOUNT | |
5795 | +#define __OVERFLOW_POST \ | |
5796 | + " b 4f \n" \ | |
5797 | + " .set noreorder \n" \ | |
5798 | + "3: b 5f \n" \ | |
5799 | + " move %0, %1 \n" \ | |
5800 | + " .set reorder \n" | |
5801 | +#define __OVERFLOW_EXTABLE \ | |
5802 | + "3:\n" \ | |
5803 | + _ASM_EXTABLE(2b, 3b) | |
5804 | +#else | |
5805 | +#define __OVERFLOW_POST | |
5806 | +#define __OVERFLOW_EXTABLE | |
5807 | +#endif | |
5808 | + | |
5809 | +#define __ATOMIC_OP(op, suffix, asm_op, extable) \ | |
5810 | +static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v) \ | |
5811 | { \ | |
5812 | if (kernel_uses_llsc && R10000_LLSC_WAR) { \ | |
5813 | int temp; \ | |
5814 | \ | |
5815 | __asm__ __volatile__( \ | |
5816 | - " .set arch=r4000 \n" \ | |
5817 | - "1: ll %0, %1 # atomic_" #op " \n" \ | |
5818 | - " " #asm_op " %0, %2 \n" \ | |
5819 | + " .set mips3 \n" \ | |
5820 | + "1: ll %0, %1 # atomic_" #op #suffix "\n" \ | |
5821 | + "2: " #asm_op " %0, %2 \n" \ | |
5822 | " sc %0, %1 \n" \ | |
5823 | " beqzl %0, 1b \n" \ | |
5824 | + extable \ | |
5825 | " .set mips0 \n" \ | |
5826 | : "=&r" (temp), "+m" (v->counter) \ | |
5827 | : "Ir" (i)); \ | |
5828 | } else if (kernel_uses_llsc) { \ | |
5829 | int temp; \ | |
5830 | \ | |
5831 | - do { \ | |
5832 | - __asm__ __volatile__( \ | |
5833 | - " .set arch=r4000 \n" \ | |
5834 | - " ll %0, %1 # atomic_" #op "\n" \ | |
5835 | - " " #asm_op " %0, %2 \n" \ | |
5836 | - " sc %0, %1 \n" \ | |
5837 | - " .set mips0 \n" \ | |
5838 | - : "=&r" (temp), "+m" (v->counter) \ | |
5839 | - : "Ir" (i)); \ | |
5840 | - } while (unlikely(!temp)); \ | |
5841 | + __asm__ __volatile__( \ | |
5842 | + " .set mips3 \n" \ | |
5843 | + "1: ll %0, %1 # atomic_" #op #suffix "\n" \ | |
5844 | + "2: " #asm_op " %0, %2 \n" \ | |
5845 | + " sc %0, %1 \n" \ | |
5846 | + " beqz %0, 1b \n" \ | |
5847 | + extable \ | |
5848 | + " .set mips0 \n" \ | |
5849 | + : "=&r" (temp), "+m" (v->counter) \ | |
5850 | + : "Ir" (i)); \ | |
5851 | } else { \ | |
5852 | unsigned long flags; \ | |
5853 | \ | |
5854 | raw_local_irq_save(flags); \ | |
5855 | - v->counter c_op i; \ | |
5856 | + __asm__ __volatile__( \ | |
5857 | + "2: " #asm_op " %0, %1 \n" \ | |
5858 | + extable \ | |
5859 | + : "+r" (v->counter) : "Ir" (i)); \ | |
5860 | raw_local_irq_restore(flags); \ | |
5861 | } \ | |
5862 | } \ | |
5863 | ||
5864 | -#define ATOMIC_OP_RETURN(op, c_op, asm_op) \ | |
5865 | -static __inline__ int atomic_##op##_return(int i, atomic_t * v) \ | |
5866 | +#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , asm_op##u) \ | |
5867 | + __ATOMIC_OP(op, _unchecked, asm_op) | |
5868 | + | |
5869 | +#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable) \ | |
5870 | +static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t * v) \ | |
5871 | { \ | |
5872 | int result; \ | |
5873 | \ | |
5874 | @@ -88,37 +142,47 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \ | |
5875 | int temp; \ | |
5876 | \ | |
5877 | __asm__ __volatile__( \ | |
5878 | - " .set arch=r4000 \n" \ | |
5879 | - "1: ll %1, %2 # atomic_" #op "_return \n" \ | |
5880 | - " " #asm_op " %0, %1, %3 \n" \ | |
5881 | + " .set mips3 \n" \ | |
5882 | + "1: ll %1, %2 # atomic_" #op "_return" #suffix "\n" \ | |
5883 | + "2: " #asm_op " %0, %1, %3 \n" \ | |
5884 | " sc %0, %2 \n" \ | |
5885 | " beqzl %0, 1b \n" \ | |
5886 | - " " #asm_op " %0, %1, %3 \n" \ | |
5887 | + post_op \ | |
5888 | + extable \ | |
5889 | + "4: " #asm_op " %0, %1, %3 \n" \ | |
5890 | + "5: \n" \ | |
5891 | " .set mips0 \n" \ | |
5892 | : "=&r" (result), "=&r" (temp), "+m" (v->counter) \ | |
5893 | : "Ir" (i)); \ | |
5894 | } else if (kernel_uses_llsc) { \ | |
5895 | int temp; \ | |
5896 | \ | |
5897 | - do { \ | |
5898 | - __asm__ __volatile__( \ | |
5899 | - " .set arch=r4000 \n" \ | |
5900 | - " ll %1, %2 # atomic_" #op "_return \n" \ | |
5901 | - " " #asm_op " %0, %1, %3 \n" \ | |
5902 | - " sc %0, %2 \n" \ | |
5903 | - " .set mips0 \n" \ | |
5904 | - : "=&r" (result), "=&r" (temp), "+m" (v->counter) \ | |
5905 | - : "Ir" (i)); \ | |
5906 | - } while (unlikely(!result)); \ | |
5907 | + __asm__ __volatile__( \ | |
5908 | + " .set mips3 \n" \ | |
5909 | + "1: ll %1, %2 # atomic_" #op "_return" #suffix "\n" \ | |
5910 | + "2: " #asm_op " %0, %1, %3 \n" \ | |
5911 | + " sc %0, %2 \n" \ | |
5912 | + " beqz %0, 1b \n" \ | |
5913 | + post_op \ | |
5914 | + extable \ | |
5915 | + "4: " #asm_op " %0, %1, %3 \n" \ | |
5916 | + "5: \n" \ | |
5917 | + " .set mips0 \n" \ | |
5918 | + : "=&r" (result), "=&r" (temp), "+m" (v->counter) \ | |
5919 | + : "Ir" (i)); \ | |
5920 | \ | |
5921 | result = temp; result c_op i; \ | |
5922 | } else { \ | |
5923 | unsigned long flags; \ | |
5924 | \ | |
5925 | raw_local_irq_save(flags); \ | |
5926 | - result = v->counter; \ | |
5927 | - result c_op i; \ | |
5928 | - v->counter = result; \ | |
5929 | + __asm__ __volatile__( \ | |
5930 | + " lw %0, %1 \n" \ | |
5931 | + "2: " #asm_op " %0, %1, %2 \n" \ | |
5932 | + " sw %0, %1 \n" \ | |
5933 | + "3: \n" \ | |
5934 | + extable \ | |
5935 | + : "=&r" (result), "+m" (v->counter) : "Ir" (i)); \ | |
5936 | raw_local_irq_restore(flags); \ | |
5937 | } \ | |
5938 | \ | |
5939 | @@ -127,16 +191,21 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \ | |
5940 | return result; \ | |
5941 | } | |
5942 | ||
5943 | -#define ATOMIC_OPS(op, c_op, asm_op) \ | |
5944 | - ATOMIC_OP(op, c_op, asm_op) \ | |
5945 | - ATOMIC_OP_RETURN(op, c_op, asm_op) | |
5946 | +#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , asm_op##u, , __OVERFLOW_EXTABLE) \ | |
5947 | + __ATOMIC_OP_RETURN(op, _unchecked, asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE) | |
5948 | ||
5949 | -ATOMIC_OPS(add, +=, addu) | |
5950 | -ATOMIC_OPS(sub, -=, subu) | |
5951 | +#define ATOMIC_OPS(op, asm_op) \ | |
5952 | + ATOMIC_OP(op, asm_op) \ | |
5953 | + ATOMIC_OP_RETURN(op, asm_op) | |
5954 | + | |
5955 | +ATOMIC_OPS(add, add) | |
5956 | +ATOMIC_OPS(sub, sub) | |
5957 | ||
5958 | #undef ATOMIC_OPS | |
5959 | #undef ATOMIC_OP_RETURN | |
5960 | +#undef __ATOMIC_OP_RETURN | |
5961 | #undef ATOMIC_OP | |
5962 | +#undef __ATOMIC_OP | |
5963 | ||
5964 | /* | |
5965 | * atomic_sub_if_positive - conditionally subtract integer from atomic variable | |
5966 | @@ -146,7 +215,7 @@ ATOMIC_OPS(sub, -=, subu) | |
5967 | * Atomically test @v and subtract @i if @v is greater or equal than @i. | |
5968 | * The function returns the old value of @v minus @i. | |
5969 | */ | |
5970 | -static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) | |
5971 | +static __inline__ int atomic_sub_if_positive(int i, atomic_t *v) | |
5972 | { | |
5973 | int result; | |
5974 | ||
5975 | @@ -203,8 +272,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) | |
5976 | return result; | |
5977 | } | |
5978 | ||
5979 | -#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) | |
5980 | -#define atomic_xchg(v, new) (xchg(&((v)->counter), (new))) | |
5981 | +static inline int atomic_cmpxchg(atomic_t *v, int old, int new) | |
5982 | +{ | |
5983 | + return cmpxchg(&v->counter, old, new); | |
5984 | +} | |
5985 | + | |
5986 | +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, | |
5987 | + int new) | |
5988 | +{ | |
5989 | + return cmpxchg(&(v->counter), old, new); | |
5990 | +} | |
5991 | + | |
5992 | +static inline int atomic_xchg(atomic_t *v, int new) | |
5993 | +{ | |
5994 | + return xchg(&v->counter, new); | |
5995 | +} | |
5996 | + | |
5997 | +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new) | |
5998 | +{ | |
5999 | + return xchg(&(v->counter), new); | |
6000 | +} | |
6001 | ||
6002 | /** | |
6003 | * __atomic_add_unless - add unless the number is a given value | |
6004 | @@ -232,6 +319,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) | |
6005 | ||
6006 | #define atomic_dec_return(v) atomic_sub_return(1, (v)) | |
6007 | #define atomic_inc_return(v) atomic_add_return(1, (v)) | |
6008 | +static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v) | |
6009 | +{ | |
6010 | + return atomic_add_return_unchecked(1, v); | |
6011 | +} | |
6012 | ||
6013 | /* | |
6014 | * atomic_sub_and_test - subtract value from variable and test result | |
6015 | @@ -253,6 +344,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) | |
6016 | * other cases. | |
6017 | */ | |
6018 | #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) | |
6019 | +static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v) | |
6020 | +{ | |
6021 | + return atomic_add_return_unchecked(1, v) == 0; | |
6022 | +} | |
6023 | ||
6024 | /* | |
6025 | * atomic_dec_and_test - decrement by 1 and test | |
6026 | @@ -277,6 +372,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) | |
6027 | * Atomically increments @v by 1. | |
6028 | */ | |
6029 | #define atomic_inc(v) atomic_add(1, (v)) | |
6030 | +static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v) | |
6031 | +{ | |
6032 | + atomic_add_unchecked(1, v); | |
6033 | +} | |
6034 | ||
6035 | /* | |
6036 | * atomic_dec - decrement and test | |
6037 | @@ -285,6 +384,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) | |
6038 | * Atomically decrements @v by 1. | |
6039 | */ | |
6040 | #define atomic_dec(v) atomic_sub(1, (v)) | |
6041 | +static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v) | |
6042 | +{ | |
6043 | + atomic_sub_unchecked(1, v); | |
6044 | +} | |
6045 | ||
6046 | /* | |
6047 | * atomic_add_negative - add and test if negative | |
6048 | @@ -306,54 +409,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) | |
6049 | * @v: pointer of type atomic64_t | |
6050 | * | |
6051 | */ | |
6052 | -#define atomic64_read(v) ACCESS_ONCE((v)->counter) | |
6053 | +static inline long atomic64_read(const atomic64_t *v) | |
6054 | +{ | |
6055 | + return ACCESS_ONCE(v->counter); | |
6056 | +} | |
6057 | + | |
6058 | +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v) | |
6059 | +{ | |
6060 | + return ACCESS_ONCE(v->counter); | |
6061 | +} | |
6062 | ||
6063 | /* | |
6064 | * atomic64_set - set atomic variable | |
6065 | * @v: pointer of type atomic64_t | |
6066 | * @i: required value | |
6067 | */ | |
6068 | -#define atomic64_set(v, i) ((v)->counter = (i)) | |
6069 | +static inline void atomic64_set(atomic64_t *v, long i) | |
6070 | +{ | |
6071 | + v->counter = i; | |
6072 | +} | |
6073 | ||
6074 | -#define ATOMIC64_OP(op, c_op, asm_op) \ | |
6075 | -static __inline__ void atomic64_##op(long i, atomic64_t * v) \ | |
6076 | +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i) | |
6077 | +{ | |
6078 | + v->counter = i; | |
6079 | +} | |
6080 | + | |
6081 | +#define __ATOMIC64_OP(op, suffix, asm_op, extable) \ | |
6082 | +static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v) \ | |
6083 | { \ | |
6084 | if (kernel_uses_llsc && R10000_LLSC_WAR) { \ | |
6085 | long temp; \ | |
6086 | \ | |
6087 | __asm__ __volatile__( \ | |
6088 | - " .set arch=r4000 \n" \ | |
6089 | - "1: lld %0, %1 # atomic64_" #op " \n" \ | |
6090 | - " " #asm_op " %0, %2 \n" \ | |
6091 | + " .set mips3 \n" \ | |
6092 | + "1: lld %0, %1 # atomic64_" #op #suffix "\n" \ | |
6093 | + "2: " #asm_op " %0, %2 \n" \ | |
6094 | " scd %0, %1 \n" \ | |
6095 | " beqzl %0, 1b \n" \ | |
6096 | + extable \ | |
6097 | " .set mips0 \n" \ | |
6098 | : "=&r" (temp), "+m" (v->counter) \ | |
6099 | : "Ir" (i)); \ | |
6100 | } else if (kernel_uses_llsc) { \ | |
6101 | long temp; \ | |
6102 | \ | |
6103 | - do { \ | |
6104 | - __asm__ __volatile__( \ | |
6105 | - " .set arch=r4000 \n" \ | |
6106 | - " lld %0, %1 # atomic64_" #op "\n" \ | |
6107 | - " " #asm_op " %0, %2 \n" \ | |
6108 | - " scd %0, %1 \n" \ | |
6109 | - " .set mips0 \n" \ | |
6110 | - : "=&r" (temp), "+m" (v->counter) \ | |
6111 | - : "Ir" (i)); \ | |
6112 | - } while (unlikely(!temp)); \ | |
6113 | + __asm__ __volatile__( \ | |
6114 | + " .set mips3 \n" \ | |
6115 | + "1: lld %0, %1 # atomic64_" #op #suffix "\n" \ | |
6116 | + "2: " #asm_op " %0, %2 \n" \ | |
6117 | + " scd %0, %1 \n" \ | |
6118 | + " beqz %0, 1b \n" \ | |
6119 | + extable \ | |
6120 | + " .set mips0 \n" \ | |
6121 | + : "=&r" (temp), "+m" (v->counter) \ | |
6122 | + : "Ir" (i)); \ | |
6123 | } else { \ | |
6124 | unsigned long flags; \ | |
6125 | \ | |
6126 | raw_local_irq_save(flags); \ | |
6127 | - v->counter c_op i; \ | |
6128 | + __asm__ __volatile__( \ | |
6129 | + "2: " #asm_op " %0, %1 \n" \ | |
6130 | + extable \ | |
6131 | + : "+r" (v->counter) : "Ir" (i)); \ | |
6132 | raw_local_irq_restore(flags); \ | |
6133 | } \ | |
6134 | } \ | |
6135 | ||
6136 | -#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \ | |
6137 | -static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ | |
6138 | +#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , asm_op##u) \ | |
6139 | + __ATOMIC64_OP(op, _unchecked, asm_op) | |
6140 | + | |
6141 | +#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable) \ | |
6142 | +static inline long atomic64_##op##_return##suffix(long i, atomic64##suffix##_t * v)\ | |
6143 | { \ | |
6144 | long result; \ | |
6145 | \ | |
6146 | @@ -363,38 +489,48 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ | |
6147 | long temp; \ | |
6148 | \ | |
6149 | __asm__ __volatile__( \ | |
6150 | - " .set arch=r4000 \n" \ | |
6151 | + " .set mips3 \n" \ | |
6152 | "1: lld %1, %2 # atomic64_" #op "_return\n" \ | |
6153 | - " " #asm_op " %0, %1, %3 \n" \ | |
6154 | + "2: " #asm_op " %0, %1, %3 \n" \ | |
6155 | " scd %0, %2 \n" \ | |
6156 | " beqzl %0, 1b \n" \ | |
6157 | - " " #asm_op " %0, %1, %3 \n" \ | |
6158 | + post_op \ | |
6159 | + extable \ | |
6160 | + "4: " #asm_op " %0, %1, %3 \n" \ | |
6161 | + "5: \n" \ | |
6162 | " .set mips0 \n" \ | |
6163 | : "=&r" (result), "=&r" (temp), "+m" (v->counter) \ | |
6164 | : "Ir" (i)); \ | |
6165 | } else if (kernel_uses_llsc) { \ | |
6166 | long temp; \ | |
6167 | \ | |
6168 | - do { \ | |
6169 | - __asm__ __volatile__( \ | |
6170 | - " .set arch=r4000 \n" \ | |
6171 | - " lld %1, %2 # atomic64_" #op "_return\n" \ | |
6172 | - " " #asm_op " %0, %1, %3 \n" \ | |
6173 | - " scd %0, %2 \n" \ | |
6174 | - " .set mips0 \n" \ | |
6175 | - : "=&r" (result), "=&r" (temp), "=m" (v->counter) \ | |
6176 | - : "Ir" (i), "m" (v->counter) \ | |
6177 | - : "memory"); \ | |
6178 | - } while (unlikely(!result)); \ | |
6179 | + __asm__ __volatile__( \ | |
6180 | + " .set mips3 \n" \ | |
6181 | + "1: lld %1, %2 # atomic64_" #op "_return" #suffix "\n" \ | |
6182 | + "2: " #asm_op " %0, %1, %3 \n" \ | |
6183 | + " scd %0, %2 \n" \ | |
6184 | + " beqz %0, 1b \n" \ | |
6185 | + post_op \ | |
6186 | + extable \ | |
6187 | + "4: " #asm_op " %0, %1, %3 \n" \ | |
6188 | + "5: \n" \ | |
6189 | + " .set mips0 \n" \ | |
6190 | + : "=&r" (result), "=&r" (temp), "=m" (v->counter) \ | |
6191 | + : "Ir" (i), "m" (v->counter) \ | |
6192 | + : "memory"); \ | |
6193 | \ | |
6194 | result = temp; result c_op i; \ | |
6195 | } else { \ | |
6196 | unsigned long flags; \ | |
6197 | \ | |
6198 | raw_local_irq_save(flags); \ | |
6199 | - result = v->counter; \ | |
6200 | - result c_op i; \ | |
6201 | - v->counter = result; \ | |
6202 | + __asm__ __volatile__( \ | |
6203 | + " ld %0, %1 \n" \ | |
6204 | + "2: " #asm_op " %0, %1, %2 \n" \ | |
6205 | + " sd %0, %1 \n" \ | |
6206 | + "3: \n" \ | |
6207 | + extable \ | |
6208 | + : "=&r" (result), "+m" (v->counter) : "Ir" (i)); \ | |
6209 | raw_local_irq_restore(flags); \ | |
6210 | } \ | |
6211 | \ | |
6212 | @@ -403,16 +539,23 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ | |
6213 | return result; \ | |
6214 | } | |
6215 | ||
6216 | -#define ATOMIC64_OPS(op, c_op, asm_op) \ | |
6217 | - ATOMIC64_OP(op, c_op, asm_op) \ | |
6218 | - ATOMIC64_OP_RETURN(op, c_op, asm_op) | |
6219 | +#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , asm_op##u, , __OVERFLOW_EXTABLE) \ | |
6220 | + __ATOMIC64_OP_RETURN(op, _unchecked, asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE) | |
6221 | ||
6222 | -ATOMIC64_OPS(add, +=, daddu) | |
6223 | -ATOMIC64_OPS(sub, -=, dsubu) | |
6224 | +#define ATOMIC64_OPS(op, asm_op) \ | |
6225 | + ATOMIC64_OP(op, asm_op) \ | |
6226 | + ATOMIC64_OP_RETURN(op, asm_op) | |
6227 | + | |
6228 | +ATOMIC64_OPS(add, dadd) | |
6229 | +ATOMIC64_OPS(sub, dsub) | |
6230 | ||
6231 | #undef ATOMIC64_OPS | |
6232 | #undef ATOMIC64_OP_RETURN | |
6233 | +#undef __ATOMIC64_OP_RETURN | |
6234 | #undef ATOMIC64_OP | |
6235 | +#undef __ATOMIC64_OP | |
6236 | +#undef __OVERFLOW_EXTABLE | |
6237 | +#undef __OVERFLOW_POST | |
6238 | ||
6239 | /* | |
6240 | * atomic64_sub_if_positive - conditionally subtract integer from atomic variable | |
6241 | @@ -422,7 +565,7 @@ ATOMIC64_OPS(sub, -=, dsubu) | |
6242 | * Atomically test @v and subtract @i if @v is greater or equal than @i. | |
6243 | * The function returns the old value of @v minus @i. | |
6244 | */ | |
6245 | -static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) | |
6246 | +static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v) | |
6247 | { | |
6248 | long result; | |
6249 | ||
6250 | @@ -479,9 +622,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) | |
6251 | return result; | |
6252 | } | |
6253 | ||
6254 | -#define atomic64_cmpxchg(v, o, n) \ | |
6255 | - ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) | |
6256 | -#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new))) | |
6257 | +static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) | |
6258 | +{ | |
6259 | + return cmpxchg(&v->counter, old, new); | |
6260 | +} | |
6261 | + | |
6262 | +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, | |
6263 | + long new) | |
6264 | +{ | |
6265 | + return cmpxchg(&(v->counter), old, new); | |
6266 | +} | |
6267 | + | |
6268 | +static inline long atomic64_xchg(atomic64_t *v, long new) | |
6269 | +{ | |
6270 | + return xchg(&v->counter, new); | |
6271 | +} | |
6272 | + | |
6273 | +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new) | |
6274 | +{ | |
6275 | + return xchg(&(v->counter), new); | |
6276 | +} | |
6277 | ||
6278 | /** | |
6279 | * atomic64_add_unless - add unless the number is a given value | |
6280 | @@ -511,6 +671,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) | |
6281 | ||
6282 | #define atomic64_dec_return(v) atomic64_sub_return(1, (v)) | |
6283 | #define atomic64_inc_return(v) atomic64_add_return(1, (v)) | |
6284 | +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v)) | |
6285 | ||
6286 | /* | |
6287 | * atomic64_sub_and_test - subtract value from variable and test result | |
6288 | @@ -532,6 +693,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) | |
6289 | * other cases. | |
6290 | */ | |
6291 | #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) | |
6292 | +#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0) | |
6293 | ||
6294 | /* | |
6295 | * atomic64_dec_and_test - decrement by 1 and test | |
6296 | @@ -556,6 +718,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) | |
6297 | * Atomically increments @v by 1. | |
6298 | */ | |
6299 | #define atomic64_inc(v) atomic64_add(1, (v)) | |
6300 | +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v)) | |
6301 | ||
6302 | /* | |
6303 | * atomic64_dec - decrement and test | |
6304 | @@ -564,6 +727,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) | |
6305 | * Atomically decrements @v by 1. | |
6306 | */ | |
6307 | #define atomic64_dec(v) atomic64_sub(1, (v)) | |
6308 | +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v)) | |
6309 | ||
6310 | /* | |
6311 | * atomic64_add_negative - add and test if negative | |
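The atomic.h rework above follows one pattern throughout: each operation gets a default, overflow-checked flavour (the trapping path wired up through the "2:" labels, _ASM_EXTABLE and, further down in this patch, the PAX_REFCOUNT handling added to do_ov() in traps.c) plus an _unchecked flavour for counters that are genuinely allowed to wrap, such as irq_err_count. The MIPS ll/sc assembly makes the idea hard to see, so here is a portable sketch of the same checked/unchecked split using GCC builtins; it only illustrates the concept and is not the patch's implementation.

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { volatile int counter; } atomic_t;            /* checked */
    typedef struct { volatile int counter; } atomic_unchecked_t;  /* may wrap */

    static void atomic_add(int i, atomic_t *v)
    {
        int old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
        int sum;

        do {
            if (__builtin_add_overflow(old, i, &sum)) {
                /* the patch raises a trap and reports through the refcount
                 * handler; a userspace sketch can only bail out */
                fprintf(stderr, "refcount overflow\n");
                abort();
            }
        } while (!__atomic_compare_exchange_n(&v->counter, &old, sum, 0,
                                              __ATOMIC_RELAXED, __ATOMIC_RELAXED));
    }

    static void atomic_add_unchecked(int i, atomic_unchecked_t *v)
    {
        __atomic_fetch_add(&v->counter, i, __ATOMIC_RELAXED);  /* wraps silently */
    }

    int main(void)
    {
        atomic_unchecked_t stat = { 0x7fffffff };
        atomic_t ref = { 0x7fffffff };

        atomic_add_unchecked(1, &stat);     /* fine: statistics may wrap */
        printf("stat wrapped to %d\n", stat.counter);
        atomic_add(1, &ref);                /* detected: aborts instead of wrapping */
        return 0;
    }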
6312 | diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h | |
6313 | index d0101dd..266982c 100644 | |
6314 | --- a/arch/mips/include/asm/barrier.h | |
6315 | +++ b/arch/mips/include/asm/barrier.h | |
6316 | @@ -184,7 +184,7 @@ | |
6317 | do { \ | |
6318 | compiletime_assert_atomic_type(*p); \ | |
6319 | smp_mb(); \ | |
6320 | - ACCESS_ONCE(*p) = (v); \ | |
6321 | + ACCESS_ONCE_RW(*p) = (v); \ | |
6322 | } while (0) | |
6323 | ||
6324 | #define smp_load_acquire(p) \ | |
6325 | diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h | |
6326 | index b4db69f..8f3b093 100644 | |
6327 | --- a/arch/mips/include/asm/cache.h | |
6328 | +++ b/arch/mips/include/asm/cache.h | |
6329 | @@ -9,10 +9,11 @@ | |
6330 | #ifndef _ASM_CACHE_H | |
6331 | #define _ASM_CACHE_H | |
6332 | ||
6333 | +#include <linux/const.h> | |
6334 | #include <kmalloc.h> | |
6335 | ||
6336 | #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT | |
6337 | -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) | |
6338 | +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) | |
6339 | ||
6340 | #define SMP_CACHE_SHIFT L1_CACHE_SHIFT | |
6341 | #define SMP_CACHE_BYTES L1_CACHE_BYTES | |
6342 | diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h | |
6343 | index 1d38fe0..9beabc9 100644 | |
6344 | --- a/arch/mips/include/asm/elf.h | |
6345 | +++ b/arch/mips/include/asm/elf.h | |
6346 | @@ -381,13 +381,16 @@ extern const char *__elf_platform; | |
6347 | #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) | |
6348 | #endif | |
6349 | ||
6350 | +#ifdef CONFIG_PAX_ASLR | |
6351 | +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL) | |
6352 | + | |
6353 | +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) | |
6354 | +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) | |
6355 | +#endif | |
6356 | + | |
6357 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 | |
6358 | struct linux_binprm; | |
6359 | extern int arch_setup_additional_pages(struct linux_binprm *bprm, | |
6360 | int uses_interp); | |
6361 | ||
6362 | -struct mm_struct; | |
6363 | -extern unsigned long arch_randomize_brk(struct mm_struct *mm); | |
6364 | -#define arch_randomize_brk arch_randomize_brk | |
6365 | - | |
6366 | #endif /* _ASM_ELF_H */ | |
6367 | diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h | |
6368 | index c1f6afa..38cc6e9 100644 | |
6369 | --- a/arch/mips/include/asm/exec.h | |
6370 | +++ b/arch/mips/include/asm/exec.h | |
6371 | @@ -12,6 +12,6 @@ | |
6372 | #ifndef _ASM_EXEC_H | |
6373 | #define _ASM_EXEC_H | |
6374 | ||
6375 | -extern unsigned long arch_align_stack(unsigned long sp); | |
6376 | +#define arch_align_stack(x) ((x) & ~0xfUL) | |
6377 | ||
6378 | #endif /* _ASM_EXEC_H */ | |
6379 | diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h | |
6380 | index 9e8ef59..1139d6b 100644 | |
6381 | --- a/arch/mips/include/asm/hw_irq.h | |
6382 | +++ b/arch/mips/include/asm/hw_irq.h | |
6383 | @@ -10,7 +10,7 @@ | |
6384 | ||
6385 | #include <linux/atomic.h> | |
6386 | ||
6387 | -extern atomic_t irq_err_count; | |
6388 | +extern atomic_unchecked_t irq_err_count; | |
6389 | ||
6390 | /* | |
6391 | * interrupt-retrigger: NOP for now. This may not be appropriate for all | |
6392 | diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h | |
6393 | index 46dfc3c..a16b13a 100644 | |
6394 | --- a/arch/mips/include/asm/local.h | |
6395 | +++ b/arch/mips/include/asm/local.h | |
6396 | @@ -12,15 +12,25 @@ typedef struct | |
6397 | atomic_long_t a; | |
6398 | } local_t; | |
6399 | ||
6400 | +typedef struct { | |
6401 | + atomic_long_unchecked_t a; | |
6402 | +} local_unchecked_t; | |
6403 | + | |
6404 | #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } | |
6405 | ||
6406 | #define local_read(l) atomic_long_read(&(l)->a) | |
6407 | +#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a) | |
6408 | #define local_set(l, i) atomic_long_set(&(l)->a, (i)) | |
6409 | +#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i)) | |
6410 | ||
6411 | #define local_add(i, l) atomic_long_add((i), (&(l)->a)) | |
6412 | +#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a)) | |
6413 | #define local_sub(i, l) atomic_long_sub((i), (&(l)->a)) | |
6414 | +#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a)) | |
6415 | #define local_inc(l) atomic_long_inc(&(l)->a) | |
6416 | +#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a) | |
6417 | #define local_dec(l) atomic_long_dec(&(l)->a) | |
6418 | +#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a) | |
6419 | ||
6420 | /* | |
6421 | * Same as above, but return the result value | |
6422 | @@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l) | |
6423 | return result; | |
6424 | } | |
6425 | ||
6426 | +static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l) | |
6427 | +{ | |
6428 | + unsigned long result; | |
6429 | + | |
6430 | + if (kernel_uses_llsc && R10000_LLSC_WAR) { | |
6431 | + unsigned long temp; | |
6432 | + | |
6433 | + __asm__ __volatile__( | |
6434 | + " .set mips3 \n" | |
6435 | + "1:" __LL "%1, %2 # local_add_return \n" | |
6436 | + " addu %0, %1, %3 \n" | |
6437 | + __SC "%0, %2 \n" | |
6438 | + " beqzl %0, 1b \n" | |
6439 | + " addu %0, %1, %3 \n" | |
6440 | + " .set mips0 \n" | |
6441 | + : "=&r" (result), "=&r" (temp), "=m" (l->a.counter) | |
6442 | + : "Ir" (i), "m" (l->a.counter) | |
6443 | + : "memory"); | |
6444 | + } else if (kernel_uses_llsc) { | |
6445 | + unsigned long temp; | |
6446 | + | |
6447 | + __asm__ __volatile__( | |
6448 | + " .set mips3 \n" | |
6449 | + "1:" __LL "%1, %2 # local_add_return \n" | |
6450 | + " addu %0, %1, %3 \n" | |
6451 | + __SC "%0, %2 \n" | |
6452 | + " beqz %0, 1b \n" | |
6453 | + " addu %0, %1, %3 \n" | |
6454 | + " .set mips0 \n" | |
6455 | + : "=&r" (result), "=&r" (temp), "=m" (l->a.counter) | |
6456 | + : "Ir" (i), "m" (l->a.counter) | |
6457 | + : "memory"); | |
6458 | + } else { | |
6459 | + unsigned long flags; | |
6460 | + | |
6461 | + local_irq_save(flags); | |
6462 | + result = l->a.counter; | |
6463 | + result += i; | |
6464 | + l->a.counter = result; | |
6465 | + local_irq_restore(flags); | |
6466 | + } | |
6467 | + | |
6468 | + return result; | |
6469 | +} | |
6470 | + | |
6471 | static __inline__ long local_sub_return(long i, local_t * l) | |
6472 | { | |
6473 | unsigned long result; | |
6474 | @@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l) | |
6475 | ||
6476 | #define local_cmpxchg(l, o, n) \ | |
6477 | ((long)cmpxchg_local(&((l)->a.counter), (o), (n))) | |
6478 | +#define local_cmpxchg_unchecked(l, o, n) \ | |
6479 | + ((long)cmpxchg_local(&((l)->a.counter), (o), (n))) | |
6480 | #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n))) | |
6481 | ||
6482 | /** | |
6483 | diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h | |
6484 | index 3be8180..c4798d5 100644 | |
6485 | --- a/arch/mips/include/asm/page.h | |
6486 | +++ b/arch/mips/include/asm/page.h | |
6487 | @@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from, | |
6488 | #ifdef CONFIG_CPU_MIPS32 | |
6489 | typedef struct { unsigned long pte_low, pte_high; } pte_t; | |
6490 | #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) | |
6491 | - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; }) | |
6492 | + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; }) | |
6493 | #else | |
6494 | typedef struct { unsigned long long pte; } pte_t; | |
6495 | #define pte_val(x) ((x).pte) | |
6496 | diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h | |
6497 | index b336037..5b874cc 100644 | |
6498 | --- a/arch/mips/include/asm/pgalloc.h | |
6499 | +++ b/arch/mips/include/asm/pgalloc.h | |
6500 | @@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) | |
6501 | { | |
6502 | set_pud(pud, __pud((unsigned long)pmd)); | |
6503 | } | |
6504 | + | |
6505 | +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) | |
6506 | +{ | |
6507 | + pud_populate(mm, pud, pmd); | |
6508 | +} | |
6509 | #endif | |
6510 | ||
6511 | /* | |
6512 | diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h | |
6513 | index d6d1928..ce4f822 100644 | |
6514 | --- a/arch/mips/include/asm/pgtable.h | |
6515 | +++ b/arch/mips/include/asm/pgtable.h | |
6516 | @@ -20,6 +20,9 @@ | |
6517 | #include <asm/io.h> | |
6518 | #include <asm/pgtable-bits.h> | |
6519 | ||
6520 | +#define ktla_ktva(addr) (addr) | |
6521 | +#define ktva_ktla(addr) (addr) | |
6522 | + | |
6523 | struct mm_struct; | |
6524 | struct vm_area_struct; | |
6525 | ||
6526 | diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h | |
6527 | index 7de8658..c109224 100644 | |
6528 | --- a/arch/mips/include/asm/thread_info.h | |
6529 | +++ b/arch/mips/include/asm/thread_info.h | |
6530 | @@ -105,6 +105,9 @@ static inline struct thread_info *current_thread_info(void) | |
6531 | #define TIF_SECCOMP 4 /* secure computing */ | |
6532 | #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */ | |
6533 | #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ | |
6534 | +/* li takes a 32bit immediate */ | |
6535 | +#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */ | |
6536 | + | |
6537 | #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ | |
6538 | #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ | |
6539 | #define TIF_NOHZ 19 /* in adaptive nohz mode */ | |
6540 | @@ -138,14 +141,16 @@ static inline struct thread_info *current_thread_info(void) | |
6541 | #define _TIF_USEDMSA (1<<TIF_USEDMSA) | |
6542 | #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE) | |
6543 | #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) | |
6544 | +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID) | |
6545 | ||
6546 | #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \ | |
6547 | _TIF_SYSCALL_AUDIT | \ | |
6548 | - _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP) | |
6549 | + _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ | |
6550 | + _TIF_GRSEC_SETXID) | |
6551 | ||
6552 | /* work to do in syscall_trace_leave() */ | |
6553 | #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \ | |
6554 | - _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT) | |
6555 | + _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID) | |
6556 | ||
6557 | /* work to do on interrupt/exception return */ | |
6558 | #define _TIF_WORK_MASK \ | |
6559 | @@ -153,7 +158,7 @@ static inline struct thread_info *current_thread_info(void) | |
6560 | /* work to do on any return to u-space */ | |
6561 | #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \ | |
6562 | _TIF_WORK_SYSCALL_EXIT | \ | |
6563 | - _TIF_SYSCALL_TRACEPOINT) | |
6564 | + _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID) | |
6565 | ||
6566 | /* | |
6567 | * We stash processor id into a COP0 register to retrieve it fast | |
6568 | diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h | |
6569 | index 22a5624..7c96295 100644 | |
6570 | --- a/arch/mips/include/asm/uaccess.h | |
6571 | +++ b/arch/mips/include/asm/uaccess.h | |
6572 | @@ -130,6 +130,7 @@ extern u64 __ua_limit; | |
6573 | __ok == 0; \ | |
6574 | }) | |
6575 | ||
6576 | +#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size)) | |
6577 | #define access_ok(type, addr, size) \ | |
6578 | likely(__access_ok((addr), (size), __access_mask)) | |
6579 | ||
6580 | diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c | |
6581 | index 1188e00..41cf144 100644 | |
6582 | --- a/arch/mips/kernel/binfmt_elfn32.c | |
6583 | +++ b/arch/mips/kernel/binfmt_elfn32.c | |
6584 | @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; | |
6585 | #undef ELF_ET_DYN_BASE | |
6586 | #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) | |
6587 | ||
6588 | +#ifdef CONFIG_PAX_ASLR | |
6589 | +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL) | |
6590 | + | |
6591 | +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) | |
6592 | +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) | |
6593 | +#endif | |
6594 | + | |
6595 | #include <asm/processor.h> | |
6596 | #include <linux/module.h> | |
6597 | #include <linux/elfcore.h> | |
6598 | diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c | |
6599 | index 9287678..f870e47 100644 | |
6600 | --- a/arch/mips/kernel/binfmt_elfo32.c | |
6601 | +++ b/arch/mips/kernel/binfmt_elfo32.c | |
6602 | @@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; | |
6603 | #undef ELF_ET_DYN_BASE | |
6604 | #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) | |
6605 | ||
6606 | +#ifdef CONFIG_PAX_ASLR | |
6607 | +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL) | |
6608 | + | |
6609 | +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) | |
6610 | +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) | |
6611 | +#endif | |
6612 | + | |
6613 | #include <asm/processor.h> | |
6614 | ||
6615 | #include <linux/module.h> | |
6616 | diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c | |
6617 | index 50b3648..c2f3cec 100644 | |
6618 | --- a/arch/mips/kernel/i8259.c | |
6619 | +++ b/arch/mips/kernel/i8259.c | |
6620 | @@ -201,7 +201,7 @@ spurious_8259A_irq: | |
6621 | printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq); | |
6622 | spurious_irq_mask |= irqmask; | |
6623 | } | |
6624 | - atomic_inc(&irq_err_count); | |
6625 | + atomic_inc_unchecked(&irq_err_count); | |
6626 | /* | |
6627 | * Theoretically we do not have to handle this IRQ, | |
6628 | * but in Linux this does not cause problems and is | |
6629 | diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c | |
6630 | index 44a1f79..2bd6aa3 100644 | |
6631 | --- a/arch/mips/kernel/irq-gt641xx.c | |
6632 | +++ b/arch/mips/kernel/irq-gt641xx.c | |
6633 | @@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void) | |
6634 | } | |
6635 | } | |
6636 | ||
6637 | - atomic_inc(&irq_err_count); | |
6638 | + atomic_inc_unchecked(&irq_err_count); | |
6639 | } | |
6640 | ||
6641 | void __init gt641xx_irq_init(void) | |
6642 | diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c | |
6643 | index d2bfbc2..a8eacd2 100644 | |
6644 | --- a/arch/mips/kernel/irq.c | |
6645 | +++ b/arch/mips/kernel/irq.c | |
6646 | @@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq) | |
6647 | printk("unexpected IRQ # %d\n", irq); | |
6648 | } | |
6649 | ||
6650 | -atomic_t irq_err_count; | |
6651 | +atomic_unchecked_t irq_err_count; | |
6652 | ||
6653 | int arch_show_interrupts(struct seq_file *p, int prec) | |
6654 | { | |
6655 | - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); | |
6656 | + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count)); | |
6657 | return 0; | |
6658 | } | |
6659 | ||
6660 | asmlinkage void spurious_interrupt(void) | |
6661 | { | |
6662 | - atomic_inc(&irq_err_count); | |
6663 | + atomic_inc_unchecked(&irq_err_count); | |
6664 | } | |
6665 | ||
6666 | void __init init_IRQ(void) | |
6667 | @@ -109,7 +109,10 @@ void __init init_IRQ(void) | |
6668 | #endif | |
6669 | } | |
6670 | ||
6671 | + | |
6672 | #ifdef DEBUG_STACKOVERFLOW | |
6673 | +extern void gr_handle_kernel_exploit(void); | |
6674 | + | |
6675 | static inline void check_stack_overflow(void) | |
6676 | { | |
6677 | unsigned long sp; | |
6678 | @@ -125,6 +128,7 @@ static inline void check_stack_overflow(void) | |
6679 | printk("do_IRQ: stack overflow: %ld\n", | |
6680 | sp - sizeof(struct thread_info)); | |
6681 | dump_stack(); | |
6682 | + gr_handle_kernel_exploit(); | |
6683 | } | |
6684 | } | |
6685 | #else | |
6686 | diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c | |
6687 | index 0614717..002fa43 100644 | |
6688 | --- a/arch/mips/kernel/pm-cps.c | |
6689 | +++ b/arch/mips/kernel/pm-cps.c | |
6690 | @@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state) | |
6691 | nc_core_ready_count = nc_addr; | |
6692 | ||
6693 | /* Ensure ready_count is zero-initialised before the assembly runs */ | |
6694 | - ACCESS_ONCE(*nc_core_ready_count) = 0; | |
6695 | + ACCESS_ONCE_RW(*nc_core_ready_count) = 0; | |
6696 | coupled_barrier(&per_cpu(pm_barrier, core), online); | |
6697 | ||
6698 | /* Run the generated entry code */ | |
6699 | diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c | |
6700 | index 636b074..8fbb91f 100644 | |
6701 | --- a/arch/mips/kernel/process.c | |
6702 | +++ b/arch/mips/kernel/process.c | |
6703 | @@ -520,15 +520,3 @@ unsigned long get_wchan(struct task_struct *task) | |
6704 | out: | |
6705 | return pc; | |
6706 | } | |
6707 | - | |
6708 | -/* | |
6709 | - * Don't forget that the stack pointer must be aligned on a 8 bytes | |
6710 | - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI. | |
6711 | - */ | |
6712 | -unsigned long arch_align_stack(unsigned long sp) | |
6713 | -{ | |
6714 | - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) | |
6715 | - sp -= get_random_int() & ~PAGE_MASK; | |
6716 | - | |
6717 | - return sp & ALMASK; | |
6718 | -} | |
6719 | diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c | |
6720 | index 9d1487d..10c5da5 100644 | |
6721 | --- a/arch/mips/kernel/ptrace.c | |
6722 | +++ b/arch/mips/kernel/ptrace.c | |
6723 | @@ -761,6 +761,10 @@ long arch_ptrace(struct task_struct *child, long request, | |
6724 | return ret; | |
6725 | } | |
6726 | ||
6727 | +#ifdef CONFIG_GRKERNSEC_SETXID | |
6728 | +extern void gr_delayed_cred_worker(void); | |
6729 | +#endif | |
6730 | + | |
6731 | /* | |
6732 | * Notification of system call entry/exit | |
6733 | * - triggered by current->work.syscall_trace | |
6734 | @@ -777,6 +781,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall) | |
6735 | tracehook_report_syscall_entry(regs)) | |
6736 | ret = -1; | |
6737 | ||
6738 | +#ifdef CONFIG_GRKERNSEC_SETXID | |
6739 | + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) | |
6740 | + gr_delayed_cred_worker(); | |
6741 | +#endif | |
6742 | + | |
6743 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) | |
6744 | trace_sys_enter(regs, regs->regs[2]); | |
6745 | ||
6746 | diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c | |
6747 | index 07fc524..b9d7f28 100644 | |
6748 | --- a/arch/mips/kernel/reset.c | |
6749 | +++ b/arch/mips/kernel/reset.c | |
6750 | @@ -13,6 +13,7 @@ | |
6751 | #include <linux/reboot.h> | |
6752 | ||
6753 | #include <asm/reboot.h> | |
6754 | +#include <asm/bug.h> | |
6755 | ||
6756 | /* | |
6757 | * Urgs ... Too many MIPS machines to handle this in a generic way. | |
6758 | @@ -29,16 +30,19 @@ void machine_restart(char *command) | |
6759 | { | |
6760 | if (_machine_restart) | |
6761 | _machine_restart(command); | |
6762 | + BUG(); | |
6763 | } | |
6764 | ||
6765 | void machine_halt(void) | |
6766 | { | |
6767 | if (_machine_halt) | |
6768 | _machine_halt(); | |
6769 | + BUG(); | |
6770 | } | |
6771 | ||
6772 | void machine_power_off(void) | |
6773 | { | |
6774 | if (pm_power_off) | |
6775 | pm_power_off(); | |
6776 | + BUG(); | |
6777 | } | |
6778 | diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c | |
6779 | index 2242bdd..b284048 100644 | |
6780 | --- a/arch/mips/kernel/sync-r4k.c | |
6781 | +++ b/arch/mips/kernel/sync-r4k.c | |
6782 | @@ -18,8 +18,8 @@ | |
6783 | #include <asm/mipsregs.h> | |
6784 | ||
6785 | static atomic_t count_start_flag = ATOMIC_INIT(0); | |
6786 | -static atomic_t count_count_start = ATOMIC_INIT(0); | |
6787 | -static atomic_t count_count_stop = ATOMIC_INIT(0); | |
6788 | +static atomic_unchecked_t count_count_start = ATOMIC_INIT(0); | |
6789 | +static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0); | |
6790 | static atomic_t count_reference = ATOMIC_INIT(0); | |
6791 | ||
6792 | #define COUNTON 100 | |
6793 | @@ -58,13 +58,13 @@ void synchronise_count_master(int cpu) | |
6794 | ||
6795 | for (i = 0; i < NR_LOOPS; i++) { | |
6796 | /* slaves loop on '!= 2' */ | |
6797 | - while (atomic_read(&count_count_start) != 1) | |
6798 | + while (atomic_read_unchecked(&count_count_start) != 1) | |
6799 | mb(); | |
6800 | - atomic_set(&count_count_stop, 0); | |
6801 | + atomic_set_unchecked(&count_count_stop, 0); | |
6802 | smp_wmb(); | |
6803 | ||
6804 | /* this lets the slaves write their count register */ | |
6805 | - atomic_inc(&count_count_start); | |
6806 | + atomic_inc_unchecked(&count_count_start); | |
6807 | ||
6808 | /* | |
6809 | * Everyone initialises count in the last loop: | |
6810 | @@ -75,11 +75,11 @@ void synchronise_count_master(int cpu) | |
6811 | /* | |
6812 | * Wait for all slaves to leave the synchronization point: | |
6813 | */ | |
6814 | - while (atomic_read(&count_count_stop) != 1) | |
6815 | + while (atomic_read_unchecked(&count_count_stop) != 1) | |
6816 | mb(); | |
6817 | - atomic_set(&count_count_start, 0); | |
6818 | + atomic_set_unchecked(&count_count_start, 0); | |
6819 | smp_wmb(); | |
6820 | - atomic_inc(&count_count_stop); | |
6821 | + atomic_inc_unchecked(&count_count_stop); | |
6822 | } | |
6823 | /* Arrange for an interrupt in a short while */ | |
6824 | write_c0_compare(read_c0_count() + COUNTON); | |
6825 | @@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu) | |
6826 | initcount = atomic_read(&count_reference); | |
6827 | ||
6828 | for (i = 0; i < NR_LOOPS; i++) { | |
6829 | - atomic_inc(&count_count_start); | |
6830 | - while (atomic_read(&count_count_start) != 2) | |
6831 | + atomic_inc_unchecked(&count_count_start); | |
6832 | + while (atomic_read_unchecked(&count_count_start) != 2) | |
6833 | mb(); | |
6834 | ||
6835 | /* | |
6836 | @@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu) | |
6837 | if (i == NR_LOOPS-1) | |
6838 | write_c0_count(initcount); | |
6839 | ||
6840 | - atomic_inc(&count_count_stop); | |
6841 | - while (atomic_read(&count_count_stop) != 2) | |
6842 | + atomic_inc_unchecked(&count_count_stop); | |
6843 | + while (atomic_read_unchecked(&count_count_stop) != 2) | |
6844 | mb(); | |
6845 | } | |
6846 | /* Arrange for an interrupt in a short while */ | |
6847 | diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c | |
6848 | index 22b19c2..c5cc8c4 100644 | |
6849 | --- a/arch/mips/kernel/traps.c | |
6850 | +++ b/arch/mips/kernel/traps.c | |
6851 | @@ -688,7 +688,18 @@ asmlinkage void do_ov(struct pt_regs *regs) | |
6852 | siginfo_t info; | |
6853 | ||
6854 | prev_state = exception_enter(); | |
6855 | - die_if_kernel("Integer overflow", regs); | |
6856 | + if (unlikely(!user_mode(regs))) { | |
6857 | + | |
6858 | +#ifdef CONFIG_PAX_REFCOUNT | |
6859 | + if (fixup_exception(regs)) { | |
6860 | + pax_report_refcount_overflow(regs); | |
6861 | + exception_exit(prev_state); | |
6862 | + return; | |
6863 | + } | |
6864 | +#endif | |
6865 | + | |
6866 | + die("Integer overflow", regs); | |
6867 | + } | |
6868 | ||
6869 | info.si_code = FPE_INTOVF; | |
6870 | info.si_signo = SIGFPE; | |
6871 | diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c | |
6872 | index e3b21e5..ea5ff7c 100644 | |
6873 | --- a/arch/mips/kvm/mips.c | |
6874 | +++ b/arch/mips/kvm/mips.c | |
6875 | @@ -805,7 +805,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) | |
6876 | return r; | |
6877 | } | |
6878 | ||
6879 | -int kvm_arch_init(void *opaque) | |
6880 | +int kvm_arch_init(const void *opaque) | |
6881 | { | |
6882 | if (kvm_mips_callbacks) { | |
6883 | kvm_err("kvm: module already exists\n"); | |
6884 | diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c | |
6885 | index becc42b..9e43d4b 100644 | |
6886 | --- a/arch/mips/mm/fault.c | |
6887 | +++ b/arch/mips/mm/fault.c | |
6888 | @@ -28,6 +28,23 @@ | |
6889 | #include <asm/highmem.h> /* For VMALLOC_END */ | |
6890 | #include <linux/kdebug.h> | |
6891 | ||
6892 | +#ifdef CONFIG_PAX_PAGEEXEC | |
6893 | +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) | |
6894 | +{ | |
6895 | + unsigned long i; | |
6896 | + | |
6897 | + printk(KERN_ERR "PAX: bytes at PC: "); | |
6898 | + for (i = 0; i < 5; i++) { | |
6899 | + unsigned int c; | |
6900 | + if (get_user(c, (unsigned int *)pc+i)) | |
6901 | + printk(KERN_CONT "???????? "); | |
6902 | + else | |
6903 | + printk(KERN_CONT "%08x ", c); | |
6904 | + } | |
6905 | + printk("\n"); | |
6906 | +} | |
6907 | +#endif | |
6908 | + | |
6909 | /* | |
6910 | * This routine handles page faults. It determines the address, | |
6911 | * and the problem, and then passes it off to one of the appropriate | |
6912 | @@ -199,6 +216,14 @@ bad_area: | |
6913 | bad_area_nosemaphore: | |
6914 | /* User mode accesses just cause a SIGSEGV */ | |
6915 | if (user_mode(regs)) { | |
6916 | + | |
6917 | +#ifdef CONFIG_PAX_PAGEEXEC | |
6918 | + if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) { | |
6919 | + pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs)); | |
6920 | + do_group_exit(SIGKILL); | |
6921 | + } | |
6922 | +#endif | |
6923 | + | |
6924 | tsk->thread.cp0_badvaddr = address; | |
6925 | tsk->thread.error_code = write; | |
6926 | #if 0 | |
6927 | diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c | |
6928 | index f1baadd..5472dca 100644 | |
6929 | --- a/arch/mips/mm/mmap.c | |
6930 | +++ b/arch/mips/mm/mmap.c | |
6931 | @@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, | |
6932 | struct vm_area_struct *vma; | |
6933 | unsigned long addr = addr0; | |
6934 | int do_color_align; | |
6935 | + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); | |
6936 | struct vm_unmapped_area_info info; | |
6937 | ||
6938 | if (unlikely(len > TASK_SIZE)) | |
6939 | @@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, | |
6940 | do_color_align = 1; | |
6941 | ||
6942 | /* requesting a specific address */ | |
6943 | + | |
6944 | +#ifdef CONFIG_PAX_RANDMMAP | |
6945 | + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP)) | |
6946 | +#endif | |
6947 | + | |
6948 | if (addr) { | |
6949 | if (do_color_align) | |
6950 | addr = COLOUR_ALIGN(addr, pgoff); | |
6951 | @@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, | |
6952 | addr = PAGE_ALIGN(addr); | |
6953 | ||
6954 | vma = find_vma(mm, addr); | |
6955 | - if (TASK_SIZE - len >= addr && | |
6956 | - (!vma || addr + len <= vma->vm_start)) | |
6957 | + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) | |
6958 | return addr; | |
6959 | } | |
6960 | ||
6961 | info.length = len; | |
6962 | info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0; | |
6963 | info.align_offset = pgoff << PAGE_SHIFT; | |
6964 | + info.threadstack_offset = offset; | |
6965 | ||
6966 | if (dir == DOWN) { | |
6967 | info.flags = VM_UNMAPPED_AREA_TOPDOWN; | |
6968 | @@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm) | |
6969 | { | |
6970 | unsigned long random_factor = 0UL; | |
6971 | ||
6972 | +#ifdef CONFIG_PAX_RANDMMAP | |
6973 | + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) | |
6974 | +#endif | |
6975 | + | |
6976 | if (current->flags & PF_RANDOMIZE) { | |
6977 | random_factor = get_random_int(); | |
6978 | random_factor = random_factor << PAGE_SHIFT; | |
6979 | @@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm) | |
6980 | ||
6981 | if (mmap_is_legacy()) { | |
6982 | mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; | |
6983 | + | |
6984 | +#ifdef CONFIG_PAX_RANDMMAP | |
6985 | + if (mm->pax_flags & MF_PAX_RANDMMAP) | |
6986 | + mm->mmap_base += mm->delta_mmap; | |
6987 | +#endif | |
6988 | + | |
6989 | mm->get_unmapped_area = arch_get_unmapped_area; | |
6990 | } else { | |
6991 | mm->mmap_base = mmap_base(random_factor); | |
6992 | + | |
6993 | +#ifdef CONFIG_PAX_RANDMMAP | |
6994 | + if (mm->pax_flags & MF_PAX_RANDMMAP) | |
6995 | + mm->mmap_base -= mm->delta_mmap + mm->delta_stack; | |
6996 | +#endif | |
6997 | + | |
6998 | mm->get_unmapped_area = arch_get_unmapped_area_topdown; | |
6999 | } | |
7000 | } | |
7001 | ||
7002 | -static inline unsigned long brk_rnd(void) | |
7003 | -{ | |
7004 | - unsigned long rnd = get_random_int(); | |
7005 | - | |
7006 | - rnd = rnd << PAGE_SHIFT; | |
7007 | - /* 8MB for 32bit, 256MB for 64bit */ | |
7008 | - if (TASK_IS_32BIT_ADDR) | |
7009 | - rnd = rnd & 0x7ffffful; | |
7010 | - else | |
7011 | - rnd = rnd & 0xffffffful; | |
7012 | - | |
7013 | - return rnd; | |
7014 | -} | |
7015 | - | |
7016 | -unsigned long arch_randomize_brk(struct mm_struct *mm) | |
7017 | -{ | |
7018 | - unsigned long base = mm->brk; | |
7019 | - unsigned long ret; | |
7020 | - | |
7021 | - ret = PAGE_ALIGN(base + brk_rnd()); | |
7022 | - | |
7023 | - if (ret < mm->brk) | |
7024 | - return mm->brk; | |
7025 | - | |
7026 | - return ret; | |
7027 | -} | |
7028 | - | |
7029 | int __virt_addr_valid(const volatile void *kaddr) | |
7030 | { | |
7031 | return pfn_valid(PFN_DOWN(virt_to_phys(kaddr))); | |
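The mmap.c changes do three related things: the caller-supplied address hint is skipped when MF_PAX_RANDMMAP is set, the chosen mmap base is shifted by the per-mm deltas (added for the legacy bottom-up layout, subtracted for top-down), and the MIPS-private brk_rnd()/arch_randomize_brk() helpers are removed along with their elf.h declaration. The delta widths follow the PAX_DELTA_*_LEN definitions added to elf.h earlier (27-PAGE_SHIFT bits of page-granular randomness for 32-bit tasks). Below is a toy model of how such a base could be derived; every constant and name in it is made up for illustration and none of it is the patch's code.

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    #define PAGE_SHIFT          12
    #define TASK_UNMAPPED_BASE  0x10000000UL   /* illustrative */
    #define MMAP_TOPDOWN_BASE   0x77000000UL   /* illustrative */
    #define PAX_DELTA_MMAP_LEN  (27 - PAGE_SHIFT)
    #define PAX_DELTA_STACK_LEN (27 - PAGE_SHIFT)

    /* page-aligned random offset of the requested bit width */
    static unsigned long rand_delta(unsigned int bits)
    {
        return ((unsigned long)rand() & ((1UL << bits) - 1)) << PAGE_SHIFT;
    }

    int main(void)
    {
        srand((unsigned int)time(NULL));

        unsigned long delta_mmap  = rand_delta(PAX_DELTA_MMAP_LEN);
        unsigned long delta_stack = rand_delta(PAX_DELTA_STACK_LEN);

        /* legacy (bottom-up) layout: push the search start up */
        unsigned long legacy_base  = TASK_UNMAPPED_BASE + delta_mmap;
        /* top-down layout: pull the base down, widening the gap under the stack */
        unsigned long topdown_base = MMAP_TOPDOWN_BASE - (delta_mmap + delta_stack);

        printf("legacy base:   %#lx\n", legacy_base);
        printf("top-down base: %#lx\n", topdown_base);
        return 0;
    }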
7032 | diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c | |
7033 | index 59cccd9..f39ac2f 100644 | |
7034 | --- a/arch/mips/pci/pci-octeon.c | |
7035 | +++ b/arch/mips/pci/pci-octeon.c | |
7036 | @@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn, | |
7037 | ||
7038 | ||
7039 | static st |